code (string, 75-104k chars) | code_sememe (string, 47-309k chars) | token_type (string, 215-214k chars) | code_dependency (string, 75-155k chars)
---|---|---|---|
def register_service(service):
"""
Register the ryu application specified by 'service' as
a provider of events defined in the calling module.
If an application being loaded consumes events (in the sense of
set_ev_cls) provided by the 'service' application, the latter
application will be automatically loaded.
This mechanism is used to e.g. automatically start ofp_handler if
there are applications consuming OFP events.
"""
frame = inspect.currentframe()
m_name = frame.f_back.f_globals['__name__']
m = sys.modules[m_name]
m._SERVICE_NAME = service | def function[register_service, parameter[service]]:
constant[
Register the ryu application specified by 'service' as
a provider of events defined in the calling module.
If an application being loaded consumes events (in the sense of
set_ev_cls) provided by the 'service' application, the latter
application will be automatically loaded.
This mechanism is used to e.g. automatically start ofp_handler if
there are applications consuming OFP events.
]
variable[frame] assign[=] call[name[inspect].currentframe, parameter[]]
variable[m_name] assign[=] call[name[frame].f_back.f_globals][constant[__name__]]
variable[m] assign[=] call[name[sys].modules][name[m_name]]
name[m]._SERVICE_NAME assign[=] name[service] | keyword[def] identifier[register_service] ( identifier[service] ):
literal[string]
identifier[frame] = identifier[inspect] . identifier[currentframe] ()
identifier[m_name] = identifier[frame] . identifier[f_back] . identifier[f_globals] [ literal[string] ]
identifier[m] = identifier[sys] . identifier[modules] [ identifier[m_name] ]
identifier[m] . identifier[_SERVICE_NAME] = identifier[service] | def register_service(service):
"""
Register the ryu application specified by 'service' as
a provider of events defined in the calling module.
If an application being loaded consumes events (in the sense of
set_ev_cls) provided by the 'service' application, the latter
application will be automatically loaded.
This mechanism is used to e.g. automatically start ofp_handler if
there are applications consuming OFP events.
"""
frame = inspect.currentframe()
m_name = frame.f_back.f_globals['__name__']
m = sys.modules[m_name]
m._SERVICE_NAME = service |
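A brief usage sketch may help here: an event-definition module calls register_service at import time so the app manager can resolve the dependency later. The module and service paths below are hypothetical, though Ryu's own ofp_event module follows the same pattern with 'ryu.controller.ofp_handler'.

```python
# hypothetical events module: myapp/example_events.py
from ryu.controller import handler

class EventExampleReply(object):
    """Placeholder event class provided by this module."""

# Any app consuming this module's events (via set_ev_cls) causes
# 'myapp.example_service' to be loaded automatically.
handler.register_service('myapp.example_service')
```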
def _findNearest(arr, value):
""" Finds the value in arr that value is closest to
"""
arr = np.array(arr)
# find nearest value in array
idx = (abs(arr-value)).argmin()
return arr[idx] | def function[_findNearest, parameter[arr, value]]:
    constant[ Finds the entry in arr that is closest to value
]
variable[arr] assign[=] call[name[np].array, parameter[name[arr]]]
variable[idx] assign[=] call[call[name[abs], parameter[binary_operation[name[arr] - name[value]]]].argmin, parameter[]]
return[call[name[arr]][name[idx]]] | keyword[def] identifier[_findNearest] ( identifier[arr] , identifier[value] ):
literal[string]
identifier[arr] = identifier[np] . identifier[array] ( identifier[arr] )
identifier[idx] =( identifier[abs] ( identifier[arr] - identifier[value] )). identifier[argmin] ()
keyword[return] identifier[arr] [ identifier[idx] ] | def _findNearest(arr, value):
""" Finds the value in arr that value is closest to
"""
arr = np.array(arr)
# find nearest value in array
idx = abs(arr - value).argmin()
return arr[idx] |
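A quick self-contained check of the same nearest-value lookup; the array and probe value are arbitrary.

```python
import numpy as np

arr = np.array([1.0, 2.5, 4.0, 7.5])
value = 3.1
idx = np.abs(arr - value).argmin()  # index of the smallest absolute difference
assert arr[idx] == 2.5              # 3.1 is closer to 2.5 than to 4.0
```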
def set_active_vectors(self, name, preference='cell'):
"""Finds the vectors by name and appropriately sets it as active"""
_, field = get_scalar(self, name, preference=preference, info=True)
if field == POINT_DATA_FIELD:
self.GetPointData().SetActiveVectors(name)
elif field == CELL_DATA_FIELD:
self.GetCellData().SetActiveVectors(name)
else:
raise RuntimeError('Data field ({}) not useable'.format(field))
self._active_vectors_info = [field, name] | def function[set_active_vectors, parameter[self, name, preference]]:
constant[Finds the vectors by name and appropriately sets it as active]
<ast.Tuple object at 0x7da18f8138b0> assign[=] call[name[get_scalar], parameter[name[self], name[name]]]
if compare[name[field] equal[==] name[POINT_DATA_FIELD]] begin[:]
call[call[name[self].GetPointData, parameter[]].SetActiveVectors, parameter[name[name]]]
name[self]._active_vectors_info assign[=] list[[<ast.Name object at 0x7da18c4cd150>, <ast.Name object at 0x7da18c4ced70>]] | keyword[def] identifier[set_active_vectors] ( identifier[self] , identifier[name] , identifier[preference] = literal[string] ):
literal[string]
identifier[_] , identifier[field] = identifier[get_scalar] ( identifier[self] , identifier[name] , identifier[preference] = identifier[preference] , identifier[info] = keyword[True] )
keyword[if] identifier[field] == identifier[POINT_DATA_FIELD] :
identifier[self] . identifier[GetPointData] (). identifier[SetActiveVectors] ( identifier[name] )
keyword[elif] identifier[field] == identifier[CELL_DATA_FIELD] :
identifier[self] . identifier[GetCellData] (). identifier[SetActiveVectors] ( identifier[name] )
keyword[else] :
keyword[raise] identifier[RuntimeError] ( literal[string] . identifier[format] ( identifier[field] ))
identifier[self] . identifier[_active_vectors_info] =[ identifier[field] , identifier[name] ] | def set_active_vectors(self, name, preference='cell'):
"""Finds the vectors by name and appropriately sets it as active"""
(_, field) = get_scalar(self, name, preference=preference, info=True)
if field == POINT_DATA_FIELD:
self.GetPointData().SetActiveVectors(name) # depends on [control=['if'], data=[]]
elif field == CELL_DATA_FIELD:
self.GetCellData().SetActiveVectors(name) # depends on [control=['if'], data=[]]
else:
raise RuntimeError('Data field ({}) not useable'.format(field))
self._active_vectors_info = [field, name] |
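This method appears to come from an early vtki/PyVista codebase. Assuming the modern PyVista API, typical usage looks roughly like the sketch below; with preference='cell' the lookup would favour cell data instead.

```python
import numpy as np
import pyvista as pv

mesh = pv.Sphere()
# attach a 3-component per-point array, then activate it by name
mesh.point_data['velocity'] = np.random.rand(mesh.n_points, 3)
mesh.set_active_vectors('velocity')
print(mesh.active_vectors_info)
```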
def create(self, type, name=None, data=None, priority=None,
port=None, weight=None):
"""
Parameters
----------
type: str
{A, AAAA, CNAME, MX, TXT, SRV, NS}
name: str
Name of the record
data: object, type-dependent
type == 'A' : IPv4 address
type == 'AAAA' : IPv6 address
type == 'CNAME' : destination host name
type == 'MX' : mail host name
type == 'TXT' : txt contents
type == 'SRV' : target host name to direct requests for the service
type == 'NS' : name server that is authoritative for the domain
priority:
port:
weight:
"""
if type == 'A' and name is None:
name = self.domain
return self.post(type=type, name=name, data=data, priority=priority,
port=port, weight=weight)[self.singular] | def function[create, parameter[self, type, name, data, priority, port, weight]]:
constant[
Parameters
----------
type: str
{A, AAAA, CNAME, MX, TXT, SRV, NS}
name: str
Name of the record
data: object, type-dependent
type == 'A' : IPv4 address
type == 'AAAA' : IPv6 address
type == 'CNAME' : destination host name
type == 'MX' : mail host name
type == 'TXT' : txt contents
type == 'SRV' : target host name to direct requests for the service
type == 'NS' : name server that is authoritative for the domain
priority:
port:
weight:
]
if <ast.BoolOp object at 0x7da1b008ac20> begin[:]
variable[name] assign[=] name[self].domain
return[call[call[name[self].post, parameter[]]][name[self].singular]] | keyword[def] identifier[create] ( identifier[self] , identifier[type] , identifier[name] = keyword[None] , identifier[data] = keyword[None] , identifier[priority] = keyword[None] ,
identifier[port] = keyword[None] , identifier[weight] = keyword[None] ):
literal[string]
keyword[if] identifier[type] == literal[string] keyword[and] identifier[name] keyword[is] keyword[None] :
identifier[name] = identifier[self] . identifier[domain]
keyword[return] identifier[self] . identifier[post] ( identifier[type] = identifier[type] , identifier[name] = identifier[name] , identifier[data] = identifier[data] , identifier[priority] = identifier[priority] ,
identifier[port] = identifier[port] , identifier[weight] = identifier[weight] )[ identifier[self] . identifier[singular] ] | def create(self, type, name=None, data=None, priority=None, port=None, weight=None):
"""
Parameters
----------
type: str
{A, AAAA, CNAME, MX, TXT, SRV, NS}
name: str
Name of the record
data: object, type-dependent
type == 'A' : IPv4 address
type == 'AAAA' : IPv6 address
type == 'CNAME' : destination host name
type == 'MX' : mail host name
type == 'TXT' : txt contents
type == 'SRV' : target host name to direct requests for the service
type == 'NS' : name server that is authoritative for the domain
priority:
port:
weight:
"""
if type == 'A' and name is None:
name = self.domain # depends on [control=['if'], data=[]]
return self.post(type=type, name=name, data=data, priority=priority, port=port, weight=weight)[self.singular] |
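A runnable sketch of the A-record defaulting behaviour, with a stub standing in for the real HTTP client; the class name and echoed payload shape are assumptions.

```python
class RecordsStub:
    """Minimal stand-in for the DNS records client."""
    domain = 'example.com'
    singular = 'domain_record'

    def post(self, **payload):
        # a real client would POST to the API; just echo the payload back
        return {self.singular: payload}

    def create(self, type, name=None, data=None, priority=None,
               port=None, weight=None):
        if type == 'A' and name is None:
            name = self.domain  # A records default to the apex domain
        return self.post(type=type, name=name, data=data, priority=priority,
                         port=port, weight=weight)[self.singular]

record = RecordsStub().create(type='A', data='203.0.113.10')
assert record['name'] == 'example.com'
```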
def history(self, channel, **kwargs):
""" https://api.slack.com/methods/im.history
"""
self.params.update({
'channel': channel,
})
if kwargs:
self.params.update(kwargs)
return FromUrl('https://slack.com/api/im.history', self._requests)(data=self.params).get() | def function[history, parameter[self, channel]]:
constant[ https://api.slack.com/methods/im.history
]
call[name[self].params.update, parameter[dictionary[[<ast.Constant object at 0x7da18f723e80>], [<ast.Name object at 0x7da18f721ed0>]]]]
if name[kwargs] begin[:]
call[name[self].params.update, parameter[name[kwargs]]]
return[call[call[call[name[FromUrl], parameter[constant[https://slack.com/api/im.history], name[self]._requests]], parameter[]].get, parameter[]]] | keyword[def] identifier[history] ( identifier[self] , identifier[channel] ,** identifier[kwargs] ):
literal[string]
identifier[self] . identifier[params] . identifier[update] ({
literal[string] : identifier[channel] ,
})
keyword[if] identifier[kwargs] :
identifier[self] . identifier[params] . identifier[update] ( identifier[kwargs] )
keyword[return] identifier[FromUrl] ( literal[string] , identifier[self] . identifier[_requests] )( identifier[data] = identifier[self] . identifier[params] ). identifier[get] () | def history(self, channel, **kwargs):
""" https://api.slack.com/methods/im.history
"""
self.params.update({'channel': channel})
if kwargs:
self.params.update(kwargs) # depends on [control=['if'], data=[]]
return FromUrl('https://slack.com/api/im.history', self._requests)(data=self.params).get() |
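The wrapper targets the legacy im.history Web API endpoint (since deprecated in favour of conversations.history). An equivalent raw request, with placeholder token and channel id:

```python
import requests

resp = requests.get(
    'https://slack.com/api/im.history',
    params={'token': 'xoxb-placeholder', 'channel': 'D024BE91L', 'count': 100},
)
print(resp.json().get('ok'))  # False until a real token is supplied
```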
def check_resistor_board_measurements(data_file, reference_data_file=None,
create_plot=True, **kwargs):
""" To check basic system function a test board was built with multiple
resistors attached to for connectors each. Measurements can thus be
validated against known electrical (ohmic) resistances.
Note that the normal-reciprocal difference is not yet analyzed!
The referenc_data_file should have the following structure:
The file contains the four-point spreads to be imported from
the measurement. This file is a text file with four columns (A, B, M, N),
separated by spaces or tabs. Each line denotes one measurement and its
expected resistance, the allowed variation, and its allow difference
towards its reciprocal counterpart: ::
1 2 4 3 1000 1 20
4 3 2 1 1000 1 20
Parameters
----------
data_file : string
path to mnu0 data file
reference_data_file: string, optional
        path to reference data file with structure as described above. Default
data is used if set to None
create_plot : bool, optional
if True, create a plot with measured and expected resistances
**kwargs : dict, optional
**kwargs will be redirected to the sEIT.import_eit_fzj call
Returns
-------
fig : figure object, optional
if create_plot is True, return a matplotlib figure
"""
# reference_data = np.loadtxt(reference_data_file)
# configs = reference_data[:, 0:4]
column_names = [
'a', 'b', 'm', 'n', 'expected_r', 'variation_r', 'variation_diffr'
]
if reference_data_file is None:
ref_data = pd.DataFrame(_resistor_data, columns=column_names)
else:
ref_data = pd.read_csv(
reference_data_file,
names=column_names,
delim_whitespace=True,
)
print(ref_data)
configs = ref_data[['a', 'b', 'm', 'n']].values.astype(int)
seit = reda.sEIT()
seit.import_eit_fzj(data_file, configs, **kwargs)
seit.data = seit.data.merge(ref_data, on=('a', 'b', 'm', 'n'))
# iterate through the test configurations
test_frequency = 1
failing = []
for nr, row in enumerate(ref_data.values):
print(nr, row)
key = tuple(row[0:4].astype(int))
item = seit.abmn.get_group(key)
expected_r = row[4]
allowed_variation = row[5]
# expected_r_diff = row[6]
measured_r, measured_rdiff = item.query(
'frequency == {}'.format(test_frequency)
)[['r', 'rdiff']].values.squeeze()
minr = expected_r - allowed_variation
maxr = expected_r + allowed_variation
if not (minr <= measured_r and maxr >= measured_r):
print(' ', 'not passing', row)
print(' ', minr, maxr)
print(' ', measured_r)
failing.append((nr, measured_r))
if len(failing) == 0:
failing = None
else:
failing = np.atleast_2d(np.array(failing))
if create_plot:
fig, ax = plt.subplots(1, 1, figsize=(16 / 2.54, 8 / 2.54))
data = seit.data.query('frequency == 1')
x = np.arange(0, data.shape[0])
ax.plot(
x,
data['r'],
'.-',
label='data',
)
ax.fill_between(
x,
data['expected_r'] - data['variation_r'],
data['expected_r'] + data['variation_r'],
color='green',
alpha=0.8,
label='allowed limits',
)
if failing is not None:
ax.scatter(
failing[:, 0],
failing[:, 1],
color='r',
label='not passing',
s=40,
)
ax.legend()
ax.set_xticks(x)
xticklabels = [
'{}-{} {}-{}'.format(*row) for row
in data[['a', 'b', 'm', 'n']].values.astype(int)
]
ax.set_xticklabels(xticklabels, rotation=45)
        ax.set_ylabel(r'resistance $[\Omega]$')
ax.set_xlabel('configuration a-b m-n')
if failing is None:
suffix = ' PASSED'
else:
suffix = ''
ax.set_title('Resistor-check for FZJ-EIT systems' + suffix)
fig.tight_layout()
# fig.savefig('out.pdf')
return fig | def function[check_resistor_board_measurements, parameter[data_file, reference_data_file, create_plot]]:
    constant[ To check basic system function a test board was built with multiple
    resistors attached to four connectors each. Measurements can thus be
    validated against known electrical (ohmic) resistances.
    Note that the normal-reciprocal difference is not yet analyzed!
    The reference_data_file should have the following structure:
    The file contains the four-point spreads to be imported from
    the measurement. This file is a text file with four columns (A, B, M, N),
    separated by spaces or tabs. Each line denotes one measurement and its
    expected resistance, the allowed variation, and its allowed difference
    towards its reciprocal counterpart: ::
1 2 4 3 1000 1 20
4 3 2 1 1000 1 20
Parameters
----------
data_file : string
path to mnu0 data file
reference_data_file: string, optional
        path to reference data file with structure as described above. Default
data is used if set to None
create_plot : bool, optional
if True, create a plot with measured and expected resistances
**kwargs : dict, optional
**kwargs will be redirected to the sEIT.import_eit_fzj call
Returns
-------
fig : figure object, optional
if create_plot is True, return a matplotlib figure
]
variable[column_names] assign[=] list[[<ast.Constant object at 0x7da207f98df0>, <ast.Constant object at 0x7da207f9baf0>, <ast.Constant object at 0x7da207f9b550>, <ast.Constant object at 0x7da207f9b580>, <ast.Constant object at 0x7da207f9bdc0>, <ast.Constant object at 0x7da207f99810>, <ast.Constant object at 0x7da207f99f00>]]
if compare[name[reference_data_file] is constant[None]] begin[:]
variable[ref_data] assign[=] call[name[pd].DataFrame, parameter[name[_resistor_data]]]
call[name[print], parameter[name[ref_data]]]
variable[configs] assign[=] call[call[name[ref_data]][list[[<ast.Constant object at 0x7da207f9ab30>, <ast.Constant object at 0x7da207f9add0>, <ast.Constant object at 0x7da207f9afe0>, <ast.Constant object at 0x7da207f9b430>]]].values.astype, parameter[name[int]]]
variable[seit] assign[=] call[name[reda].sEIT, parameter[]]
call[name[seit].import_eit_fzj, parameter[name[data_file], name[configs]]]
name[seit].data assign[=] call[name[seit].data.merge, parameter[name[ref_data]]]
variable[test_frequency] assign[=] constant[1]
variable[failing] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da207f99420>, <ast.Name object at 0x7da207f98760>]]] in starred[call[name[enumerate], parameter[name[ref_data].values]]] begin[:]
call[name[print], parameter[name[nr], name[row]]]
variable[key] assign[=] call[name[tuple], parameter[call[call[name[row]][<ast.Slice object at 0x7da207f9aa10>].astype, parameter[name[int]]]]]
variable[item] assign[=] call[name[seit].abmn.get_group, parameter[name[key]]]
variable[expected_r] assign[=] call[name[row]][constant[4]]
variable[allowed_variation] assign[=] call[name[row]][constant[5]]
<ast.Tuple object at 0x7da207f9a800> assign[=] call[call[call[name[item].query, parameter[call[constant[frequency == {}].format, parameter[name[test_frequency]]]]]][list[[<ast.Constant object at 0x7da207f99ff0>, <ast.Constant object at 0x7da207f995d0>]]].values.squeeze, parameter[]]
variable[minr] assign[=] binary_operation[name[expected_r] - name[allowed_variation]]
variable[maxr] assign[=] binary_operation[name[expected_r] + name[allowed_variation]]
if <ast.UnaryOp object at 0x7da204565660> begin[:]
call[name[print], parameter[constant[ ], constant[not passing], name[row]]]
call[name[print], parameter[constant[ ], name[minr], name[maxr]]]
call[name[print], parameter[constant[ ], name[measured_r]]]
call[name[failing].append, parameter[tuple[[<ast.Name object at 0x7da204564c40>, <ast.Name object at 0x7da204564af0>]]]]
if compare[call[name[len], parameter[name[failing]]] equal[==] constant[0]] begin[:]
variable[failing] assign[=] constant[None]
if name[create_plot] begin[:]
<ast.Tuple object at 0x7da204564430> assign[=] call[name[plt].subplots, parameter[constant[1], constant[1]]]
variable[data] assign[=] call[name[seit].data.query, parameter[constant[frequency == 1]]]
variable[x] assign[=] call[name[np].arange, parameter[constant[0], call[name[data].shape][constant[0]]]]
call[name[ax].plot, parameter[name[x], call[name[data]][constant[r]], constant[.-]]]
call[name[ax].fill_between, parameter[name[x], binary_operation[call[name[data]][constant[expected_r]] - call[name[data]][constant[variation_r]]], binary_operation[call[name[data]][constant[expected_r]] + call[name[data]][constant[variation_r]]]]]
if compare[name[failing] is_not constant[None]] begin[:]
call[name[ax].scatter, parameter[call[name[failing]][tuple[[<ast.Slice object at 0x7da2045651b0>, <ast.Constant object at 0x7da204565270>]]], call[name[failing]][tuple[[<ast.Slice object at 0x7da204564310>, <ast.Constant object at 0x7da204565690>]]]]]
call[name[ax].legend, parameter[]]
call[name[ax].set_xticks, parameter[name[x]]]
variable[xticklabels] assign[=] <ast.ListComp object at 0x7da204564be0>
call[name[ax].set_xticklabels, parameter[name[xticklabels]]]
call[name[ax].set_ylabel, parameter[constant[resistance $[\Omega]$]]]
call[name[ax].set_xlabel, parameter[constant[configuration a-b m-n]]]
if compare[name[failing] is constant[None]] begin[:]
variable[suffix] assign[=] constant[ PASSED]
call[name[ax].set_title, parameter[binary_operation[constant[Resistor-check for FZJ-EIT systems] + name[suffix]]]]
call[name[fig].tight_layout, parameter[]]
return[name[fig]] | keyword[def] identifier[check_resistor_board_measurements] ( identifier[data_file] , identifier[reference_data_file] = keyword[None] ,
identifier[create_plot] = keyword[True] ,** identifier[kwargs] ):
literal[string]
identifier[column_names] =[
literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string]
]
keyword[if] identifier[reference_data_file] keyword[is] keyword[None] :
identifier[ref_data] = identifier[pd] . identifier[DataFrame] ( identifier[_resistor_data] , identifier[columns] = identifier[column_names] )
keyword[else] :
identifier[ref_data] = identifier[pd] . identifier[read_csv] (
identifier[reference_data_file] ,
identifier[names] = identifier[column_names] ,
identifier[delim_whitespace] = keyword[True] ,
)
identifier[print] ( identifier[ref_data] )
identifier[configs] = identifier[ref_data] [[ literal[string] , literal[string] , literal[string] , literal[string] ]]. identifier[values] . identifier[astype] ( identifier[int] )
identifier[seit] = identifier[reda] . identifier[sEIT] ()
identifier[seit] . identifier[import_eit_fzj] ( identifier[data_file] , identifier[configs] ,** identifier[kwargs] )
identifier[seit] . identifier[data] = identifier[seit] . identifier[data] . identifier[merge] ( identifier[ref_data] , identifier[on] =( literal[string] , literal[string] , literal[string] , literal[string] ))
identifier[test_frequency] = literal[int]
identifier[failing] =[]
keyword[for] identifier[nr] , identifier[row] keyword[in] identifier[enumerate] ( identifier[ref_data] . identifier[values] ):
identifier[print] ( identifier[nr] , identifier[row] )
identifier[key] = identifier[tuple] ( identifier[row] [ literal[int] : literal[int] ]. identifier[astype] ( identifier[int] ))
identifier[item] = identifier[seit] . identifier[abmn] . identifier[get_group] ( identifier[key] )
identifier[expected_r] = identifier[row] [ literal[int] ]
identifier[allowed_variation] = identifier[row] [ literal[int] ]
identifier[measured_r] , identifier[measured_rdiff] = identifier[item] . identifier[query] (
literal[string] . identifier[format] ( identifier[test_frequency] )
)[[ literal[string] , literal[string] ]]. identifier[values] . identifier[squeeze] ()
identifier[minr] = identifier[expected_r] - identifier[allowed_variation]
identifier[maxr] = identifier[expected_r] + identifier[allowed_variation]
keyword[if] keyword[not] ( identifier[minr] <= identifier[measured_r] keyword[and] identifier[maxr] >= identifier[measured_r] ):
identifier[print] ( literal[string] , literal[string] , identifier[row] )
identifier[print] ( literal[string] , identifier[minr] , identifier[maxr] )
identifier[print] ( literal[string] , identifier[measured_r] )
identifier[failing] . identifier[append] (( identifier[nr] , identifier[measured_r] ))
keyword[if] identifier[len] ( identifier[failing] )== literal[int] :
identifier[failing] = keyword[None]
keyword[else] :
identifier[failing] = identifier[np] . identifier[atleast_2d] ( identifier[np] . identifier[array] ( identifier[failing] ))
keyword[if] identifier[create_plot] :
identifier[fig] , identifier[ax] = identifier[plt] . identifier[subplots] ( literal[int] , literal[int] , identifier[figsize] =( literal[int] / literal[int] , literal[int] / literal[int] ))
identifier[data] = identifier[seit] . identifier[data] . identifier[query] ( literal[string] )
identifier[x] = identifier[np] . identifier[arange] ( literal[int] , identifier[data] . identifier[shape] [ literal[int] ])
identifier[ax] . identifier[plot] (
identifier[x] ,
identifier[data] [ literal[string] ],
literal[string] ,
identifier[label] = literal[string] ,
)
identifier[ax] . identifier[fill_between] (
identifier[x] ,
identifier[data] [ literal[string] ]- identifier[data] [ literal[string] ],
identifier[data] [ literal[string] ]+ identifier[data] [ literal[string] ],
identifier[color] = literal[string] ,
identifier[alpha] = literal[int] ,
identifier[label] = literal[string] ,
)
keyword[if] identifier[failing] keyword[is] keyword[not] keyword[None] :
identifier[ax] . identifier[scatter] (
identifier[failing] [:, literal[int] ],
identifier[failing] [:, literal[int] ],
identifier[color] = literal[string] ,
identifier[label] = literal[string] ,
identifier[s] = literal[int] ,
)
identifier[ax] . identifier[legend] ()
identifier[ax] . identifier[set_xticks] ( identifier[x] )
identifier[xticklabels] =[
literal[string] . identifier[format] (* identifier[row] ) keyword[for] identifier[row]
keyword[in] identifier[data] [[ literal[string] , literal[string] , literal[string] , literal[string] ]]. identifier[values] . identifier[astype] ( identifier[int] )
]
identifier[ax] . identifier[set_xticklabels] ( identifier[xticklabels] , identifier[rotation] = literal[int] )
identifier[ax] . identifier[set_ylabel] ( literal[string] )
identifier[ax] . identifier[set_xlabel] ( literal[string] )
keyword[if] identifier[failing] keyword[is] keyword[None] :
identifier[suffix] = literal[string]
keyword[else] :
identifier[suffix] = literal[string]
identifier[ax] . identifier[set_title] ( literal[string] + identifier[suffix] )
identifier[fig] . identifier[tight_layout] ()
keyword[return] identifier[fig] | def check_resistor_board_measurements(data_file, reference_data_file=None, create_plot=True, **kwargs):
""" To check basic system function a test board was built with multiple
resistors attached to for connectors each. Measurements can thus be
validated against known electrical (ohmic) resistances.
Note that the normal-reciprocal difference is not yet analyzed!
The referenc_data_file should have the following structure:
The file contains the four-point spreads to be imported from
the measurement. This file is a text file with four columns (A, B, M, N),
separated by spaces or tabs. Each line denotes one measurement and its
expected resistance, the allowed variation, and its allow difference
towards its reciprocal counterpart: ::
1 2 4 3 1000 1 20
4 3 2 1 1000 1 20
Parameters
----------
data_file : string
path to mnu0 data file
reference_data_file: string, optional
        path to reference data file with structure as described above. Default
data is used if set to None
create_plot : bool, optional
if True, create a plot with measured and expected resistances
**kwargs : dict, optional
**kwargs will be redirected to the sEIT.import_eit_fzj call
Returns
-------
fig : figure object, optional
if create_plot is True, return a matplotlib figure
"""
# reference_data = np.loadtxt(reference_data_file)
# configs = reference_data[:, 0:4]
column_names = ['a', 'b', 'm', 'n', 'expected_r', 'variation_r', 'variation_diffr']
if reference_data_file is None:
ref_data = pd.DataFrame(_resistor_data, columns=column_names) # depends on [control=['if'], data=[]]
else:
ref_data = pd.read_csv(reference_data_file, names=column_names, delim_whitespace=True)
print(ref_data)
configs = ref_data[['a', 'b', 'm', 'n']].values.astype(int)
seit = reda.sEIT()
seit.import_eit_fzj(data_file, configs, **kwargs)
seit.data = seit.data.merge(ref_data, on=('a', 'b', 'm', 'n'))
# iterate through the test configurations
test_frequency = 1
failing = []
for (nr, row) in enumerate(ref_data.values):
print(nr, row)
key = tuple(row[0:4].astype(int))
item = seit.abmn.get_group(key)
expected_r = row[4]
allowed_variation = row[5]
# expected_r_diff = row[6]
(measured_r, measured_rdiff) = item.query('frequency == {}'.format(test_frequency))[['r', 'rdiff']].values.squeeze()
minr = expected_r - allowed_variation
maxr = expected_r + allowed_variation
if not (minr <= measured_r and maxr >= measured_r):
print(' ', 'not passing', row)
print(' ', minr, maxr)
print(' ', measured_r)
failing.append((nr, measured_r)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
if len(failing) == 0:
failing = None # depends on [control=['if'], data=[]]
else:
failing = np.atleast_2d(np.array(failing))
if create_plot:
(fig, ax) = plt.subplots(1, 1, figsize=(16 / 2.54, 8 / 2.54))
data = seit.data.query('frequency == 1')
x = np.arange(0, data.shape[0])
ax.plot(x, data['r'], '.-', label='data')
ax.fill_between(x, data['expected_r'] - data['variation_r'], data['expected_r'] + data['variation_r'], color='green', alpha=0.8, label='allowed limits')
if failing is not None:
ax.scatter(failing[:, 0], failing[:, 1], color='r', label='not passing', s=40) # depends on [control=['if'], data=['failing']]
ax.legend()
ax.set_xticks(x)
xticklabels = ['{}-{} {}-{}'.format(*row) for row in data[['a', 'b', 'm', 'n']].values.astype(int)]
ax.set_xticklabels(xticklabels, rotation=45)
ax.set_ylabel('resistance $[\\Omega]$')
ax.set_xlabel('configuration a-b m-n')
if failing is None:
suffix = ' PASSED' # depends on [control=['if'], data=[]]
else:
suffix = ''
ax.set_title('Resistor-check for FZJ-EIT systems' + suffix)
fig.tight_layout()
# fig.savefig('out.pdf')
return fig # depends on [control=['if'], data=[]] |
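A minimal reference file matching the documented column layout (a b m n expected_r variation_r variation_diffr) can be written like this; the measurement data path in the commented call is a placeholder.

```python
reference = (
    "1 2 4 3 1000 1 20\n"
    "4 3 2 1 1000 1 20\n"
)
with open('resistor_reference.dat', 'w') as fh:
    fh.write(reference)
# fig = check_resistor_board_measurements('eit_data.bin', 'resistor_reference.dat')
```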
def index_agreement(s, o):
"""
index of agreement
input:
s: simulated
o: observed
output:
ia: index of agreement
"""
# s,o = filter_nan(s,o)
ia = 1 - (np.sum((o-s)**2)) /\
(np.sum((np.abs(s-np.mean(o))+np.abs(o-np.mean(o)))**2))
return ia | def function[index_agreement, parameter[s, o]]:
constant[
index of agreement
input:
s: simulated
o: observed
output:
ia: index of agreement
]
variable[ia] assign[=] binary_operation[constant[1] - binary_operation[call[name[np].sum, parameter[binary_operation[binary_operation[name[o] - name[s]] ** constant[2]]]] / call[name[np].sum, parameter[binary_operation[binary_operation[call[name[np].abs, parameter[binary_operation[name[s] - call[name[np].mean, parameter[name[o]]]]]] + call[name[np].abs, parameter[binary_operation[name[o] - call[name[np].mean, parameter[name[o]]]]]]] ** constant[2]]]]]]
return[name[ia]] | keyword[def] identifier[index_agreement] ( identifier[s] , identifier[o] ):
literal[string]
identifier[ia] = literal[int] -( identifier[np] . identifier[sum] (( identifier[o] - identifier[s] )** literal[int] ))/( identifier[np] . identifier[sum] (( identifier[np] . identifier[abs] ( identifier[s] - identifier[np] . identifier[mean] ( identifier[o] ))+ identifier[np] . identifier[abs] ( identifier[o] - identifier[np] . identifier[mean] ( identifier[o] )))** literal[int] ))
keyword[return] identifier[ia] | def index_agreement(s, o):
"""
index of agreement
input:
s: simulated
o: observed
output:
ia: index of agreement
"""
# s,o = filter_nan(s,o)
ia = 1 - np.sum((o - s) ** 2) / np.sum((np.abs(s - np.mean(o)) + np.abs(o - np.mean(o))) ** 2)
return ia |
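A small numeric check of Willmott's index of agreement, d = 1 - sum((o - s)^2) / sum((|s - mean(o)| + |o - mean(o)|)^2), on made-up data:

```python
import numpy as np

o = np.array([1.0, 2.0, 3.0, 4.0])  # observed
s = np.array([1.1, 1.9, 3.2, 3.8])  # simulated
d = 1 - np.sum((o - s) ** 2) / np.sum(
    (np.abs(s - o.mean()) + np.abs(o - o.mean())) ** 2)
print(round(float(d), 4))  # close to 1; d == 1 means perfect agreement
```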
def get_time_slide_id(xmldoc, time_slide, create_new = None, superset_ok = False, nonunique_ok = False):
"""
Return the time_slide_id corresponding to the offset vector
described by time_slide, a dictionary of instrument/offset pairs.
Example:
>>> get_time_slide_id(xmldoc, {"H1": 0, "L1": 0})
'time_slide:time_slide_id:10'
This function is a wrapper around the .get_time_slide_id() method
of the pycbc_glue.ligolw.lsctables.TimeSlideTable class. See the
documentation for that class for the meaning of the create_new,
superset_ok and nonunique_ok keyword arguments.
This function requires the document to contain exactly one
time_slide table. If the document does not contain exactly one
time_slide table then ValueError is raised, unless the optional
create_new argument is not None. In that case a new table is
    created. This effect of the create_new argument is in addition to
    the effects described by the TimeSlideTable class.
"""
try:
tisitable = lsctables.TimeSlideTable.get_table(xmldoc)
except ValueError:
# table not found
if create_new is None:
raise
tisitable = lsctables.New(lsctables.TimeSlideTable)
xmldoc.childNodes[0].appendChild(tisitable)
# make sure the next_id attribute is correct
tisitable.sync_next_id()
# get the id
return tisitable.get_time_slide_id(time_slide, create_new = create_new, superset_ok = superset_ok, nonunique_ok = nonunique_ok) | def function[get_time_slide_id, parameter[xmldoc, time_slide, create_new, superset_ok, nonunique_ok]]:
constant[
Return the time_slide_id corresponding to the offset vector
described by time_slide, a dictionary of instrument/offset pairs.
Example:
>>> get_time_slide_id(xmldoc, {"H1": 0, "L1": 0})
'time_slide:time_slide_id:10'
This function is a wrapper around the .get_time_slide_id() method
of the pycbc_glue.ligolw.lsctables.TimeSlideTable class. See the
documentation for that class for the meaning of the create_new,
superset_ok and nonunique_ok keyword arguments.
This function requires the document to contain exactly one
time_slide table. If the document does not contain exactly one
time_slide table then ValueError is raised, unless the optional
create_new argument is not None. In that case a new table is
    created. This effect of the create_new argument is in addition to
    the effects described by the TimeSlideTable class.
]
<ast.Try object at 0x7da1b0b57340>
call[name[tisitable].sync_next_id, parameter[]]
return[call[name[tisitable].get_time_slide_id, parameter[name[time_slide]]]] | keyword[def] identifier[get_time_slide_id] ( identifier[xmldoc] , identifier[time_slide] , identifier[create_new] = keyword[None] , identifier[superset_ok] = keyword[False] , identifier[nonunique_ok] = keyword[False] ):
literal[string]
keyword[try] :
identifier[tisitable] = identifier[lsctables] . identifier[TimeSlideTable] . identifier[get_table] ( identifier[xmldoc] )
keyword[except] identifier[ValueError] :
keyword[if] identifier[create_new] keyword[is] keyword[None] :
keyword[raise]
identifier[tisitable] = identifier[lsctables] . identifier[New] ( identifier[lsctables] . identifier[TimeSlideTable] )
identifier[xmldoc] . identifier[childNodes] [ literal[int] ]. identifier[appendChild] ( identifier[tisitable] )
identifier[tisitable] . identifier[sync_next_id] ()
keyword[return] identifier[tisitable] . identifier[get_time_slide_id] ( identifier[time_slide] , identifier[create_new] = identifier[create_new] , identifier[superset_ok] = identifier[superset_ok] , identifier[nonunique_ok] = identifier[nonunique_ok] ) | def get_time_slide_id(xmldoc, time_slide, create_new=None, superset_ok=False, nonunique_ok=False):
"""
Return the time_slide_id corresponding to the offset vector
described by time_slide, a dictionary of instrument/offset pairs.
Example:
>>> get_time_slide_id(xmldoc, {"H1": 0, "L1": 0})
'time_slide:time_slide_id:10'
This function is a wrapper around the .get_time_slide_id() method
of the pycbc_glue.ligolw.lsctables.TimeSlideTable class. See the
documentation for that class for the meaning of the create_new,
superset_ok and nonunique_ok keyword arguments.
This function requires the document to contain exactly one
time_slide table. If the document does not contain exactly one
time_slide table then ValueError is raised, unless the optional
create_new argument is not None. In that case a new table is
    created. This effect of the create_new argument is in addition to
    the effects described by the TimeSlideTable class.
"""
try:
tisitable = lsctables.TimeSlideTable.get_table(xmldoc) # depends on [control=['try'], data=[]]
except ValueError: # table not found
if create_new is None:
raise # depends on [control=['if'], data=[]]
tisitable = lsctables.New(lsctables.TimeSlideTable)
xmldoc.childNodes[0].appendChild(tisitable) # depends on [control=['except'], data=[]] # make sure the next_id attribute is correct
tisitable.sync_next_id() # get the id
return tisitable.get_time_slide_id(time_slide, create_new=create_new, superset_ok=superset_ok, nonunique_ok=nonunique_ok) |
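A minimal sketch of the ValueError path on an empty document; the pycbc_glue import layout is an assumption about the surrounding module.

```python
from pycbc_glue.ligolw import ligolw

xmldoc = ligolw.Document()
xmldoc.appendChild(ligolw.LIGO_LW())
try:
    tsid = get_time_slide_id(xmldoc, {'H1': 0.0, 'L1': 0.0})
except ValueError:
    # no time_slide table exists yet and create_new was None
    tsid = None
```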
def read_constraints_from_config(cp, transforms=None,
constraint_section='constraint'):
"""Loads parameter constraints from a configuration file.
Parameters
----------
cp : WorkflowConfigParser
An open config parser to read from.
transforms : list, optional
List of transforms to apply to parameters before applying constraints.
constraint_section : str, optional
The section to get the constraints from. Default is 'constraint'.
Returns
-------
list
List of ``Constraint`` objects. Empty if no constraints were provided.
"""
cons = []
for subsection in cp.get_subsections(constraint_section):
name = cp.get_opt_tag(constraint_section, "name", subsection)
constraint_arg = cp.get_opt_tag(
constraint_section, "constraint_arg", subsection)
# get any other keyword arguments
kwargs = {}
section = constraint_section + "-" + subsection
extra_opts = [key for key in cp.options(section)
if key not in ["name", "constraint_arg"]]
for key in extra_opts:
val = cp.get(section, key)
if key == "required_parameters":
val = val.split(_VARARGS_DELIM)
else:
try:
val = float(val)
except ValueError:
pass
kwargs[key] = val
cons.append(constraints.constraints[name](constraint_arg,
transforms=transforms,
**kwargs))
return cons | def function[read_constraints_from_config, parameter[cp, transforms, constraint_section]]:
constant[Loads parameter constraints from a configuration file.
Parameters
----------
cp : WorkflowConfigParser
An open config parser to read from.
transforms : list, optional
List of transforms to apply to parameters before applying constraints.
constraint_section : str, optional
The section to get the constraints from. Default is 'constraint'.
Returns
-------
list
List of ``Constraint`` objects. Empty if no constraints were provided.
]
variable[cons] assign[=] list[[]]
for taget[name[subsection]] in starred[call[name[cp].get_subsections, parameter[name[constraint_section]]]] begin[:]
variable[name] assign[=] call[name[cp].get_opt_tag, parameter[name[constraint_section], constant[name], name[subsection]]]
variable[constraint_arg] assign[=] call[name[cp].get_opt_tag, parameter[name[constraint_section], constant[constraint_arg], name[subsection]]]
variable[kwargs] assign[=] dictionary[[], []]
variable[section] assign[=] binary_operation[binary_operation[name[constraint_section] + constant[-]] + name[subsection]]
variable[extra_opts] assign[=] <ast.ListComp object at 0x7da2044c2050>
for taget[name[key]] in starred[name[extra_opts]] begin[:]
variable[val] assign[=] call[name[cp].get, parameter[name[section], name[key]]]
if compare[name[key] equal[==] constant[required_parameters]] begin[:]
variable[val] assign[=] call[name[val].split, parameter[name[_VARARGS_DELIM]]]
call[name[kwargs]][name[key]] assign[=] name[val]
call[name[cons].append, parameter[call[call[name[constraints].constraints][name[name]], parameter[name[constraint_arg]]]]]
return[name[cons]] | keyword[def] identifier[read_constraints_from_config] ( identifier[cp] , identifier[transforms] = keyword[None] ,
identifier[constraint_section] = literal[string] ):
literal[string]
identifier[cons] =[]
keyword[for] identifier[subsection] keyword[in] identifier[cp] . identifier[get_subsections] ( identifier[constraint_section] ):
identifier[name] = identifier[cp] . identifier[get_opt_tag] ( identifier[constraint_section] , literal[string] , identifier[subsection] )
identifier[constraint_arg] = identifier[cp] . identifier[get_opt_tag] (
identifier[constraint_section] , literal[string] , identifier[subsection] )
identifier[kwargs] ={}
identifier[section] = identifier[constraint_section] + literal[string] + identifier[subsection]
identifier[extra_opts] =[ identifier[key] keyword[for] identifier[key] keyword[in] identifier[cp] . identifier[options] ( identifier[section] )
keyword[if] identifier[key] keyword[not] keyword[in] [ literal[string] , literal[string] ]]
keyword[for] identifier[key] keyword[in] identifier[extra_opts] :
identifier[val] = identifier[cp] . identifier[get] ( identifier[section] , identifier[key] )
keyword[if] identifier[key] == literal[string] :
identifier[val] = identifier[val] . identifier[split] ( identifier[_VARARGS_DELIM] )
keyword[else] :
keyword[try] :
identifier[val] = identifier[float] ( identifier[val] )
keyword[except] identifier[ValueError] :
keyword[pass]
identifier[kwargs] [ identifier[key] ]= identifier[val]
identifier[cons] . identifier[append] ( identifier[constraints] . identifier[constraints] [ identifier[name] ]( identifier[constraint_arg] ,
identifier[transforms] = identifier[transforms] ,
** identifier[kwargs] ))
keyword[return] identifier[cons] | def read_constraints_from_config(cp, transforms=None, constraint_section='constraint'):
"""Loads parameter constraints from a configuration file.
Parameters
----------
cp : WorkflowConfigParser
An open config parser to read from.
transforms : list, optional
List of transforms to apply to parameters before applying constraints.
constraint_section : str, optional
The section to get the constraints from. Default is 'constraint'.
Returns
-------
list
List of ``Constraint`` objects. Empty if no constraints were provided.
"""
cons = []
for subsection in cp.get_subsections(constraint_section):
name = cp.get_opt_tag(constraint_section, 'name', subsection)
constraint_arg = cp.get_opt_tag(constraint_section, 'constraint_arg', subsection)
# get any other keyword arguments
kwargs = {}
section = constraint_section + '-' + subsection
extra_opts = [key for key in cp.options(section) if key not in ['name', 'constraint_arg']]
for key in extra_opts:
val = cp.get(section, key)
if key == 'required_parameters':
val = val.split(_VARARGS_DELIM) # depends on [control=['if'], data=[]]
else:
try:
val = float(val) # depends on [control=['try'], data=[]]
except ValueError:
pass # depends on [control=['except'], data=[]]
kwargs[key] = val # depends on [control=['for'], data=['key']]
cons.append(constraints.constraints[name](constraint_arg, transforms=transforms, **kwargs)) # depends on [control=['for'], data=['subsection']]
return cons |
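A hypothetical [constraint-...] section in the shape the reader expects; the constraint name and the 'tolerance' option are placeholders, not confirmed PyCBC keys.

```python
config_text = """
[constraint-masses]
name = custom
constraint_arg = mass2 <= mass1
tolerance = 0.1
"""
# After loading the text above into a WorkflowConfigParser, the reader would
# build roughly:
# constraints.constraints['custom']('mass2 <= mass1', transforms=None,
#                                   tolerance=0.1)  # 0.1 coerced to float
```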
def port_profile_fcoe_profile_fcoeport_fcoe_map_name(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
port_profile = ET.SubElement(config, "port-profile", xmlns="urn:brocade.com:mgmt:brocade-port-profile")
name_key = ET.SubElement(port_profile, "name")
name_key.text = kwargs.pop('name')
fcoe_profile = ET.SubElement(port_profile, "fcoe-profile")
fcoeport = ET.SubElement(fcoe_profile, "fcoeport")
fcoe_map_name = ET.SubElement(fcoeport, "fcoe-map-name")
fcoe_map_name.text = kwargs.pop('fcoe_map_name')
callback = kwargs.pop('callback', self._callback)
return callback(config) | def function[port_profile_fcoe_profile_fcoeport_fcoe_map_name, parameter[self]]:
constant[Auto Generated Code
]
variable[config] assign[=] call[name[ET].Element, parameter[constant[config]]]
variable[port_profile] assign[=] call[name[ET].SubElement, parameter[name[config], constant[port-profile]]]
variable[name_key] assign[=] call[name[ET].SubElement, parameter[name[port_profile], constant[name]]]
name[name_key].text assign[=] call[name[kwargs].pop, parameter[constant[name]]]
variable[fcoe_profile] assign[=] call[name[ET].SubElement, parameter[name[port_profile], constant[fcoe-profile]]]
variable[fcoeport] assign[=] call[name[ET].SubElement, parameter[name[fcoe_profile], constant[fcoeport]]]
variable[fcoe_map_name] assign[=] call[name[ET].SubElement, parameter[name[fcoeport], constant[fcoe-map-name]]]
name[fcoe_map_name].text assign[=] call[name[kwargs].pop, parameter[constant[fcoe_map_name]]]
variable[callback] assign[=] call[name[kwargs].pop, parameter[constant[callback], name[self]._callback]]
return[call[name[callback], parameter[name[config]]]] | keyword[def] identifier[port_profile_fcoe_profile_fcoeport_fcoe_map_name] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
identifier[config] = identifier[ET] . identifier[Element] ( literal[string] )
identifier[port_profile] = identifier[ET] . identifier[SubElement] ( identifier[config] , literal[string] , identifier[xmlns] = literal[string] )
identifier[name_key] = identifier[ET] . identifier[SubElement] ( identifier[port_profile] , literal[string] )
identifier[name_key] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] )
identifier[fcoe_profile] = identifier[ET] . identifier[SubElement] ( identifier[port_profile] , literal[string] )
identifier[fcoeport] = identifier[ET] . identifier[SubElement] ( identifier[fcoe_profile] , literal[string] )
identifier[fcoe_map_name] = identifier[ET] . identifier[SubElement] ( identifier[fcoeport] , literal[string] )
identifier[fcoe_map_name] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] )
identifier[callback] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[self] . identifier[_callback] )
keyword[return] identifier[callback] ( identifier[config] ) | def port_profile_fcoe_profile_fcoeport_fcoe_map_name(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element('config')
port_profile = ET.SubElement(config, 'port-profile', xmlns='urn:brocade.com:mgmt:brocade-port-profile')
name_key = ET.SubElement(port_profile, 'name')
name_key.text = kwargs.pop('name')
fcoe_profile = ET.SubElement(port_profile, 'fcoe-profile')
fcoeport = ET.SubElement(fcoe_profile, 'fcoeport')
fcoe_map_name = ET.SubElement(fcoeport, 'fcoe-map-name')
fcoe_map_name.text = kwargs.pop('fcoe_map_name')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
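The payload this auto-generated method builds, reproduced standalone with placeholder values so the XML shape is visible:

```python
import xml.etree.ElementTree as ET

config = ET.Element('config')
pp = ET.SubElement(config, 'port-profile',
                   xmlns='urn:brocade.com:mgmt:brocade-port-profile')
ET.SubElement(pp, 'name').text = 'pp1'
fcoeport = ET.SubElement(ET.SubElement(pp, 'fcoe-profile'), 'fcoeport')
ET.SubElement(fcoeport, 'fcoe-map-name').text = 'default'
print(ET.tostring(config).decode())
```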
def _prepend_name(self, prefix, dict_):
    '''changes the keys of the dictionary, prepending each with "<prefix>."'''
return dict(['.'.join([prefix, name]), msg]
for name, msg in dict_.iteritems()) | def function[_prepend_name, parameter[self, prefix, dict_]]:
    constant[changes the keys of the dictionary, prepending each with "<prefix>."]
return[call[name[dict], parameter[<ast.GeneratorExp object at 0x7da18dc9aaa0>]]] | keyword[def] identifier[_prepend_name] ( identifier[self] , identifier[prefix] , identifier[dict_] ):
literal[string]
keyword[return] identifier[dict] ([ literal[string] . identifier[join] ([ identifier[prefix] , identifier[name] ]), identifier[msg] ]
keyword[for] identifier[name] , identifier[msg] keyword[in] identifier[dict_] . identifier[iteritems] ()) | def _prepend_name(self, prefix, dict_):
    '''changes the keys of the dictionary, prepending each with "<prefix>."'''
return dict((['.'.join([prefix, name]), msg] for (name, msg) in dict_.iteritems())) |
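The source relies on the Python 2 iteritems(); the same key-prefixing in Python 3 is a one-line dict comprehension:

```python
def prepend_name(prefix, d):
    # return a new dict with every key rewritten to '<prefix>.<key>'
    return {'.'.join([prefix, name]): msg for name, msg in d.items()}

assert prepend_name('user', {'email': 'invalid'}) == {'user.email': 'invalid'}
```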
def get_more(self, show=True, proxy=None, timeout=0):
"""
        Calls get_querymore(); provided for convenience.
"""
return self.get_querymore(show, proxy, timeout) | def function[get_more, parameter[self, show, proxy, timeout]]:
constant[
    Calls get_querymore(); provided for convenience.
]
return[call[name[self].get_querymore, parameter[name[show], name[proxy], name[timeout]]]] | keyword[def] identifier[get_more] ( identifier[self] , identifier[show] = keyword[True] , identifier[proxy] = keyword[None] , identifier[timeout] = literal[int] ):
literal[string]
keyword[return] identifier[self] . identifier[get_querymore] ( identifier[show] , identifier[proxy] , identifier[timeout] ) | def get_more(self, show=True, proxy=None, timeout=0):
"""
        Calls get_querymore(); provided for convenience.
"""
return self.get_querymore(show, proxy, timeout) |
def _urlquote(string, safe=''):
"""
Quotes a unicode string for use in a URL
:param string:
A unicode string
:param safe:
        A unicode string of characters to not encode
:return:
None (if string is None) or an ASCII byte string of the quoted string
"""
if string is None or string == '':
return None
# Anything already hex quoted is pulled out of the URL and unquoted if
# possible
escapes = []
if re.search('%[0-9a-fA-F]{2}', string):
# Try to unquote any percent values, restoring them if they are not
# valid UTF-8. Also, requote any safe chars since encoded versions of
# those are functionally different than the unquoted ones.
def _try_unescape(match):
byte_string = unquote_to_bytes(match.group(0))
unicode_string = byte_string.decode('utf-8', 'iriutf8')
for safe_char in list(safe):
unicode_string = unicode_string.replace(safe_char, '%%%02x' % ord(safe_char))
return unicode_string
string = re.sub('(?:%[0-9a-fA-F]{2})+', _try_unescape, string)
# Once we have the minimal set of hex quoted values, removed them from
# the string so that they are not double quoted
def _extract_escape(match):
escapes.append(match.group(0).encode('ascii'))
return '\x00'
string = re.sub('%[0-9a-fA-F]{2}', _extract_escape, string)
output = urlquote(string.encode('utf-8'), safe=safe.encode('utf-8'))
if not isinstance(output, byte_cls):
output = output.encode('ascii')
# Restore the existing quoted values that we extracted
if len(escapes) > 0:
def _return_escape(_):
return escapes.pop(0)
output = re.sub(b'%00', _return_escape, output)
return output | def function[_urlquote, parameter[string, safe]]:
constant[
Quotes a unicode string for use in a URL
:param string:
A unicode string
:param safe:
    A unicode string of characters to not encode
:return:
None (if string is None) or an ASCII byte string of the quoted string
]
if <ast.BoolOp object at 0x7da20e9b2950> begin[:]
return[constant[None]]
variable[escapes] assign[=] list[[]]
if call[name[re].search, parameter[constant[%[0-9a-fA-F]{2}], name[string]]] begin[:]
def function[_try_unescape, parameter[match]]:
variable[byte_string] assign[=] call[name[unquote_to_bytes], parameter[call[name[match].group, parameter[constant[0]]]]]
variable[unicode_string] assign[=] call[name[byte_string].decode, parameter[constant[utf-8], constant[iriutf8]]]
for taget[name[safe_char]] in starred[call[name[list], parameter[name[safe]]]] begin[:]
variable[unicode_string] assign[=] call[name[unicode_string].replace, parameter[name[safe_char], binary_operation[constant[%%%02x] <ast.Mod object at 0x7da2590d6920> call[name[ord], parameter[name[safe_char]]]]]]
return[name[unicode_string]]
variable[string] assign[=] call[name[re].sub, parameter[constant[(?:%[0-9a-fA-F]{2})+], name[_try_unescape], name[string]]]
def function[_extract_escape, parameter[match]]:
call[name[escapes].append, parameter[call[call[name[match].group, parameter[constant[0]]].encode, parameter[constant[ascii]]]]]
return[constant[ ]]
variable[string] assign[=] call[name[re].sub, parameter[constant[%[0-9a-fA-F]{2}], name[_extract_escape], name[string]]]
variable[output] assign[=] call[name[urlquote], parameter[call[name[string].encode, parameter[constant[utf-8]]]]]
if <ast.UnaryOp object at 0x7da18f00cac0> begin[:]
variable[output] assign[=] call[name[output].encode, parameter[constant[ascii]]]
if compare[call[name[len], parameter[name[escapes]]] greater[>] constant[0]] begin[:]
def function[_return_escape, parameter[_]]:
return[call[name[escapes].pop, parameter[constant[0]]]]
variable[output] assign[=] call[name[re].sub, parameter[constant[b'%00'], name[_return_escape], name[output]]]
return[name[output]] | keyword[def] identifier[_urlquote] ( identifier[string] , identifier[safe] = literal[string] ):
literal[string]
keyword[if] identifier[string] keyword[is] keyword[None] keyword[or] identifier[string] == literal[string] :
keyword[return] keyword[None]
identifier[escapes] =[]
keyword[if] identifier[re] . identifier[search] ( literal[string] , identifier[string] ):
keyword[def] identifier[_try_unescape] ( identifier[match] ):
identifier[byte_string] = identifier[unquote_to_bytes] ( identifier[match] . identifier[group] ( literal[int] ))
identifier[unicode_string] = identifier[byte_string] . identifier[decode] ( literal[string] , literal[string] )
keyword[for] identifier[safe_char] keyword[in] identifier[list] ( identifier[safe] ):
identifier[unicode_string] = identifier[unicode_string] . identifier[replace] ( identifier[safe_char] , literal[string] % identifier[ord] ( identifier[safe_char] ))
keyword[return] identifier[unicode_string]
identifier[string] = identifier[re] . identifier[sub] ( literal[string] , identifier[_try_unescape] , identifier[string] )
keyword[def] identifier[_extract_escape] ( identifier[match] ):
identifier[escapes] . identifier[append] ( identifier[match] . identifier[group] ( literal[int] ). identifier[encode] ( literal[string] ))
keyword[return] literal[string]
identifier[string] = identifier[re] . identifier[sub] ( literal[string] , identifier[_extract_escape] , identifier[string] )
identifier[output] = identifier[urlquote] ( identifier[string] . identifier[encode] ( literal[string] ), identifier[safe] = identifier[safe] . identifier[encode] ( literal[string] ))
keyword[if] keyword[not] identifier[isinstance] ( identifier[output] , identifier[byte_cls] ):
identifier[output] = identifier[output] . identifier[encode] ( literal[string] )
keyword[if] identifier[len] ( identifier[escapes] )> literal[int] :
keyword[def] identifier[_return_escape] ( identifier[_] ):
keyword[return] identifier[escapes] . identifier[pop] ( literal[int] )
identifier[output] = identifier[re] . identifier[sub] ( literal[string] , identifier[_return_escape] , identifier[output] )
keyword[return] identifier[output] | def _urlquote(string, safe=''):
"""
Quotes a unicode string for use in a URL
:param string:
A unicode string
:param safe:
        A unicode string of characters to not encode
:return:
None (if string is None) or an ASCII byte string of the quoted string
"""
if string is None or string == '':
return None # depends on [control=['if'], data=[]]
# Anything already hex quoted is pulled out of the URL and unquoted if
# possible
escapes = []
if re.search('%[0-9a-fA-F]{2}', string):
# Try to unquote any percent values, restoring them if they are not
# valid UTF-8. Also, requote any safe chars since encoded versions of
# those are functionally different than the unquoted ones.
def _try_unescape(match):
byte_string = unquote_to_bytes(match.group(0))
unicode_string = byte_string.decode('utf-8', 'iriutf8')
for safe_char in list(safe):
unicode_string = unicode_string.replace(safe_char, '%%%02x' % ord(safe_char)) # depends on [control=['for'], data=['safe_char']]
return unicode_string
string = re.sub('(?:%[0-9a-fA-F]{2})+', _try_unescape, string)
# Once we have the minimal set of hex quoted values, removed them from
# the string so that they are not double quoted
def _extract_escape(match):
escapes.append(match.group(0).encode('ascii'))
return '\x00'
string = re.sub('%[0-9a-fA-F]{2}', _extract_escape, string) # depends on [control=['if'], data=[]]
output = urlquote(string.encode('utf-8'), safe=safe.encode('utf-8'))
if not isinstance(output, byte_cls):
output = output.encode('ascii') # depends on [control=['if'], data=[]]
# Restore the existing quoted values that we extracted
if len(escapes) > 0:
def _return_escape(_):
return escapes.pop(0)
output = re.sub(b'%00', _return_escape, output) # depends on [control=['if'], data=[]]
return output |
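Assuming the module-level aliases resolve to their urllib.parse equivalents, a quick demonstration of the quoting; the input contains no percent escapes, so the unquote/requote branch is skipped.

```python
import re
from urllib.parse import quote as urlquote, unquote_to_bytes

byte_cls = bytes  # the module's byte-string alias, assumed

print(_urlquote('/caf\u00e9/a b', safe='/'))  # b'/caf%C3%A9/a%20b'
```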
def init_fields(self):
"""
        Initialize each field of the fields_desc dict
"""
if self.class_dont_cache.get(self.__class__, False):
self.do_init_fields(self.fields_desc)
else:
self.do_init_cached_fields() | def function[init_fields, parameter[self]]:
constant[
    Initialize each field of the fields_desc dict
]
if call[name[self].class_dont_cache.get, parameter[name[self].__class__, constant[False]]] begin[:]
call[name[self].do_init_fields, parameter[name[self].fields_desc]] | keyword[def] identifier[init_fields] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[class_dont_cache] . identifier[get] ( identifier[self] . identifier[__class__] , keyword[False] ):
identifier[self] . identifier[do_init_fields] ( identifier[self] . identifier[fields_desc] )
keyword[else] :
identifier[self] . identifier[do_init_cached_fields] () | def init_fields(self):
"""
        Initialize each field of the fields_desc dict
"""
if self.class_dont_cache.get(self.__class__, False):
self.do_init_fields(self.fields_desc) # depends on [control=['if'], data=[]]
else:
self.do_init_cached_fields() |
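In Scapy this runs during packet construction; a minimal packet class exercises the cached branch, since class_dont_cache is empty by default.

```python
from scapy.packet import Packet
from scapy.fields import ByteField

class Probe(Packet):
    fields_desc = [ByteField('flag', 0)]

p = Probe()    # __init__ calls init_fields(), hitting do_init_cached_fields()
print(p.flag)  # 0
```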
def parse_navigation_html_to_tree(html, id):
"""Parse the given ``html`` (an etree object) to a tree.
The ``id`` is required in order to assign the top-level tree id value.
"""
def xpath(x):
return html.xpath(x, namespaces=HTML_DOCUMENT_NAMESPACES)
try:
value = xpath('//*[@data-type="binding"]/@data-value')[0]
is_translucent = value == 'translucent'
except IndexError:
is_translucent = False
if is_translucent:
id = TRANSLUCENT_BINDER_ID
tree = {'id': id,
'title': xpath('//*[@data-type="document-title"]/text()')[0],
'contents': [x for x in _nav_to_tree(xpath('//xhtml:nav')[0])]
}
return tree | def function[parse_navigation_html_to_tree, parameter[html, id]]:
constant[Parse the given ``html`` (an etree object) to a tree.
The ``id`` is required in order to assign the top-level tree id value.
]
def function[xpath, parameter[x]]:
return[call[name[html].xpath, parameter[name[x]]]]
<ast.Try object at 0x7da1b191c5b0>
if name[is_translucent] begin[:]
variable[id] assign[=] name[TRANSLUCENT_BINDER_ID]
variable[tree] assign[=] dictionary[[<ast.Constant object at 0x7da1b191c2b0>, <ast.Constant object at 0x7da1b191c220>, <ast.Constant object at 0x7da2044c20b0>], [<ast.Name object at 0x7da2044c0d60>, <ast.Subscript object at 0x7da2044c25c0>, <ast.ListComp object at 0x7da2044c0940>]]
return[name[tree]] | keyword[def] identifier[parse_navigation_html_to_tree] ( identifier[html] , identifier[id] ):
literal[string]
keyword[def] identifier[xpath] ( identifier[x] ):
keyword[return] identifier[html] . identifier[xpath] ( identifier[x] , identifier[namespaces] = identifier[HTML_DOCUMENT_NAMESPACES] )
keyword[try] :
identifier[value] = identifier[xpath] ( literal[string] )[ literal[int] ]
identifier[is_translucent] = identifier[value] == literal[string]
keyword[except] identifier[IndexError] :
identifier[is_translucent] = keyword[False]
keyword[if] identifier[is_translucent] :
identifier[id] = identifier[TRANSLUCENT_BINDER_ID]
identifier[tree] ={ literal[string] : identifier[id] ,
literal[string] : identifier[xpath] ( literal[string] )[ literal[int] ],
literal[string] :[ identifier[x] keyword[for] identifier[x] keyword[in] identifier[_nav_to_tree] ( identifier[xpath] ( literal[string] )[ literal[int] ])]
}
keyword[return] identifier[tree] | def parse_navigation_html_to_tree(html, id):
"""Parse the given ``html`` (an etree object) to a tree.
The ``id`` is required in order to assign the top-level tree id value.
"""
def xpath(x):
return html.xpath(x, namespaces=HTML_DOCUMENT_NAMESPACES)
try:
value = xpath('//*[@data-type="binding"]/@data-value')[0]
is_translucent = value == 'translucent' # depends on [control=['try'], data=[]]
except IndexError:
is_translucent = False # depends on [control=['except'], data=[]]
if is_translucent:
id = TRANSLUCENT_BINDER_ID # depends on [control=['if'], data=[]]
tree = {'id': id, 'title': xpath('//*[@data-type="document-title"]/text()')[0], 'contents': [x for x in _nav_to_tree(xpath('//xhtml:nav')[0])]}
return tree |
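A hypothetical input/output sketch for the row above (lxml assumed installed; HTML_DOCUMENT_NAMESPACES and _nav_to_tree are helpers from the same module, and the markup and id below are illustrative only).

from lxml import etree

html = etree.fromstring(
    '<html xmlns="http://www.w3.org/1999/xhtml"><body>'
    '<div data-type="document-title">My Book</div>'
    '<nav><ol><li><a href="ch1.xhtml">Chapter 1</a></li></ol></nav>'
    '</body></html>')
tree = parse_navigation_html_to_tree(html, 'col11629')
# No data-type="binding" element, so the IndexError branch leaves
# is_translucent False and the given id is kept:
# {'id': 'col11629', 'title': 'My Book', 'contents': [...]}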
def __parse_json_file(self, file_path):
"""Process Json file data
:@param file_path
:@type file_path: string
:@throws IOError
"""
if file_path == '' or os.path.splitext(file_path)[1] != '.json':
raise IOError('Invalid Json file')
with open(file_path) as json_file:
self._raw_data = json.load(json_file)
self._json_data = copy.deepcopy(self._raw_data) | def function[__parse_json_file, parameter[self, file_path]]:
constant[Process Json file data
:@param file_path
:@type file_path: string
:@throws IOError
]
if <ast.BoolOp object at 0x7da18eb54d30> begin[:]
<ast.Raise object at 0x7da18eb561d0>
with call[name[open], parameter[name[file_path]]] begin[:]
name[self]._raw_data assign[=] call[name[json].load, parameter[name[json_file]]]
name[self]._json_data assign[=] call[name[copy].deepcopy, parameter[name[self]._raw_data]] | keyword[def] identifier[__parse_json_file] ( identifier[self] , identifier[file_path] ):
literal[string]
keyword[if] identifier[file_path] == literal[string] keyword[or] identifier[os] . identifier[path] . identifier[splitext] ( identifier[file_path] )[ literal[int] ]!= literal[string] :
keyword[raise] identifier[IOError] ( literal[string] )
keyword[with] identifier[open] ( identifier[file_path] ) keyword[as] identifier[json_file] :
identifier[self] . identifier[_raw_data] = identifier[json] . identifier[load] ( identifier[json_file] )
identifier[self] . identifier[_json_data] = identifier[copy] . identifier[deepcopy] ( identifier[self] . identifier[_raw_data] ) | def __parse_json_file(self, file_path):
"""Process Json file data
:@param file_path
:@type file_path: string
:@throws IOError
"""
if file_path == '' or os.path.splitext(file_path)[1] != '.json':
raise IOError('Invalid Json file') # depends on [control=['if'], data=[]]
with open(file_path) as json_file:
self._raw_data = json.load(json_file) # depends on [control=['with'], data=['json_file']]
self._json_data = copy.deepcopy(self._raw_data) |
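A behavior sketch for the row above. Reader is a hypothetical class owning this method; because of double-underscore name mangling, code outside the class would reach it under the mangled name shown (file names are illustrative).

reader = Reader()
reader._Reader__parse_json_file('data.json')   # fills _raw_data plus a deep copy in _json_data
reader._Reader__parse_json_file('notes.txt')   # raises IOError('Invalid Json file')
reader._Reader__parse_json_file('')            # an empty path fails the same check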
def geocode(self):
"""A Generator that reads from the address generators and returns
geocode results.
The generator yields ( address, geocode_results, object)
"""
submit_set = []
data_map = {}
for address, o in self.gen:
submit_set.append(address)
data_map[address] = o
if len(submit_set) >= self.submit_size:
results = self._send(submit_set)
submit_set = []
for k, result in results.items():
o = data_map[k]
yield (k, result, o)
if len(submit_set) > 0:
results = self._send(submit_set)
# submit_set = []
for k, result in results.items():
o = data_map[k]
yield (k, result, o) | def function[geocode, parameter[self]]:
constant[A generator that reads from the address generator and returns
geocode results.
The generator yields (address, geocode_results, object)
]
variable[submit_set] assign[=] list[[]]
variable[data_map] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da18dc049d0>, <ast.Name object at 0x7da18dc06b00>]]] in starred[name[self].gen] begin[:]
call[name[submit_set].append, parameter[name[address]]]
call[name[data_map]][name[address]] assign[=] name[o]
if compare[call[name[len], parameter[name[submit_set]]] greater_or_equal[>=] name[self].submit_size] begin[:]
variable[results] assign[=] call[name[self]._send, parameter[name[submit_set]]]
variable[submit_set] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da1b0ebf880>, <ast.Name object at 0x7da1b0ebcf40>]]] in starred[call[name[results].items, parameter[]]] begin[:]
variable[o] assign[=] call[name[data_map]][name[k]]
<ast.Yield object at 0x7da1b0ebd390>
if compare[call[name[len], parameter[name[submit_set]]] greater[>] constant[0]] begin[:]
variable[results] assign[=] call[name[self]._send, parameter[name[submit_set]]]
for taget[tuple[[<ast.Name object at 0x7da1b0ebfe80>, <ast.Name object at 0x7da1b0ebf610>]]] in starred[call[name[results].items, parameter[]]] begin[:]
variable[o] assign[=] call[name[data_map]][name[k]]
<ast.Yield object at 0x7da1b0ebd210> | keyword[def] identifier[geocode] ( identifier[self] ):
literal[string]
identifier[submit_set] =[]
identifier[data_map] ={}
keyword[for] identifier[address] , identifier[o] keyword[in] identifier[self] . identifier[gen] :
identifier[submit_set] . identifier[append] ( identifier[address] )
identifier[data_map] [ identifier[address] ]= identifier[o]
keyword[if] identifier[len] ( identifier[submit_set] )>= identifier[self] . identifier[submit_size] :
identifier[results] = identifier[self] . identifier[_send] ( identifier[submit_set] )
identifier[submit_set] =[]
keyword[for] identifier[k] , identifier[result] keyword[in] identifier[results] . identifier[items] ():
identifier[o] = identifier[data_map] [ identifier[k] ]
keyword[yield] ( identifier[k] , identifier[result] , identifier[o] )
keyword[if] identifier[len] ( identifier[submit_set] )> literal[int] :
identifier[results] = identifier[self] . identifier[_send] ( identifier[submit_set] )
keyword[for] identifier[k] , identifier[result] keyword[in] identifier[results] . identifier[items] ():
identifier[o] = identifier[data_map] [ identifier[k] ]
keyword[yield] ( identifier[k] , identifier[result] , identifier[o] ) | def geocode(self):
"""A Generator that reads from the address generators and returns
geocode results.
The generator yields ( address, geocode_results, object)
"""
submit_set = []
data_map = {}
for (address, o) in self.gen:
submit_set.append(address)
data_map[address] = o
if len(submit_set) >= self.submit_size:
results = self._send(submit_set)
submit_set = []
for (k, result) in results.items():
o = data_map[k]
yield (k, result, o) # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
if len(submit_set) > 0:
results = self._send(submit_set)
# submit_set = []
for (k, result) in results.items():
o = data_map[k]
yield (k, result, o) # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]] |
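A batching sketch for the row above: a stub class shows how addresses are buffered until submit_size is reached and how the final partial batch is flushed after the loop. The class name and the _send contract are assumptions inferred from the attributes used above, and the generator is assumed to be available as a module-level function.

class EchoGeocoder:
    submit_size = 2
    def __init__(self, pairs):
        self.gen = iter(pairs)
    def _send(self, addresses):
        # Stand-in for a real geocoding request; echoes fixed coordinates.
        return {a: {'lat': 0.0, 'lon': 0.0} for a in addresses}
    geocode = geocode                  # reuse the generator defined above

g = EchoGeocoder([('1 Main St', {'id': 1}),
                  ('2 Oak Ave', {'id': 2}),
                  ('3 Elm Rd', {'id': 3})])
for address, result, obj in g.geocode():
    print(address, result['lat'], obj['id'])   # the third row comes from the final flush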
def init(self):
"""Initialize the URL used to connect to SABnzbd."""
self.url = self.url.format(host=self.host, port=self.port,
api_key=self.api_key) | def function[init, parameter[self]]:
constant[Initialize the URL used to connect to SABnzbd.]
name[self].url assign[=] call[name[self].url.format, parameter[]] | keyword[def] identifier[init] ( identifier[self] ):
literal[string]
identifier[self] . identifier[url] = identifier[self] . identifier[url] . identifier[format] ( identifier[host] = identifier[self] . identifier[host] , identifier[port] = identifier[self] . identifier[port] ,
identifier[api_key] = identifier[self] . identifier[api_key] ) | def init(self):
"""Initialize the URL used to connect to SABnzbd."""
self.url = self.url.format(host=self.host, port=self.port, api_key=self.api_key) |
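What init() computes, in isolation. The template string below is hypothetical; in the real plugin it would already be stored on self.url before init() runs.

url_template = 'http://{host}:{port}/sabnzbd/api?output=json&apikey={api_key}'
print(url_template.format(host='127.0.0.1', port=8080, api_key='abc123'))
# -> http://127.0.0.1:8080/sabnzbd/api?output=json&apikey=abc123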
def get(self, checkplotfname):
'''This handles GET requests to serve a specific checkplot pickle.
This is an AJAX endpoint; returns JSON that gets converted by the
frontend into things to render.
'''
if checkplotfname:
# do the usual safing
self.checkplotfname = xhtml_escape(
base64.b64decode(url_unescape(checkplotfname))
)
# see if this plot is in the current project
if self.checkplotfname in self.currentproject['checkplots']:
# make sure this file exists
cpfpath = os.path.join(
os.path.abspath(os.path.dirname(self.cplistfile)),
self.checkplotfname
)
LOGGER.info('loading %s...' % cpfpath)
if not os.path.exists(cpfpath):
msg = "couldn't find checkplot %s" % cpfpath
LOGGER.error(msg)
resultdict = {'status':'error',
'message':msg,
'result':None}
self.write(resultdict)
raise tornado.web.Finish()
# this is the async call to the executor
cpdict = yield self.executor.submit(
_read_checkplot_picklefile, cpfpath
)
#####################################
## continue after we're good to go ##
#####################################
LOGGER.info('loaded %s' % cpfpath)
# break out the initial info
objectid = cpdict['objectid']
objectinfo = cpdict['objectinfo']
varinfo = cpdict['varinfo']
if 'pfmethods' in cpdict:
pfmethods = cpdict['pfmethods']
else:
pfmethods = []
for pfm in PFMETHODS:
if pfm in cpdict:
pfmethods.append(pfm)
# handle neighbors for this object
neighbors = []
if ('neighbors' in cpdict and
cpdict['neighbors'] is not None and
len(cpdict['neighbors']) > 0):
nbrlist = cpdict['neighbors']
# get each neighbor, its info, and its phased LCs
for nbr in nbrlist:
if 'magdiffs' in nbr:
nbrmagdiffs = nbr['magdiffs']
else:
nbrmagdiffs = None
if 'colordiffs' in nbr:
nbrcolordiffs = nbr['colordiffs']
else:
nbrcolordiffs = None
thisnbrdict = {
'objectid':nbr['objectid'],
'objectinfo':{
'ra':nbr['ra'],
'decl':nbr['decl'],
'xpix':nbr['xpix'],
'ypix':nbr['ypix'],
'distarcsec':nbr['dist'],
'magdiffs':nbrmagdiffs,
'colordiffs':nbrcolordiffs
}
}
try:
nbr_magseries = nbr['magseries']['plot']
thisnbrdict['magseries'] = nbr_magseries
except Exception as e:
LOGGER.error(
"could not load magseries plot for "
"neighbor %s for object %s"
% (nbr['objectid'],
cpdict['objectid'])
)
try:
for pfm in pfmethods:
if pfm in nbr:
thisnbrdict[pfm] = {
'plot':nbr[pfm][0]['plot'],
'period':nbr[pfm][0]['period'],
'epoch':nbr[pfm][0]['epoch']
}
except Exception as e:
LOGGER.error(
"could not load phased LC plots for "
"neighbor %s for object %s"
% (nbr['objectid'],
cpdict['objectid'])
)
neighbors.append(thisnbrdict)
# load object comments
if 'comments' in cpdict:
objectcomments = cpdict['comments']
else:
objectcomments = None
# load the xmatch results, if any
if 'xmatch' in cpdict:
objectxmatch = cpdict['xmatch']
# get rid of those pesky nans
for xmcat in objectxmatch:
if isinstance(objectxmatch[xmcat]['info'], dict):
xminfo = objectxmatch[xmcat]['info']
for xmek in xminfo:
if (isinstance(xminfo[xmek], float) and
(not np.isfinite(xminfo[xmek]))):
xminfo[xmek] = None
else:
objectxmatch = None
# load the colormagdiagram object
if 'colormagdiagram' in cpdict:
colormagdiagram = cpdict['colormagdiagram']
else:
colormagdiagram = None
# these are base64 which can be provided directly to JS to
# generate images (neat!)
if 'finderchart' in cpdict:
finderchart = cpdict['finderchart']
else:
finderchart = None
if ('magseries' in cpdict and
isinstance(cpdict['magseries'], dict) and
'plot' in cpdict['magseries']):
magseries = cpdict['magseries']['plot']
time0 = cpdict['magseries']['times'].min()
magseries_ndet = cpdict['magseries']['times'].size
else:
magseries = None
time0 = 0.0
magseries_ndet = 0
LOGGER.warning(
"no 'magseries' key present in this "
"checkplot, some plots may be broken..."
)
if 'status' in cpdict:
cpstatus = cpdict['status']
else:
cpstatus = 'unknown, possibly incomplete checkplot'
# load the uifilters if present
if 'uifilters' in cpdict:
uifilters = cpdict['uifilters']
else:
uifilters = {'psearch_magfilters':None,
'psearch_sigclip':None,
'psearch_timefilters':None}
# FIXME: add in other stuff required by the frontend
# - signals
# FIXME: the frontend should load these other things as well
# into the various elems on the period-search-tools and
# variability-tools tabs
# this is the initial dict
resultdict = {
'status':'ok',
'message':'found checkplot %s' % self.checkplotfname,
'readonly':self.readonly,
'result':{
'time0':'%.3f' % time0,
'objectid':objectid,
'objectinfo':objectinfo,
'colormagdiagram':colormagdiagram,
'objectcomments':objectcomments,
'varinfo':varinfo,
'uifilters':uifilters,
'neighbors':neighbors,
'xmatch':objectxmatch,
'finderchart':finderchart,
'magseries':magseries,
# fallback in case objectinfo doesn't have ndet
'magseries_ndet':magseries_ndet,
'cpstatus':cpstatus,
'pfmethods':pfmethods
}
}
# make sure to replace nans with Nones. frontend JS absolutely
# hates NaNs and for some reason, the JSON encoder defined at
# the top of this file doesn't deal with them even though it
# should
for key in resultdict['result']['objectinfo']:
if (isinstance(resultdict['result']['objectinfo'][key],
(float, np.float64, np.float_)) and
(not np.isfinite(resultdict['result'][
'objectinfo'
][key]))):
resultdict['result']['objectinfo'][key] = None
elif (isinstance(resultdict['result']['objectinfo'][key],
ndarray)):
thisval = resultdict['result']['objectinfo'][key]
thisval = thisval.tolist()
for i, v in enumerate(thisval):
if (isinstance(v,(float, np.float64, np.float_)) and
(not(np.isfinite(v)))):
thisval[i] = None
resultdict['result']['objectinfo'][key] = thisval
# remove nans from varinfo itself
for key in resultdict['result']['varinfo']:
if (isinstance(
resultdict['result']['varinfo'][key],
(float, np.float64, np.float_)) and
(not np.isfinite(
resultdict['result']['varinfo'][key]
))):
resultdict['result']['varinfo'][key] = None
elif (isinstance(
resultdict['result']['varinfo'][key],
ndarray)):
thisval = (
resultdict['result']['varinfo'][key]
)
thisval = thisval.tolist()
for i, v in enumerate(thisval):
if (isinstance(v,(float, np.float64, np.float_)) and
(not(np.isfinite(v)))):
thisval[i] = None
resultdict['result']['varinfo'][key] = (
thisval
)
# remove nans from varinfo['features']
if ('features' in resultdict['result']['varinfo'] and
isinstance(resultdict['result']['varinfo']['features'],
dict)):
for key in resultdict['result']['varinfo']['features']:
if (isinstance(
resultdict[
'result'
]['varinfo']['features'][key],
(float, np.float64, np.float_)) and
(not np.isfinite(
resultdict[
'result'
]['varinfo']['features'][key]))):
resultdict[
'result'
]['varinfo']['features'][key] = None
elif (isinstance(
resultdict[
'result'
]['varinfo']['features'][key],
ndarray)):
thisval = (
resultdict['result']['varinfo']['features'][key]
)
thisval = thisval.tolist()
for i, v in enumerate(thisval):
if (isinstance(v,(float,
np.float64,
np.float_)) and
(not(np.isfinite(v)))):
thisval[i] = None
resultdict['result']['varinfo']['features'][key] = (
thisval
)
# now get the periodograms and phased LCs
for key in pfmethods:
# get the periodogram for this method
periodogram = cpdict[key]['periodogram']
# get the phased LC with best period
if 0 in cpdict[key] and isinstance(cpdict[key][0], dict):
phasedlc0plot = cpdict[key][0]['plot']
phasedlc0period = float(cpdict[key][0]['period'])
phasedlc0epoch = float(cpdict[key][0]['epoch'])
else:
phasedlc0plot = None
phasedlc0period = None
phasedlc0epoch = None
# get the associated fitinfo for this period if it
# exists
if (0 in cpdict[key] and
isinstance(cpdict[key][0], dict) and
'lcfit' in cpdict[key][0] and
isinstance(cpdict[key][0]['lcfit'], dict)):
phasedlc0fit = {
'method':(
cpdict[key][0]['lcfit']['fittype']
),
'redchisq':(
cpdict[key][0]['lcfit']['fitredchisq']
),
'chisq':(
cpdict[key][0]['lcfit']['fitchisq']
),
'params':(
cpdict[key][0][
'lcfit'
]['fitinfo']['finalparams'] if
'finalparams' in
cpdict[key][0]['lcfit']['fitinfo'] else None
)
}
else:
phasedlc0fit = None
# get the phased LC with 2nd best period
if 1 in cpdict[key] and isinstance(cpdict[key][1], dict):
phasedlc1plot = cpdict[key][1]['plot']
phasedlc1period = float(cpdict[key][1]['period'])
phasedlc1epoch = float(cpdict[key][1]['epoch'])
else:
phasedlc1plot = None
phasedlc1period = None
phasedlc1epoch = None
# get the associated fitinfo for this period if it
# exists
if (1 in cpdict[key] and
isinstance(cpdict[key][1], dict) and
'lcfit' in cpdict[key][1] and
isinstance(cpdict[key][1]['lcfit'], dict)):
phasedlc1fit = {
'method':(
cpdict[key][1]['lcfit']['fittype']
),
'redchisq':(
cpdict[key][1]['lcfit']['fitredchisq']
),
'chisq':(
cpdict[key][1]['lcfit']['fitchisq']
),
'params':(
cpdict[key][1][
'lcfit'
]['fitinfo']['finalparams'] if
'finalparams' in
cpdict[key][1]['lcfit']['fitinfo'] else None
)
}
else:
phasedlc1fit = None
# get the phased LC with 3rd best period
if 2 in cpdict[key] and isinstance(cpdict[key][2], dict):
phasedlc2plot = cpdict[key][2]['plot']
phasedlc2period = float(cpdict[key][2]['period'])
phasedlc2epoch = float(cpdict[key][2]['epoch'])
else:
phasedlc2plot = None
phasedlc2period = None
phasedlc2epoch = None
# get the associated fitinfo for this period if it
# exists
if (2 in cpdict[key] and
isinstance(cpdict[key][2], dict) and
'lcfit' in cpdict[key][2] and
isinstance(cpdict[key][2]['lcfit'], dict)):
phasedlc2fit = {
'method':(
cpdict[key][2]['lcfit']['fittype']
),
'redchisq':(
cpdict[key][2]['lcfit']['fitredchisq']
),
'chisq':(
cpdict[key][2]['lcfit']['fitchisq']
),
'params':(
cpdict[key][2][
'lcfit'
]['fitinfo']['finalparams'] if
'finalparams' in
cpdict[key][2]['lcfit']['fitinfo'] else None
)
}
else:
phasedlc2fit = None
resultdict['result'][key] = {
'nbestperiods':cpdict[key]['nbestperiods'],
'periodogram':periodogram,
'bestperiod':cpdict[key]['bestperiod'],
'phasedlc0':{
'plot':phasedlc0plot,
'period':phasedlc0period,
'epoch':phasedlc0epoch,
'lcfit':phasedlc0fit,
},
'phasedlc1':{
'plot':phasedlc1plot,
'period':phasedlc1period,
'epoch':phasedlc1epoch,
'lcfit':phasedlc1fit,
},
'phasedlc2':{
'plot':phasedlc2plot,
'period':phasedlc2period,
'epoch':phasedlc2epoch,
'lcfit':phasedlc2fit,
},
}
#
# end of processing per pfmethod
#
# return the checkplot via JSON
self.write(resultdict)
self.finish()
else:
LOGGER.error('could not find %s' % self.checkplotfname)
resultdict = {'status':'error',
'message':"This checkplot doesn't exist.",
'readonly':self.readonly,
'result':None}
self.write(resultdict)
self.finish()
else:
resultdict = {'status':'error',
'message':'No checkplot provided to load.',
'readonly':self.readonly,
'result':None}
self.write(resultdict) | def function[get, parameter[self, checkplotfname]]:
constant[This handles GET requests to serve a specific checkplot pickle.
This is an AJAX endpoint; returns JSON that gets converted by the
frontend into things to render.
]
if name[checkplotfname] begin[:]
name[self].checkplotfname assign[=] call[name[xhtml_escape], parameter[call[name[base64].b64decode, parameter[call[name[url_unescape], parameter[name[checkplotfname]]]]]]]
if compare[name[self].checkplotfname in call[name[self].currentproject][constant[checkplots]]] begin[:]
variable[cpfpath] assign[=] call[name[os].path.join, parameter[call[name[os].path.abspath, parameter[call[name[os].path.dirname, parameter[name[self].cplistfile]]]], name[self].checkplotfname]]
call[name[LOGGER].info, parameter[binary_operation[constant[loading %s...] <ast.Mod object at 0x7da2590d6920> name[cpfpath]]]]
if <ast.UnaryOp object at 0x7da20e9b2c50> begin[:]
variable[msg] assign[=] binary_operation[constant[couldn't find checkplot %s] <ast.Mod object at 0x7da2590d6920> name[cpfpath]]
call[name[LOGGER].error, parameter[name[msg]]]
variable[resultdict] assign[=] dictionary[[<ast.Constant object at 0x7da20e9b23e0>, <ast.Constant object at 0x7da20e9b0b50>, <ast.Constant object at 0x7da20e9b2800>], [<ast.Constant object at 0x7da20e9b08e0>, <ast.Name object at 0x7da20e9b28c0>, <ast.Constant object at 0x7da20e9b1ab0>]]
call[name[self].write, parameter[name[resultdict]]]
<ast.Raise object at 0x7da20e9b2ad0>
variable[cpdict] assign[=] <ast.Yield object at 0x7da20e9b0d00>
call[name[LOGGER].info, parameter[binary_operation[constant[loaded %s] <ast.Mod object at 0x7da2590d6920> name[cpfpath]]]]
variable[objectid] assign[=] call[name[cpdict]][constant[objectid]]
variable[objectinfo] assign[=] call[name[cpdict]][constant[objectinfo]]
variable[varinfo] assign[=] call[name[cpdict]][constant[varinfo]]
if compare[constant[pfmethods] in name[cpdict]] begin[:]
variable[pfmethods] assign[=] call[name[cpdict]][constant[pfmethods]]
variable[neighbors] assign[=] list[[]]
if compare[<ast.BoolOp object at 0x7da20c6a8280> greater[>] constant[0]] begin[:]
variable[nbrlist] assign[=] call[name[cpdict]][constant[neighbors]]
for taget[name[nbr]] in starred[name[nbrlist]] begin[:]
if compare[constant[magdiffs] in name[nbr]] begin[:]
variable[nbrmagdiffs] assign[=] call[name[nbr]][constant[magdiffs]]
if compare[constant[colordiffs] in name[nbr]] begin[:]
variable[nbrcolordiffs] assign[=] call[name[nbr]][constant[colordiffs]]
variable[thisnbrdict] assign[=] dictionary[[<ast.Constant object at 0x7da20e9b19f0>, <ast.Constant object at 0x7da20e9b3700>], [<ast.Subscript object at 0x7da20e9b2b90>, <ast.Dict object at 0x7da20e9b0c40>]]
<ast.Try object at 0x7da20e9b0370>
<ast.Try object at 0x7da20e9b0dc0>
call[name[neighbors].append, parameter[name[thisnbrdict]]]
if compare[constant[comments] in name[cpdict]] begin[:]
variable[objectcomments] assign[=] call[name[cpdict]][constant[comments]]
if compare[constant[xmatch] in name[cpdict]] begin[:]
variable[objectxmatch] assign[=] call[name[cpdict]][constant[xmatch]]
for taget[name[xmcat]] in starred[name[objectxmatch]] begin[:]
if call[name[isinstance], parameter[call[call[name[objectxmatch]][name[xmcat]]][constant[info]], name[dict]]] begin[:]
variable[xminfo] assign[=] call[call[name[objectxmatch]][name[xmcat]]][constant[info]]
for taget[name[xmek]] in starred[name[xminfo]] begin[:]
if <ast.BoolOp object at 0x7da18bc728c0> begin[:]
call[name[xminfo]][name[xmek]] assign[=] constant[None]
if compare[constant[colormagdiagram] in name[cpdict]] begin[:]
variable[colormagdiagram] assign[=] call[name[cpdict]][constant[colormagdiagram]]
if compare[constant[finderchart] in name[cpdict]] begin[:]
variable[finderchart] assign[=] call[name[cpdict]][constant[finderchart]]
if <ast.BoolOp object at 0x7da18bc73850> begin[:]
variable[magseries] assign[=] call[call[name[cpdict]][constant[magseries]]][constant[plot]]
variable[time0] assign[=] call[call[call[name[cpdict]][constant[magseries]]][constant[times]].min, parameter[]]
variable[magseries_ndet] assign[=] call[call[name[cpdict]][constant[magseries]]][constant[times]].size
if compare[constant[status] in name[cpdict]] begin[:]
variable[cpstatus] assign[=] call[name[cpdict]][constant[status]]
if compare[constant[uifilters] in name[cpdict]] begin[:]
variable[uifilters] assign[=] call[name[cpdict]][constant[uifilters]]
variable[resultdict] assign[=] dictionary[[<ast.Constant object at 0x7da18bc72b30>, <ast.Constant object at 0x7da18bc723b0>, <ast.Constant object at 0x7da18bc73280>, <ast.Constant object at 0x7da18bc70ee0>], [<ast.Constant object at 0x7da18bc711e0>, <ast.BinOp object at 0x7da18bc72b60>, <ast.Attribute object at 0x7da18bc73ee0>, <ast.Dict object at 0x7da18bc714b0>]]
for taget[name[key]] in starred[call[call[name[resultdict]][constant[result]]][constant[objectinfo]]] begin[:]
if <ast.BoolOp object at 0x7da18bc738e0> begin[:]
call[call[call[name[resultdict]][constant[result]]][constant[objectinfo]]][name[key]] assign[=] constant[None]
for taget[name[key]] in starred[call[call[name[resultdict]][constant[result]]][constant[varinfo]]] begin[:]
if <ast.BoolOp object at 0x7da204566020> begin[:]
call[call[call[name[resultdict]][constant[result]]][constant[varinfo]]][name[key]] assign[=] constant[None]
if <ast.BoolOp object at 0x7da204564700> begin[:]
for taget[name[key]] in starred[call[call[call[name[resultdict]][constant[result]]][constant[varinfo]]][constant[features]]] begin[:]
if <ast.BoolOp object at 0x7da204564a90> begin[:]
call[call[call[call[name[resultdict]][constant[result]]][constant[varinfo]]][constant[features]]][name[key]] assign[=] constant[None]
for taget[name[key]] in starred[name[pfmethods]] begin[:]
variable[periodogram] assign[=] call[call[name[cpdict]][name[key]]][constant[periodogram]]
if <ast.BoolOp object at 0x7da2045665f0> begin[:]
variable[phasedlc0plot] assign[=] call[call[call[name[cpdict]][name[key]]][constant[0]]][constant[plot]]
variable[phasedlc0period] assign[=] call[name[float], parameter[call[call[call[name[cpdict]][name[key]]][constant[0]]][constant[period]]]]
variable[phasedlc0epoch] assign[=] call[name[float], parameter[call[call[call[name[cpdict]][name[key]]][constant[0]]][constant[epoch]]]]
if <ast.BoolOp object at 0x7da204565900> begin[:]
variable[phasedlc0fit] assign[=] dictionary[[<ast.Constant object at 0x7da18bcca350>, <ast.Constant object at 0x7da18bcc97e0>, <ast.Constant object at 0x7da18bccb0a0>, <ast.Constant object at 0x7da18bcc8610>], [<ast.Subscript object at 0x7da18bccb6a0>, <ast.Subscript object at 0x7da18bccb9d0>, <ast.Subscript object at 0x7da18bcc90f0>, <ast.IfExp object at 0x7da18bcc8910>]]
if <ast.BoolOp object at 0x7da18bcc8d90> begin[:]
variable[phasedlc1plot] assign[=] call[call[call[name[cpdict]][name[key]]][constant[1]]][constant[plot]]
variable[phasedlc1period] assign[=] call[name[float], parameter[call[call[call[name[cpdict]][name[key]]][constant[1]]][constant[period]]]]
variable[phasedlc1epoch] assign[=] call[name[float], parameter[call[call[call[name[cpdict]][name[key]]][constant[1]]][constant[epoch]]]]
if <ast.BoolOp object at 0x7da18bccae30> begin[:]
variable[phasedlc1fit] assign[=] dictionary[[<ast.Constant object at 0x7da18bccaf20>, <ast.Constant object at 0x7da18bcc8550>, <ast.Constant object at 0x7da18bcc9330>, <ast.Constant object at 0x7da18bcc9e10>], [<ast.Subscript object at 0x7da18bccb130>, <ast.Subscript object at 0x7da18bcc98d0>, <ast.Subscript object at 0x7da18bcc9b70>, <ast.IfExp object at 0x7da18bcc9930>]]
if <ast.BoolOp object at 0x7da18bcc9ea0> begin[:]
variable[phasedlc2plot] assign[=] call[call[call[name[cpdict]][name[key]]][constant[2]]][constant[plot]]
variable[phasedlc2period] assign[=] call[name[float], parameter[call[call[call[name[cpdict]][name[key]]][constant[2]]][constant[period]]]]
variable[phasedlc2epoch] assign[=] call[name[float], parameter[call[call[call[name[cpdict]][name[key]]][constant[2]]][constant[epoch]]]]
if <ast.BoolOp object at 0x7da18bcc9780> begin[:]
variable[phasedlc2fit] assign[=] dictionary[[<ast.Constant object at 0x7da18bcca530>, <ast.Constant object at 0x7da18bcca110>, <ast.Constant object at 0x7da18bccb880>, <ast.Constant object at 0x7da18bcca4a0>], [<ast.Subscript object at 0x7da18bcc9120>, <ast.Subscript object at 0x7da18f58e1d0>, <ast.Subscript object at 0x7da18f58ee00>, <ast.IfExp object at 0x7da18f58f8e0>]]
call[call[name[resultdict]][constant[result]]][name[key]] assign[=] dictionary[[<ast.Constant object at 0x7da18f58d180>, <ast.Constant object at 0x7da18f58f610>, <ast.Constant object at 0x7da18f58c1f0>, <ast.Constant object at 0x7da18f58f6d0>, <ast.Constant object at 0x7da18f58d480>, <ast.Constant object at 0x7da18f58cc70>], [<ast.Subscript object at 0x7da18f58cd60>, <ast.Name object at 0x7da18f58fd00>, <ast.Subscript object at 0x7da18f58c100>, <ast.Dict object at 0x7da18f58c910>, <ast.Dict object at 0x7da18f58dcf0>, <ast.Dict object at 0x7da18f58e830>]]
call[name[self].write, parameter[name[resultdict]]]
call[name[self].finish, parameter[]] | keyword[def] identifier[get] ( identifier[self] , identifier[checkplotfname] ):
literal[string]
keyword[if] identifier[checkplotfname] :
identifier[self] . identifier[checkplotfname] = identifier[xhtml_escape] (
identifier[base64] . identifier[b64decode] ( identifier[url_unescape] ( identifier[checkplotfname] ))
)
keyword[if] identifier[self] . identifier[checkplotfname] keyword[in] identifier[self] . identifier[currentproject] [ literal[string] ]:
identifier[cpfpath] = identifier[os] . identifier[path] . identifier[join] (
identifier[os] . identifier[path] . identifier[abspath] ( identifier[os] . identifier[path] . identifier[dirname] ( identifier[self] . identifier[cplistfile] )),
identifier[self] . identifier[checkplotfname]
)
identifier[LOGGER] . identifier[info] ( literal[string] % identifier[cpfpath] )
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[cpfpath] ):
identifier[msg] = literal[string] % identifier[cpfpath]
identifier[LOGGER] . identifier[error] ( identifier[msg] )
identifier[resultdict] ={ literal[string] : literal[string] ,
literal[string] : identifier[msg] ,
literal[string] : keyword[None] }
identifier[self] . identifier[write] ( identifier[resultdict] )
keyword[raise] identifier[tornado] . identifier[web] . identifier[Finish] ()
identifier[cpdict] = keyword[yield] identifier[self] . identifier[executor] . identifier[submit] (
identifier[_read_checkplot_picklefile] , identifier[cpfpath]
)
identifier[LOGGER] . identifier[info] ( literal[string] % identifier[cpfpath] )
identifier[objectid] = identifier[cpdict] [ literal[string] ]
identifier[objectinfo] = identifier[cpdict] [ literal[string] ]
identifier[varinfo] = identifier[cpdict] [ literal[string] ]
keyword[if] literal[string] keyword[in] identifier[cpdict] :
identifier[pfmethods] = identifier[cpdict] [ literal[string] ]
keyword[else] :
identifier[pfmethods] =[]
keyword[for] identifier[pfm] keyword[in] identifier[PFMETHODS] :
keyword[if] identifier[pfm] keyword[in] identifier[cpdict] :
identifier[pfmethods] . identifier[append] ( identifier[pfm] )
identifier[neighbors] =[]
keyword[if] ( literal[string] keyword[in] identifier[cpdict] keyword[and]
identifier[cpdict] [ literal[string] ] keyword[is] keyword[not] keyword[None] keyword[and]
identifier[len] ( identifier[cpdict] [ literal[string] ]))> literal[int] :
identifier[nbrlist] = identifier[cpdict] [ literal[string] ]
keyword[for] identifier[nbr] keyword[in] identifier[nbrlist] :
keyword[if] literal[string] keyword[in] identifier[nbr] :
identifier[nbrmagdiffs] = identifier[nbr] [ literal[string] ]
keyword[else] :
identifier[nbrmagdiffs] = keyword[None]
keyword[if] literal[string] keyword[in] identifier[nbr] :
identifier[nbrcolordiffs] = identifier[nbr] [ literal[string] ]
keyword[else] :
identifier[nbrcolordiffs] = keyword[None]
identifier[thisnbrdict] ={
literal[string] : identifier[nbr] [ literal[string] ],
literal[string] :{
literal[string] : identifier[nbr] [ literal[string] ],
literal[string] : identifier[nbr] [ literal[string] ],
literal[string] : identifier[nbr] [ literal[string] ],
literal[string] : identifier[nbr] [ literal[string] ],
literal[string] : identifier[nbr] [ literal[string] ],
literal[string] : identifier[nbrmagdiffs] ,
literal[string] : identifier[nbrcolordiffs]
}
}
keyword[try] :
identifier[nbr_magseries] = identifier[nbr] [ literal[string] ][ literal[string] ]
identifier[thisnbrdict] [ literal[string] ]= identifier[nbr_magseries]
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[LOGGER] . identifier[error] (
literal[string]
literal[string]
%( identifier[nbr] [ literal[string] ],
identifier[cpdict] [ literal[string] ])
)
keyword[try] :
keyword[for] identifier[pfm] keyword[in] identifier[pfmethods] :
keyword[if] identifier[pfm] keyword[in] identifier[nbr] :
identifier[thisnbrdict] [ identifier[pfm] ]={
literal[string] : identifier[nbr] [ identifier[pfm] ][ literal[int] ][ literal[string] ],
literal[string] : identifier[nbr] [ identifier[pfm] ][ literal[int] ][ literal[string] ],
literal[string] : identifier[nbr] [ identifier[pfm] ][ literal[int] ][ literal[string] ]
}
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[LOGGER] . identifier[error] (
literal[string]
literal[string]
%( identifier[nbr] [ literal[string] ],
identifier[cpdict] [ literal[string] ])
)
identifier[neighbors] . identifier[append] ( identifier[thisnbrdict] )
keyword[if] literal[string] keyword[in] identifier[cpdict] :
identifier[objectcomments] = identifier[cpdict] [ literal[string] ]
keyword[else] :
identifier[objectcomments] = keyword[None]
keyword[if] literal[string] keyword[in] identifier[cpdict] :
identifier[objectxmatch] = identifier[cpdict] [ literal[string] ]
keyword[for] identifier[xmcat] keyword[in] identifier[objectxmatch] :
keyword[if] identifier[isinstance] ( identifier[objectxmatch] [ identifier[xmcat] ][ literal[string] ], identifier[dict] ):
identifier[xminfo] = identifier[objectxmatch] [ identifier[xmcat] ][ literal[string] ]
keyword[for] identifier[xmek] keyword[in] identifier[xminfo] :
keyword[if] ( identifier[isinstance] ( identifier[xminfo] [ identifier[xmek] ], identifier[float] ) keyword[and]
( keyword[not] identifier[np] . identifier[isfinite] ( identifier[xminfo] [ identifier[xmek] ]))):
identifier[xminfo] [ identifier[xmek] ]= keyword[None]
keyword[else] :
identifier[objectxmatch] = keyword[None]
keyword[if] literal[string] keyword[in] identifier[cpdict] :
identifier[colormagdiagram] = identifier[cpdict] [ literal[string] ]
keyword[else] :
identifier[colormagdiagram] = keyword[None]
keyword[if] literal[string] keyword[in] identifier[cpdict] :
identifier[finderchart] = identifier[cpdict] [ literal[string] ]
keyword[else] :
identifier[finderchart] = keyword[None]
keyword[if] ( literal[string] keyword[in] identifier[cpdict] keyword[and]
identifier[isinstance] ( identifier[cpdict] [ literal[string] ], identifier[dict] ) keyword[and]
literal[string] keyword[in] identifier[cpdict] [ literal[string] ]):
identifier[magseries] = identifier[cpdict] [ literal[string] ][ literal[string] ]
identifier[time0] = identifier[cpdict] [ literal[string] ][ literal[string] ]. identifier[min] ()
identifier[magseries_ndet] = identifier[cpdict] [ literal[string] ][ literal[string] ]. identifier[size]
keyword[else] :
identifier[magseries] = keyword[None]
identifier[time0] = literal[int]
identifier[magseries_ndet] = literal[int]
identifier[LOGGER] . identifier[warning] (
literal[string]
literal[string]
)
keyword[if] literal[string] keyword[in] identifier[cpdict] :
identifier[cpstatus] = identifier[cpdict] [ literal[string] ]
keyword[else] :
identifier[cpstatus] = literal[string]
keyword[if] literal[string] keyword[in] identifier[cpdict] :
identifier[uifilters] = identifier[cpdict] [ literal[string] ]
keyword[else] :
identifier[uifilters] ={ literal[string] : keyword[None] ,
literal[string] : keyword[None] ,
literal[string] : keyword[None] }
identifier[resultdict] ={
literal[string] : literal[string] ,
literal[string] : literal[string] % identifier[self] . identifier[checkplotfname] ,
literal[string] : identifier[self] . identifier[readonly] ,
literal[string] :{
literal[string] : literal[string] % identifier[time0] ,
literal[string] : identifier[objectid] ,
literal[string] : identifier[objectinfo] ,
literal[string] : identifier[colormagdiagram] ,
literal[string] : identifier[objectcomments] ,
literal[string] : identifier[varinfo] ,
literal[string] : identifier[uifilters] ,
literal[string] : identifier[neighbors] ,
literal[string] : identifier[objectxmatch] ,
literal[string] : identifier[finderchart] ,
literal[string] : identifier[magseries] ,
literal[string] : identifier[magseries_ndet] ,
literal[string] : identifier[cpstatus] ,
literal[string] : identifier[pfmethods]
}
}
keyword[for] identifier[key] keyword[in] identifier[resultdict] [ literal[string] ][ literal[string] ]:
keyword[if] ( identifier[isinstance] ( identifier[resultdict] [ literal[string] ][ literal[string] ][ identifier[key] ],
( identifier[float] , identifier[np] . identifier[float64] , identifier[np] . identifier[float_] )) keyword[and]
( keyword[not] identifier[np] . identifier[isfinite] ( identifier[resultdict] [ literal[string] ][
literal[string]
][ identifier[key] ]))):
identifier[resultdict] [ literal[string] ][ literal[string] ][ identifier[key] ]= keyword[None]
keyword[elif] ( identifier[isinstance] ( identifier[resultdict] [ literal[string] ][ literal[string] ][ identifier[key] ],
identifier[ndarray] )):
identifier[thisval] = identifier[resultdict] [ literal[string] ][ literal[string] ][ identifier[key] ]
identifier[thisval] = identifier[thisval] . identifier[tolist] ()
keyword[for] identifier[i] , identifier[v] keyword[in] identifier[enumerate] ( identifier[thisval] ):
keyword[if] ( identifier[isinstance] ( identifier[v] ,( identifier[float] , identifier[np] . identifier[float64] , identifier[np] . identifier[float_] )) keyword[and]
( keyword[not] ( identifier[np] . identifier[isfinite] ( identifier[v] )))):
identifier[thisval] [ identifier[i] ]= keyword[None]
identifier[resultdict] [ literal[string] ][ literal[string] ][ identifier[key] ]= identifier[thisval]
keyword[for] identifier[key] keyword[in] identifier[resultdict] [ literal[string] ][ literal[string] ]:
keyword[if] ( identifier[isinstance] (
identifier[resultdict] [ literal[string] ][ literal[string] ][ identifier[key] ],
( identifier[float] , identifier[np] . identifier[float64] , identifier[np] . identifier[float_] )) keyword[and]
( keyword[not] identifier[np] . identifier[isfinite] (
identifier[resultdict] [ literal[string] ][ literal[string] ][ identifier[key] ]
))):
identifier[resultdict] [ literal[string] ][ literal[string] ][ identifier[key] ]= keyword[None]
keyword[elif] ( identifier[isinstance] (
identifier[resultdict] [ literal[string] ][ literal[string] ][ identifier[key] ],
identifier[ndarray] )):
identifier[thisval] =(
identifier[resultdict] [ literal[string] ][ literal[string] ][ identifier[key] ]
)
identifier[thisval] = identifier[thisval] . identifier[tolist] ()
keyword[for] identifier[i] , identifier[v] keyword[in] identifier[enumerate] ( identifier[thisval] ):
keyword[if] ( identifier[isinstance] ( identifier[v] ,( identifier[float] , identifier[np] . identifier[float64] , identifier[np] . identifier[float_] )) keyword[and]
( keyword[not] ( identifier[np] . identifier[isfinite] ( identifier[v] )))):
identifier[thisval] [ identifier[i] ]= keyword[None]
identifier[resultdict] [ literal[string] ][ literal[string] ][ identifier[key] ]=(
identifier[thisval]
)
keyword[if] ( literal[string] keyword[in] identifier[resultdict] [ literal[string] ][ literal[string] ] keyword[and]
identifier[isinstance] ( identifier[resultdict] [ literal[string] ][ literal[string] ][ literal[string] ],
identifier[dict] )):
keyword[for] identifier[key] keyword[in] identifier[resultdict] [ literal[string] ][ literal[string] ][ literal[string] ]:
keyword[if] ( identifier[isinstance] (
identifier[resultdict] [
literal[string]
][ literal[string] ][ literal[string] ][ identifier[key] ],
( identifier[float] , identifier[np] . identifier[float64] , identifier[np] . identifier[float_] )) keyword[and]
( keyword[not] identifier[np] . identifier[isfinite] (
identifier[resultdict] [
literal[string]
][ literal[string] ][ literal[string] ][ identifier[key] ]))):
identifier[resultdict] [
literal[string]
][ literal[string] ][ literal[string] ][ identifier[key] ]= keyword[None]
keyword[elif] ( identifier[isinstance] (
identifier[resultdict] [
literal[string]
][ literal[string] ][ literal[string] ][ identifier[key] ],
identifier[ndarray] )):
identifier[thisval] =(
identifier[resultdict] [ literal[string] ][ literal[string] ][ literal[string] ][ identifier[key] ]
)
identifier[thisval] = identifier[thisval] . identifier[tolist] ()
keyword[for] identifier[i] , identifier[v] keyword[in] identifier[enumerate] ( identifier[thisval] ):
keyword[if] ( identifier[isinstance] ( identifier[v] ,( identifier[float] ,
identifier[np] . identifier[float64] ,
identifier[np] . identifier[float_] )) keyword[and]
( keyword[not] ( identifier[np] . identifier[isfinite] ( identifier[v] )))):
identifier[thisval] [ identifier[i] ]= keyword[None]
identifier[resultdict] [ literal[string] ][ literal[string] ][ literal[string] ][ identifier[key] ]=(
identifier[thisval]
)
keyword[for] identifier[key] keyword[in] identifier[pfmethods] :
identifier[periodogram] = identifier[cpdict] [ identifier[key] ][ literal[string] ]
keyword[if] literal[int] keyword[in] identifier[cpdict] [ identifier[key] ] keyword[and] identifier[isinstance] ( identifier[cpdict] [ identifier[key] ][ literal[int] ], identifier[dict] ):
identifier[phasedlc0plot] = identifier[cpdict] [ identifier[key] ][ literal[int] ][ literal[string] ]
identifier[phasedlc0period] = identifier[float] ( identifier[cpdict] [ identifier[key] ][ literal[int] ][ literal[string] ])
identifier[phasedlc0epoch] = identifier[float] ( identifier[cpdict] [ identifier[key] ][ literal[int] ][ literal[string] ])
keyword[else] :
identifier[phasedlc0plot] = keyword[None]
identifier[phasedlc0period] = keyword[None]
identifier[phasedlc0epoch] = keyword[None]
keyword[if] ( literal[int] keyword[in] identifier[cpdict] [ identifier[key] ] keyword[and]
identifier[isinstance] ( identifier[cpdict] [ identifier[key] ][ literal[int] ], identifier[dict] ) keyword[and]
literal[string] keyword[in] identifier[cpdict] [ identifier[key] ][ literal[int] ] keyword[and]
identifier[isinstance] ( identifier[cpdict] [ identifier[key] ][ literal[int] ][ literal[string] ], identifier[dict] )):
identifier[phasedlc0fit] ={
literal[string] :(
identifier[cpdict] [ identifier[key] ][ literal[int] ][ literal[string] ][ literal[string] ]
),
literal[string] :(
identifier[cpdict] [ identifier[key] ][ literal[int] ][ literal[string] ][ literal[string] ]
),
literal[string] :(
identifier[cpdict] [ identifier[key] ][ literal[int] ][ literal[string] ][ literal[string] ]
),
literal[string] :(
identifier[cpdict] [ identifier[key] ][ literal[int] ][
literal[string]
][ literal[string] ][ literal[string] ] keyword[if]
literal[string] keyword[in]
identifier[cpdict] [ identifier[key] ][ literal[int] ][ literal[string] ][ literal[string] ] keyword[else] keyword[None]
)
}
keyword[else] :
identifier[phasedlc0fit] = keyword[None]
keyword[if] literal[int] keyword[in] identifier[cpdict] [ identifier[key] ] keyword[and] identifier[isinstance] ( identifier[cpdict] [ identifier[key] ][ literal[int] ], identifier[dict] ):
identifier[phasedlc1plot] = identifier[cpdict] [ identifier[key] ][ literal[int] ][ literal[string] ]
identifier[phasedlc1period] = identifier[float] ( identifier[cpdict] [ identifier[key] ][ literal[int] ][ literal[string] ])
identifier[phasedlc1epoch] = identifier[float] ( identifier[cpdict] [ identifier[key] ][ literal[int] ][ literal[string] ])
keyword[else] :
identifier[phasedlc1plot] = keyword[None]
identifier[phasedlc1period] = keyword[None]
identifier[phasedlc1epoch] = keyword[None]
keyword[if] ( literal[int] keyword[in] identifier[cpdict] [ identifier[key] ] keyword[and]
identifier[isinstance] ( identifier[cpdict] [ identifier[key] ][ literal[int] ], identifier[dict] ) keyword[and]
literal[string] keyword[in] identifier[cpdict] [ identifier[key] ][ literal[int] ] keyword[and]
identifier[isinstance] ( identifier[cpdict] [ identifier[key] ][ literal[int] ][ literal[string] ], identifier[dict] )):
identifier[phasedlc1fit] ={
literal[string] :(
identifier[cpdict] [ identifier[key] ][ literal[int] ][ literal[string] ][ literal[string] ]
),
literal[string] :(
identifier[cpdict] [ identifier[key] ][ literal[int] ][ literal[string] ][ literal[string] ]
),
literal[string] :(
identifier[cpdict] [ identifier[key] ][ literal[int] ][ literal[string] ][ literal[string] ]
),
literal[string] :(
identifier[cpdict] [ identifier[key] ][ literal[int] ][
literal[string]
][ literal[string] ][ literal[string] ] keyword[if]
literal[string] keyword[in]
identifier[cpdict] [ identifier[key] ][ literal[int] ][ literal[string] ][ literal[string] ] keyword[else] keyword[None]
)
}
keyword[else] :
identifier[phasedlc1fit] = keyword[None]
keyword[if] literal[int] keyword[in] identifier[cpdict] [ identifier[key] ] keyword[and] identifier[isinstance] ( identifier[cpdict] [ identifier[key] ][ literal[int] ], identifier[dict] ):
identifier[phasedlc2plot] = identifier[cpdict] [ identifier[key] ][ literal[int] ][ literal[string] ]
identifier[phasedlc2period] = identifier[float] ( identifier[cpdict] [ identifier[key] ][ literal[int] ][ literal[string] ])
identifier[phasedlc2epoch] = identifier[float] ( identifier[cpdict] [ identifier[key] ][ literal[int] ][ literal[string] ])
keyword[else] :
identifier[phasedlc2plot] = keyword[None]
identifier[phasedlc2period] = keyword[None]
identifier[phasedlc2epoch] = keyword[None]
keyword[if] ( literal[int] keyword[in] identifier[cpdict] [ identifier[key] ] keyword[and]
identifier[isinstance] ( identifier[cpdict] [ identifier[key] ][ literal[int] ], identifier[dict] ) keyword[and]
literal[string] keyword[in] identifier[cpdict] [ identifier[key] ][ literal[int] ] keyword[and]
identifier[isinstance] ( identifier[cpdict] [ identifier[key] ][ literal[int] ][ literal[string] ], identifier[dict] )):
identifier[phasedlc2fit] ={
literal[string] :(
identifier[cpdict] [ identifier[key] ][ literal[int] ][ literal[string] ][ literal[string] ]
),
literal[string] :(
identifier[cpdict] [ identifier[key] ][ literal[int] ][ literal[string] ][ literal[string] ]
),
literal[string] :(
identifier[cpdict] [ identifier[key] ][ literal[int] ][ literal[string] ][ literal[string] ]
),
literal[string] :(
identifier[cpdict] [ identifier[key] ][ literal[int] ][
literal[string]
][ literal[string] ][ literal[string] ] keyword[if]
literal[string] keyword[in]
identifier[cpdict] [ identifier[key] ][ literal[int] ][ literal[string] ][ literal[string] ] keyword[else] keyword[None]
)
}
keyword[else] :
identifier[phasedlc2fit] = keyword[None]
identifier[resultdict] [ literal[string] ][ identifier[key] ]={
literal[string] : identifier[cpdict] [ identifier[key] ][ literal[string] ],
literal[string] : identifier[periodogram] ,
literal[string] : identifier[cpdict] [ identifier[key] ][ literal[string] ],
literal[string] :{
literal[string] : identifier[phasedlc0plot] ,
literal[string] : identifier[phasedlc0period] ,
literal[string] : identifier[phasedlc0epoch] ,
literal[string] : identifier[phasedlc0fit] ,
},
literal[string] :{
literal[string] : identifier[phasedlc1plot] ,
literal[string] : identifier[phasedlc1period] ,
literal[string] : identifier[phasedlc1epoch] ,
literal[string] : identifier[phasedlc1fit] ,
},
literal[string] :{
literal[string] : identifier[phasedlc2plot] ,
literal[string] : identifier[phasedlc2period] ,
literal[string] : identifier[phasedlc2epoch] ,
literal[string] : identifier[phasedlc2fit] ,
},
}
identifier[self] . identifier[write] ( identifier[resultdict] )
identifier[self] . identifier[finish] ()
keyword[else] :
identifier[LOGGER] . identifier[error] ( literal[string] % identifier[self] . identifier[checkplotfname] )
identifier[resultdict] ={ literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : identifier[self] . identifier[readonly] ,
literal[string] : keyword[None] }
identifier[self] . identifier[write] ( identifier[resultdict] )
identifier[self] . identifier[finish] ()
keyword[else] :
identifier[resultdict] ={ literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : identifier[self] . identifier[readonly] ,
literal[string] : keyword[None] }
identifier[self] . identifier[write] ( identifier[resultdict] ) | def get(self, checkplotfname):
"""This handles GET requests to serve a specific checkplot pickle.
This is an AJAX endpoint; returns JSON that gets converted by the
frontend into things to render.
"""
if checkplotfname:
# do the usual safing
self.checkplotfname = xhtml_escape(base64.b64decode(url_unescape(checkplotfname)))
# see if this plot is in the current project
if self.checkplotfname in self.currentproject['checkplots']:
# make sure this file exists
cpfpath = os.path.join(os.path.abspath(os.path.dirname(self.cplistfile)), self.checkplotfname)
LOGGER.info('loading %s...' % cpfpath)
if not os.path.exists(cpfpath):
msg = "couldn't find checkplot %s" % cpfpath
LOGGER.error(msg)
resultdict = {'status': 'error', 'message': msg, 'result': None}
self.write(resultdict)
raise tornado.web.Finish() # depends on [control=['if'], data=[]]
# this is the async call to the executor
cpdict = (yield self.executor.submit(_read_checkplot_picklefile, cpfpath))
#####################################
## continue after we're good to go ##
#####################################
LOGGER.info('loaded %s' % cpfpath)
# break out the initial info
objectid = cpdict['objectid']
objectinfo = cpdict['objectinfo']
varinfo = cpdict['varinfo']
if 'pfmethods' in cpdict:
pfmethods = cpdict['pfmethods'] # depends on [control=['if'], data=['cpdict']]
else:
pfmethods = []
for pfm in PFMETHODS:
if pfm in cpdict:
pfmethods.append(pfm) # depends on [control=['if'], data=['pfm']] # depends on [control=['for'], data=['pfm']]
# handle neighbors for this object
neighbors = []
if 'neighbors' in cpdict and cpdict['neighbors'] is not None and len(cpdict['neighbors']) > 0:
nbrlist = cpdict['neighbors']
# get each neighbor, its info, and its phased LCs
for nbr in nbrlist:
if 'magdiffs' in nbr:
nbrmagdiffs = nbr['magdiffs'] # depends on [control=['if'], data=['nbr']]
else:
nbrmagdiffs = None
if 'colordiffs' in nbr:
nbrcolordiffs = nbr['colordiffs'] # depends on [control=['if'], data=['nbr']]
else:
nbrcolordiffs = None
thisnbrdict = {'objectid': nbr['objectid'], 'objectinfo': {'ra': nbr['ra'], 'decl': nbr['decl'], 'xpix': nbr['xpix'], 'ypix': nbr['ypix'], 'distarcsec': nbr['dist'], 'magdiffs': nbrmagdiffs, 'colordiffs': nbrcolordiffs}}
try:
nbr_magseries = nbr['magseries']['plot']
thisnbrdict['magseries'] = nbr_magseries # depends on [control=['try'], data=[]]
except Exception as e:
LOGGER.error('could not load magseries plot for neighbor %s for object %s' % (nbr['objectid'], cpdict['objectid'])) # depends on [control=['except'], data=[]]
try:
for pfm in pfmethods:
if pfm in nbr:
thisnbrdict[pfm] = {'plot': nbr[pfm][0]['plot'], 'period': nbr[pfm][0]['period'], 'epoch': nbr[pfm][0]['epoch']} # depends on [control=['if'], data=['pfm', 'nbr']] # depends on [control=['for'], data=['pfm']] # depends on [control=['try'], data=[]]
except Exception as e:
LOGGER.error('could not load phased LC plots for neighbor %s for object %s' % (nbr['objectid'], cpdict['objectid'])) # depends on [control=['except'], data=[]]
neighbors.append(thisnbrdict) # depends on [control=['for'], data=['nbr']] # depends on [control=['if'], data=[]]
# load object comments
if 'comments' in cpdict:
objectcomments = cpdict['comments'] # depends on [control=['if'], data=['cpdict']]
else:
objectcomments = None
# load the xmatch results, if any
if 'xmatch' in cpdict:
objectxmatch = cpdict['xmatch']
# get rid of those pesky nans
for xmcat in objectxmatch:
if isinstance(objectxmatch[xmcat]['info'], dict):
xminfo = objectxmatch[xmcat]['info']
for xmek in xminfo:
if isinstance(xminfo[xmek], float) and (not np.isfinite(xminfo[xmek])):
xminfo[xmek] = None # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['xmek']] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['xmcat']] # depends on [control=['if'], data=['cpdict']]
else:
objectxmatch = None
# load the colormagdiagram object
if 'colormagdiagram' in cpdict:
colormagdiagram = cpdict['colormagdiagram'] # depends on [control=['if'], data=['cpdict']]
else:
colormagdiagram = None
# these are base64 which can be provided directly to JS to
# generate images (neat!)
if 'finderchart' in cpdict:
finderchart = cpdict['finderchart'] # depends on [control=['if'], data=['cpdict']]
else:
finderchart = None
if 'magseries' in cpdict and isinstance(cpdict['magseries'], dict) and ('plot' in cpdict['magseries']):
magseries = cpdict['magseries']['plot']
time0 = cpdict['magseries']['times'].min()
magseries_ndet = cpdict['magseries']['times'].size # depends on [control=['if'], data=[]]
else:
magseries = None
time0 = 0.0
magseries_ndet = 0
LOGGER.warning("no 'magseries' key present in this checkplot, some plots may be broken...")
if 'status' in cpdict:
cpstatus = cpdict['status'] # depends on [control=['if'], data=['cpdict']]
else:
cpstatus = 'unknown, possibly incomplete checkplot'
# load the uifilters if present
if 'uifilters' in cpdict:
uifilters = cpdict['uifilters'] # depends on [control=['if'], data=['cpdict']]
else:
uifilters = {'psearch_magfilters': None, 'psearch_sigclip': None, 'psearch_timefilters': None}
# FIXME: add in other stuff required by the frontend
# - signals
# FIXME: the frontend should load these other things as well
# into the various elems on the period-search-tools and
# variability-tools tabs
# this is the initial dict
# fallback in case objectinfo doesn't have ndet
resultdict = {'status': 'ok', 'message': 'found checkplot %s' % self.checkplotfname, 'readonly': self.readonly, 'result': {'time0': '%.3f' % time0, 'objectid': objectid, 'objectinfo': objectinfo, 'colormagdiagram': colormagdiagram, 'objectcomments': objectcomments, 'varinfo': varinfo, 'uifilters': uifilters, 'neighbors': neighbors, 'xmatch': objectxmatch, 'finderchart': finderchart, 'magseries': magseries, 'magseries_ndet': magseries_ndet, 'cpstatus': cpstatus, 'pfmethods': pfmethods}}
# make sure to replace nans with Nones. frontend JS absolutely
# hates NaNs and for some reason, the JSON encoder defined at
# the top of this file doesn't deal with them even though it
# should
for key in resultdict['result']['objectinfo']:
if isinstance(resultdict['result']['objectinfo'][key], (float, np.float64, np.float_)) and (not np.isfinite(resultdict['result']['objectinfo'][key])):
resultdict['result']['objectinfo'][key] = None # depends on [control=['if'], data=[]]
elif isinstance(resultdict['result']['objectinfo'][key], ndarray):
thisval = resultdict['result']['objectinfo'][key]
thisval = thisval.tolist()
for (i, v) in enumerate(thisval):
if isinstance(v, (float, np.float64, np.float_)) and (not np.isfinite(v)):
thisval[i] = None # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
resultdict['result']['objectinfo'][key] = thisval # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['key']]
# remove nans from varinfo itself
for key in resultdict['result']['varinfo']:
if isinstance(resultdict['result']['varinfo'][key], (float, np.float64, np.float_)) and (not np.isfinite(resultdict['result']['varinfo'][key])):
resultdict['result']['varinfo'][key] = None # depends on [control=['if'], data=[]]
elif isinstance(resultdict['result']['varinfo'][key], ndarray):
thisval = resultdict['result']['varinfo'][key]
thisval = thisval.tolist()
for (i, v) in enumerate(thisval):
if isinstance(v, (float, np.float64, np.float_)) and (not np.isfinite(v)):
thisval[i] = None # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
resultdict['result']['varinfo'][key] = thisval # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['key']]
# remove nans from varinfo['features']
if 'features' in resultdict['result']['varinfo'] and isinstance(resultdict['result']['varinfo']['features'], dict):
for key in resultdict['result']['varinfo']['features']:
if isinstance(resultdict['result']['varinfo']['features'][key], (float, np.float64, np.float_)) and (not np.isfinite(resultdict['result']['varinfo']['features'][key])):
resultdict['result']['varinfo']['features'][key] = None # depends on [control=['if'], data=[]]
elif isinstance(resultdict['result']['varinfo']['features'][key], ndarray):
thisval = resultdict['result']['varinfo']['features'][key]
thisval = thisval.tolist()
for (i, v) in enumerate(thisval):
if isinstance(v, (float, np.float64, np.float_)) and (not np.isfinite(v)):
thisval[i] = None # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
resultdict['result']['varinfo']['features'][key] = thisval # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['key']] # depends on [control=['if'], data=[]]
# now get the periodograms and phased LCs
for key in pfmethods:
# get the periodogram for this method
periodogram = cpdict[key]['periodogram']
# get the phased LC with best period
if 0 in cpdict[key] and isinstance(cpdict[key][0], dict):
phasedlc0plot = cpdict[key][0]['plot']
phasedlc0period = float(cpdict[key][0]['period'])
phasedlc0epoch = float(cpdict[key][0]['epoch']) # depends on [control=['if'], data=[]]
else:
phasedlc0plot = None
phasedlc0period = None
phasedlc0epoch = None
# get the associated fitinfo for this period if it
# exists
if 0 in cpdict[key] and isinstance(cpdict[key][0], dict) and ('lcfit' in cpdict[key][0]) and isinstance(cpdict[key][0]['lcfit'], dict):
phasedlc0fit = {'method': cpdict[key][0]['lcfit']['fittype'], 'redchisq': cpdict[key][0]['lcfit']['fitredchisq'], 'chisq': cpdict[key][0]['lcfit']['fitchisq'], 'params': cpdict[key][0]['lcfit']['fitinfo']['finalparams'] if 'finalparams' in cpdict[key][0]['lcfit']['fitinfo'] else None} # depends on [control=['if'], data=[]]
else:
phasedlc0fit = None
# get the phased LC with 2nd best period
if 1 in cpdict[key] and isinstance(cpdict[key][1], dict):
phasedlc1plot = cpdict[key][1]['plot']
phasedlc1period = float(cpdict[key][1]['period'])
phasedlc1epoch = float(cpdict[key][1]['epoch']) # depends on [control=['if'], data=[]]
else:
phasedlc1plot = None
phasedlc1period = None
phasedlc1epoch = None
# get the associated fitinfo for this period if it
# exists
if 1 in cpdict[key] and isinstance(cpdict[key][1], dict) and ('lcfit' in cpdict[key][1]) and isinstance(cpdict[key][1]['lcfit'], dict):
phasedlc1fit = {'method': cpdict[key][1]['lcfit']['fittype'], 'redchisq': cpdict[key][1]['lcfit']['fitredchisq'], 'chisq': cpdict[key][1]['lcfit']['fitchisq'], 'params': cpdict[key][1]['lcfit']['fitinfo']['finalparams'] if 'finalparams' in cpdict[key][1]['lcfit']['fitinfo'] else None} # depends on [control=['if'], data=[]]
else:
phasedlc1fit = None
# get the phased LC with 3rd best period
if 2 in cpdict[key] and isinstance(cpdict[key][2], dict):
phasedlc2plot = cpdict[key][2]['plot']
phasedlc2period = float(cpdict[key][2]['period'])
phasedlc2epoch = float(cpdict[key][2]['epoch']) # depends on [control=['if'], data=[]]
else:
phasedlc2plot = None
phasedlc2period = None
phasedlc2epoch = None
# get the associated fitinfo for this period if it
# exists
if 2 in cpdict[key] and isinstance(cpdict[key][2], dict) and ('lcfit' in cpdict[key][2]) and isinstance(cpdict[key][2]['lcfit'], dict):
phasedlc2fit = {'method': cpdict[key][2]['lcfit']['fittype'], 'redchisq': cpdict[key][2]['lcfit']['fitredchisq'], 'chisq': cpdict[key][2]['lcfit']['fitchisq'], 'params': cpdict[key][2]['lcfit']['fitinfo']['finalparams'] if 'finalparams' in cpdict[key][2]['lcfit']['fitinfo'] else None} # depends on [control=['if'], data=[]]
else:
phasedlc2fit = None
resultdict['result'][key] = {'nbestperiods': cpdict[key]['nbestperiods'], 'periodogram': periodogram, 'bestperiod': cpdict[key]['bestperiod'], 'phasedlc0': {'plot': phasedlc0plot, 'period': phasedlc0period, 'epoch': phasedlc0epoch, 'lcfit': phasedlc0fit}, 'phasedlc1': {'plot': phasedlc1plot, 'period': phasedlc1period, 'epoch': phasedlc1epoch, 'lcfit': phasedlc1fit}, 'phasedlc2': {'plot': phasedlc2plot, 'period': phasedlc2period, 'epoch': phasedlc2epoch, 'lcfit': phasedlc2fit}} # depends on [control=['for'], data=['key']]
#
# end of processing per pfmethod
#
# return the checkplot via JSON
self.write(resultdict)
self.finish() # depends on [control=['if'], data=[]]
else:
LOGGER.error('could not find %s' % self.checkplotfname)
resultdict = {'status': 'error', 'message': "This checkplot doesn't exist.", 'readonly': self.readonly, 'result': None}
self.write(resultdict)
self.finish() # depends on [control=['if'], data=[]]
else:
resultdict = {'status': 'error', 'message': 'No checkplot provided to load.', 'readonly': self.readonly, 'result': None}
self.write(resultdict) |
def update_field(self, f, obj):
""" update a field
:param str f: name of field to be updated.
:param obj: value of field to be updated.
"""
n = self.get_private_name(f)
if not hasattr(self, n):
raise AttributeError('{0} is not in {1}'.format(n, self.__class__.__name__))
setattr(self, n, obj)
self.__origin_keys.add(f) | def function[update_field, parameter[self, f, obj]]:
    constant[ Update a field.
:param str f: name of field to be updated.
:param obj: value of field to be updated.
]
variable[n] assign[=] call[name[self].get_private_name, parameter[name[f]]]
if <ast.UnaryOp object at 0x7da2054a6c20> begin[:]
<ast.Raise object at 0x7da2054a4d00>
call[name[setattr], parameter[name[self], name[n], name[obj]]]
call[name[self].__origin_keys.add, parameter[name[f]]] | keyword[def] identifier[update_field] ( identifier[self] , identifier[f] , identifier[obj] ):
literal[string]
identifier[n] = identifier[self] . identifier[get_private_name] ( identifier[f] )
keyword[if] keyword[not] identifier[hasattr] ( identifier[self] , identifier[n] ):
keyword[raise] identifier[AttributeError] ( literal[string] . identifier[format] ( identifier[n] , identifier[self] . identifier[__class__] . identifier[__name__] ))
identifier[setattr] ( identifier[self] , identifier[n] , identifier[obj] )
identifier[self] . identifier[__origin_keys] . identifier[add] ( identifier[f] ) | def update_field(self, f, obj):
""" update a field
:param str f: name of field to be updated.
:param obj: value of field to be updated.
"""
n = self.get_private_name(f)
if not hasattr(self, n):
raise AttributeError('{0} is not in {1}'.format(n, self.__class__.__name__)) # depends on [control=['if'], data=[]]
setattr(self, n, obj)
self.__origin_keys.add(f) |
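A minimal usage sketch of the pattern above, with a hypothetical Record class; get_private_name is assumed to prepend an underscore:

class Record:
    def __init__(self):
        self._name = None
        self.__origin_keys = set()  # tracks which public fields were updated

    def get_private_name(self, f):
        return '_' + f  # assumed mapping from public to private attribute name

    def update_field(self, f, obj):
        n = self.get_private_name(f)
        if not hasattr(self, n):
            raise AttributeError('{0} is not in {1}'.format(n, self.__class__.__name__))
        setattr(self, n, obj)
        self.__origin_keys.add(f)

r = Record()
r.update_field('name', 'alice')   # sets r._name = 'alice'
# r.update_field('missing', 1) would raise AttributeError: _missing is not in Record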
def _massageData(self, row):
"""
Convert a row into a tuple of Item instances, by slicing it
according to the number of columns for each instance, and then
proceeding as for ItemQuery._massageData.
@param row: an n-tuple, where n is the total number of columns
specified by all the item types in this query.
@return: a tuple of instances of the types specified by this query.
"""
offset = 0
resultBits = []
for i, tableClass in enumerate(self.tableClass):
numAttrs = self.schemaLengths[i]
result = self.store._loadedItem(self.tableClass[i],
row[offset],
row[offset+1:offset+numAttrs])
assert result.store is not None, "result %r has funky store" % (result,)
resultBits.append(result)
offset += numAttrs
return tuple(resultBits) | def function[_massageData, parameter[self, row]]:
constant[
Convert a row into a tuple of Item instances, by slicing it
according to the number of columns for each instance, and then
proceeding as for ItemQuery._massageData.
@param row: an n-tuple, where n is the total number of columns
specified by all the item types in this query.
@return: a tuple of instances of the types specified by this query.
]
variable[offset] assign[=] constant[0]
variable[resultBits] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da1b0fc4c40>, <ast.Name object at 0x7da1b0fc4fd0>]]] in starred[call[name[enumerate], parameter[name[self].tableClass]]] begin[:]
variable[numAttrs] assign[=] call[name[self].schemaLengths][name[i]]
variable[result] assign[=] call[name[self].store._loadedItem, parameter[call[name[self].tableClass][name[i]], call[name[row]][name[offset]], call[name[row]][<ast.Slice object at 0x7da1b0fc4910>]]]
assert[compare[name[result].store is_not constant[None]]]
call[name[resultBits].append, parameter[name[result]]]
<ast.AugAssign object at 0x7da1b0f0f2e0>
return[call[name[tuple], parameter[name[resultBits]]]] | keyword[def] identifier[_massageData] ( identifier[self] , identifier[row] ):
literal[string]
identifier[offset] = literal[int]
identifier[resultBits] =[]
keyword[for] identifier[i] , identifier[tableClass] keyword[in] identifier[enumerate] ( identifier[self] . identifier[tableClass] ):
identifier[numAttrs] = identifier[self] . identifier[schemaLengths] [ identifier[i] ]
identifier[result] = identifier[self] . identifier[store] . identifier[_loadedItem] ( identifier[self] . identifier[tableClass] [ identifier[i] ],
identifier[row] [ identifier[offset] ],
identifier[row] [ identifier[offset] + literal[int] : identifier[offset] + identifier[numAttrs] ])
keyword[assert] identifier[result] . identifier[store] keyword[is] keyword[not] keyword[None] , literal[string] %( identifier[result] ,)
identifier[resultBits] . identifier[append] ( identifier[result] )
identifier[offset] += identifier[numAttrs]
keyword[return] identifier[tuple] ( identifier[resultBits] ) | def _massageData(self, row):
"""
Convert a row into a tuple of Item instances, by slicing it
according to the number of columns for each instance, and then
proceeding as for ItemQuery._massageData.
@param row: an n-tuple, where n is the total number of columns
specified by all the item types in this query.
@return: a tuple of instances of the types specified by this query.
"""
offset = 0
resultBits = []
for (i, tableClass) in enumerate(self.tableClass):
numAttrs = self.schemaLengths[i]
result = self.store._loadedItem(self.tableClass[i], row[offset], row[offset + 1:offset + numAttrs])
assert result.store is not None, 'result %r has funky store' % (result,)
resultBits.append(result)
offset += numAttrs # depends on [control=['for'], data=[]]
return tuple(resultBits) |
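A toy illustration of the slicing logic above, assuming two item types with schema lengths 2 and 3; each slice starts with a store id followed by that type's attribute columns:

row = ('sid1', 'a1', 'sid2', 'b1', 'b2')
schema_lengths = (2, 3)
offset, parts = 0, []
for num_attrs in schema_lengths:
    # mirrors the row[offset], row[offset + 1:offset + numAttrs] split used above
    parts.append((row[offset], row[offset + 1:offset + num_attrs]))
    offset += num_attrs
print(parts)  # [('sid1', ('a1',)), ('sid2', ('b1', 'b2'))]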
def report( callingClass,
astr_key,
ab_exitToOs=1,
astr_header=""
):
'''
Error handling.
Based on the <astr_key>, error information is extracted from
        _dictErr and sent to the log object.
        If <ab_exitToOs> is False, the error is considered non-fatal and
        processing can continue; otherwise processing terminates.
'''
log = callingClass.log()
b_syslog = log.syslog()
log.syslog(False)
if ab_exitToOs: log( Colors.RED + "\n:: FATAL ERROR :: " + Colors.NO_COLOUR )
else: log( Colors.YELLOW + "\n:: WARNING :: " + Colors.NO_COLOUR )
if len(astr_header): log( Colors.BROWN + astr_header + Colors.NO_COLOUR )
log( "\n" )
log( "\tSorry, some error seems to have occurred in:\n\t<" )
log( Colors.LIGHT_GREEN + ("%s" % callingClass.name()) + Colors.NO_COLOUR + "::")
log( Colors.LIGHT_CYAN + ("%s" % inspect.stack()[2][4][0].strip()) + Colors.NO_COLOUR)
log( "> called by <")
try:
caller = inspect.stack()[3][4][0].strip()
except:
caller = '__main__'
log( Colors.LIGHT_GREEN + ("%s" % callingClass.name()) + Colors.NO_COLOUR + "::")
log( Colors.LIGHT_CYAN + ("%s" % caller) + Colors.NO_COLOUR)
log( ">\n")
log( "\tWhile %s\n" % callingClass._dictErr[astr_key]['action'] )
log( "\t%s\n" % callingClass._dictErr[astr_key]['error'] )
log( "\n" )
if ab_exitToOs:
log( "Returning to system with error code %d\n" % \
callingClass._dictErr[astr_key]['exitCode'] )
sys.exit( callingClass._dictErr[astr_key]['exitCode'] )
log.syslog(b_syslog)
return callingClass._dictErr[astr_key]['exitCode'] | def function[report, parameter[callingClass, astr_key, ab_exitToOs, astr_header]]:
constant[
Error handling.
Based on the <astr_key>, error information is extracted from
_dictErr and sent to log object.
If <ab_exitToOs> is False, error is considered non-fatal and
processing can continue, otherwise processing terminates.
]
variable[log] assign[=] call[name[callingClass].log, parameter[]]
variable[b_syslog] assign[=] call[name[log].syslog, parameter[]]
call[name[log].syslog, parameter[constant[False]]]
if name[ab_exitToOs] begin[:]
call[name[log], parameter[binary_operation[binary_operation[name[Colors].RED + constant[
:: FATAL ERROR :: ]] + name[Colors].NO_COLOUR]]]
if call[name[len], parameter[name[astr_header]]] begin[:]
call[name[log], parameter[binary_operation[binary_operation[name[Colors].BROWN + name[astr_header]] + name[Colors].NO_COLOUR]]]
call[name[log], parameter[constant[
]]]
call[name[log], parameter[constant[ Sorry, some error seems to have occurred in:
<]]]
call[name[log], parameter[binary_operation[binary_operation[binary_operation[name[Colors].LIGHT_GREEN + binary_operation[constant[%s] <ast.Mod object at 0x7da2590d6920> call[name[callingClass].name, parameter[]]]] + name[Colors].NO_COLOUR] + constant[::]]]]
call[name[log], parameter[binary_operation[binary_operation[name[Colors].LIGHT_CYAN + binary_operation[constant[%s] <ast.Mod object at 0x7da2590d6920> call[call[call[call[call[name[inspect].stack, parameter[]]][constant[2]]][constant[4]]][constant[0]].strip, parameter[]]]] + name[Colors].NO_COLOUR]]]
call[name[log], parameter[constant[> called by <]]]
<ast.Try object at 0x7da1b07bb1f0>
call[name[log], parameter[binary_operation[binary_operation[binary_operation[name[Colors].LIGHT_GREEN + binary_operation[constant[%s] <ast.Mod object at 0x7da2590d6920> call[name[callingClass].name, parameter[]]]] + name[Colors].NO_COLOUR] + constant[::]]]]
call[name[log], parameter[binary_operation[binary_operation[name[Colors].LIGHT_CYAN + binary_operation[constant[%s] <ast.Mod object at 0x7da2590d6920> name[caller]]] + name[Colors].NO_COLOUR]]]
call[name[log], parameter[constant[>
]]]
call[name[log], parameter[binary_operation[constant[ While %s
] <ast.Mod object at 0x7da2590d6920> call[call[name[callingClass]._dictErr][name[astr_key]]][constant[action]]]]]
call[name[log], parameter[binary_operation[constant[ %s
] <ast.Mod object at 0x7da2590d6920> call[call[name[callingClass]._dictErr][name[astr_key]]][constant[error]]]]]
call[name[log], parameter[constant[
]]]
if name[ab_exitToOs] begin[:]
call[name[log], parameter[binary_operation[constant[Returning to system with error code %d
] <ast.Mod object at 0x7da2590d6920> call[call[name[callingClass]._dictErr][name[astr_key]]][constant[exitCode]]]]]
call[name[sys].exit, parameter[call[call[name[callingClass]._dictErr][name[astr_key]]][constant[exitCode]]]]
call[name[log].syslog, parameter[name[b_syslog]]]
return[call[call[name[callingClass]._dictErr][name[astr_key]]][constant[exitCode]]] | keyword[def] identifier[report] ( identifier[callingClass] ,
identifier[astr_key] ,
identifier[ab_exitToOs] = literal[int] ,
identifier[astr_header] = literal[string]
):
literal[string]
identifier[log] = identifier[callingClass] . identifier[log] ()
identifier[b_syslog] = identifier[log] . identifier[syslog] ()
identifier[log] . identifier[syslog] ( keyword[False] )
keyword[if] identifier[ab_exitToOs] : identifier[log] ( identifier[Colors] . identifier[RED] + literal[string] + identifier[Colors] . identifier[NO_COLOUR] )
keyword[else] : identifier[log] ( identifier[Colors] . identifier[YELLOW] + literal[string] + identifier[Colors] . identifier[NO_COLOUR] )
keyword[if] identifier[len] ( identifier[astr_header] ): identifier[log] ( identifier[Colors] . identifier[BROWN] + identifier[astr_header] + identifier[Colors] . identifier[NO_COLOUR] )
identifier[log] ( literal[string] )
identifier[log] ( literal[string] )
identifier[log] ( identifier[Colors] . identifier[LIGHT_GREEN] +( literal[string] % identifier[callingClass] . identifier[name] ())+ identifier[Colors] . identifier[NO_COLOUR] + literal[string] )
identifier[log] ( identifier[Colors] . identifier[LIGHT_CYAN] +( literal[string] % identifier[inspect] . identifier[stack] ()[ literal[int] ][ literal[int] ][ literal[int] ]. identifier[strip] ())+ identifier[Colors] . identifier[NO_COLOUR] )
identifier[log] ( literal[string] )
keyword[try] :
identifier[caller] = identifier[inspect] . identifier[stack] ()[ literal[int] ][ literal[int] ][ literal[int] ]. identifier[strip] ()
keyword[except] :
identifier[caller] = literal[string]
identifier[log] ( identifier[Colors] . identifier[LIGHT_GREEN] +( literal[string] % identifier[callingClass] . identifier[name] ())+ identifier[Colors] . identifier[NO_COLOUR] + literal[string] )
identifier[log] ( identifier[Colors] . identifier[LIGHT_CYAN] +( literal[string] % identifier[caller] )+ identifier[Colors] . identifier[NO_COLOUR] )
identifier[log] ( literal[string] )
identifier[log] ( literal[string] % identifier[callingClass] . identifier[_dictErr] [ identifier[astr_key] ][ literal[string] ])
identifier[log] ( literal[string] % identifier[callingClass] . identifier[_dictErr] [ identifier[astr_key] ][ literal[string] ])
identifier[log] ( literal[string] )
keyword[if] identifier[ab_exitToOs] :
identifier[log] ( literal[string] % identifier[callingClass] . identifier[_dictErr] [ identifier[astr_key] ][ literal[string] ])
identifier[sys] . identifier[exit] ( identifier[callingClass] . identifier[_dictErr] [ identifier[astr_key] ][ literal[string] ])
identifier[log] . identifier[syslog] ( identifier[b_syslog] )
keyword[return] identifier[callingClass] . identifier[_dictErr] [ identifier[astr_key] ][ literal[string] ] | def report(callingClass, astr_key, ab_exitToOs=1, astr_header=''):
"""
Error handling.
Based on the <astr_key>, error information is extracted from
    _dictErr and sent to the log object.
    If <ab_exitToOs> is False, the error is considered non-fatal and
    processing can continue; otherwise processing terminates.
"""
log = callingClass.log()
b_syslog = log.syslog()
log.syslog(False)
if ab_exitToOs:
log(Colors.RED + '\n:: FATAL ERROR :: ' + Colors.NO_COLOUR) # depends on [control=['if'], data=[]]
else:
log(Colors.YELLOW + '\n:: WARNING :: ' + Colors.NO_COLOUR)
if len(astr_header):
log(Colors.BROWN + astr_header + Colors.NO_COLOUR) # depends on [control=['if'], data=[]]
log('\n')
log('\tSorry, some error seems to have occurred in:\n\t<')
log(Colors.LIGHT_GREEN + '%s' % callingClass.name() + Colors.NO_COLOUR + '::')
log(Colors.LIGHT_CYAN + '%s' % inspect.stack()[2][4][0].strip() + Colors.NO_COLOUR)
log('> called by <')
try:
caller = inspect.stack()[3][4][0].strip() # depends on [control=['try'], data=[]]
except:
caller = '__main__' # depends on [control=['except'], data=[]]
log(Colors.LIGHT_GREEN + '%s' % callingClass.name() + Colors.NO_COLOUR + '::')
log(Colors.LIGHT_CYAN + '%s' % caller + Colors.NO_COLOUR)
log('>\n')
log('\tWhile %s\n' % callingClass._dictErr[astr_key]['action'])
log('\t%s\n' % callingClass._dictErr[astr_key]['error'])
log('\n')
if ab_exitToOs:
log('Returning to system with error code %d\n' % callingClass._dictErr[astr_key]['exitCode'])
sys.exit(callingClass._dictErr[astr_key]['exitCode']) # depends on [control=['if'], data=[]]
log.syslog(b_syslog)
return callingClass._dictErr[astr_key]['exitCode'] |
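A sketch of the _dictErr layout that report() expects on the calling class; the key and messages here are hypothetical:

_dictErr = {
    'inputReadError': {
        'action': 'reading the input file.',      # logged after "While ..."
        'error': 'The file could not be opened.',
        'exitCode': 10,
    },
}
# report(SomeClass, 'inputReadError', ab_exitToOs=0) logs the action and
# error strings and returns 10; with ab_exitToOs=1 it calls sys.exit(10).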
def pokeStorable(self, storable, objname, obj, container, visited=None, _stack=None, **kwargs):
"""
Arguments:
storable (StorableHandler): storable instance.
objname (any): record reference.
obj (any): object to be serialized.
container (any): container.
visited (dict): map of the previously serialized objects that are
                passed by reference; keys are the objects' IDs.
_stack (CallStack): stack of parent object names.
Trailing keyword arguments are passed to the :class:`Storable` instance's
:attr:`~Storable.poke`.
"""
#print((objname, storable.storable_type)) # debug
storable.poke(self, objname, obj, container, visited=visited, _stack=_stack, **kwargs)
try:
record = self.getRecord(objname, container)
except KeyError:
# fake storable; silently skip
if self.verbose:
print("skipping `{}` (type: {})".format(objname, storable.storable_type))
if 1 < self.verbose:
print(traceback.format_exc())
else:
self.setRecordAttr('type', storable.storable_type, record)
if storable.version is not None:
self.setRecordAttr('version', from_version(storable.version), record) | def function[pokeStorable, parameter[self, storable, objname, obj, container, visited, _stack]]:
constant[
Arguments:
storable (StorableHandler): storable instance.
objname (any): record reference.
obj (any): object to be serialized.
container (any): container.
visited (dict): map of the previously serialized objects that are
                passed by reference; keys are the objects' IDs.
_stack (CallStack): stack of parent object names.
Trailing keyword arguments are passed to the :class:`Storable` instance's
:attr:`~Storable.poke`.
]
call[name[storable].poke, parameter[name[self], name[objname], name[obj], name[container]]]
<ast.Try object at 0x7da1b2346dd0> | keyword[def] identifier[pokeStorable] ( identifier[self] , identifier[storable] , identifier[objname] , identifier[obj] , identifier[container] , identifier[visited] = keyword[None] , identifier[_stack] = keyword[None] ,** identifier[kwargs] ):
literal[string]
identifier[storable] . identifier[poke] ( identifier[self] , identifier[objname] , identifier[obj] , identifier[container] , identifier[visited] = identifier[visited] , identifier[_stack] = identifier[_stack] ,** identifier[kwargs] )
keyword[try] :
identifier[record] = identifier[self] . identifier[getRecord] ( identifier[objname] , identifier[container] )
keyword[except] identifier[KeyError] :
keyword[if] identifier[self] . identifier[verbose] :
identifier[print] ( literal[string] . identifier[format] ( identifier[objname] , identifier[storable] . identifier[storable_type] ))
keyword[if] literal[int] < identifier[self] . identifier[verbose] :
identifier[print] ( identifier[traceback] . identifier[format_exc] ())
keyword[else] :
identifier[self] . identifier[setRecordAttr] ( literal[string] , identifier[storable] . identifier[storable_type] , identifier[record] )
keyword[if] identifier[storable] . identifier[version] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[setRecordAttr] ( literal[string] , identifier[from_version] ( identifier[storable] . identifier[version] ), identifier[record] ) | def pokeStorable(self, storable, objname, obj, container, visited=None, _stack=None, **kwargs):
"""
Arguments:
storable (StorableHandler): storable instance.
objname (any): record reference.
obj (any): object to be serialized.
container (any): container.
visited (dict): map of the previously serialized objects that are
            passed by reference; keys are the objects' IDs.
_stack (CallStack): stack of parent object names.
Trailing keyword arguments are passed to the :class:`Storable` instance's
:attr:`~Storable.poke`.
"""
#print((objname, storable.storable_type)) # debug
storable.poke(self, objname, obj, container, visited=visited, _stack=_stack, **kwargs)
try:
record = self.getRecord(objname, container) # depends on [control=['try'], data=[]]
except KeyError:
# fake storable; silently skip
if self.verbose:
print('skipping `{}` (type: {})'.format(objname, storable.storable_type))
if 1 < self.verbose:
print(traceback.format_exc()) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['except'], data=[]]
else:
self.setRecordAttr('type', storable.storable_type, record)
if storable.version is not None:
self.setRecordAttr('version', from_version(storable.version), record) # depends on [control=['if'], data=[]] |
def get_atoms(self, ligands=True, inc_alt_states=False):
"""Flat list of all the Atoms in the Polymer.
Parameters
----------
inc_alt_states : bool
            If true, atoms from alternate conformations are included rather
than only the "active" states.
Returns
-------
atoms : itertools.chain
Returns an iterator of all the atoms. Convert to list if you
require indexing.
"""
if ligands and self.ligands:
monomers = self._monomers + self.ligands._monomers
else:
monomers = self._monomers
atoms = itertools.chain(
*(list(m.get_atoms(inc_alt_states=inc_alt_states)) for m in monomers))
return atoms | def function[get_atoms, parameter[self, ligands, inc_alt_states]]:
constant[Flat list of all the Atoms in the Polymer.
Parameters
----------
inc_alt_states : bool
        If true, atoms from alternate conformations are included rather
than only the "active" states.
Returns
-------
atoms : itertools.chain
Returns an iterator of all the atoms. Convert to list if you
require indexing.
]
if <ast.BoolOp object at 0x7da1b28dfaf0> begin[:]
variable[monomers] assign[=] binary_operation[name[self]._monomers + name[self].ligands._monomers]
variable[atoms] assign[=] call[name[itertools].chain, parameter[<ast.Starred object at 0x7da1b26268f0>]]
return[name[atoms]] | keyword[def] identifier[get_atoms] ( identifier[self] , identifier[ligands] = keyword[True] , identifier[inc_alt_states] = keyword[False] ):
literal[string]
keyword[if] identifier[ligands] keyword[and] identifier[self] . identifier[ligands] :
identifier[monomers] = identifier[self] . identifier[_monomers] + identifier[self] . identifier[ligands] . identifier[_monomers]
keyword[else] :
identifier[monomers] = identifier[self] . identifier[_monomers]
identifier[atoms] = identifier[itertools] . identifier[chain] (
*( identifier[list] ( identifier[m] . identifier[get_atoms] ( identifier[inc_alt_states] = identifier[inc_alt_states] )) keyword[for] identifier[m] keyword[in] identifier[monomers] ))
keyword[return] identifier[atoms] | def get_atoms(self, ligands=True, inc_alt_states=False):
"""Flat list of all the Atoms in the Polymer.
Parameters
----------
inc_alt_states : bool
        If true, atoms from alternate conformations are included rather
than only the "active" states.
Returns
-------
atoms : itertools.chain
Returns an iterator of all the atoms. Convert to list if you
require indexing.
"""
if ligands and self.ligands:
monomers = self._monomers + self.ligands._monomers # depends on [control=['if'], data=[]]
else:
monomers = self._monomers
atoms = itertools.chain(*(list(m.get_atoms(inc_alt_states=inc_alt_states)) for m in monomers))
return atoms |
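A toy illustration of the chain-based flattening, with plain lists standing in for each monomer's get_atoms() result:

import itertools

monomer_atoms = [['N', 'CA', 'C'], ['N', 'CA']]  # stand-ins for m.get_atoms()
atoms = itertools.chain(*(list(m) for m in monomer_atoms))
print(list(atoms))  # ['N', 'CA', 'C', 'N', 'CA'] -- convert to list before indexing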
def NS(domain, resolve=True, nameserver=None):
'''
Return a list of IPs of the nameservers for ``domain``
If 'resolve' is False, don't resolve names.
CLI Example:
.. code-block:: bash
salt ns1 dnsutil.NS google.com
'''
if _has_dig():
return __salt__['dig.NS'](domain, resolve, nameserver)
return 'This function requires dig, which is not currently available' | def function[NS, parameter[domain, resolve, nameserver]]:
constant[
Return a list of IPs of the nameservers for ``domain``
If 'resolve' is False, don't resolve names.
CLI Example:
.. code-block:: bash
salt ns1 dnsutil.NS google.com
]
if call[name[_has_dig], parameter[]] begin[:]
return[call[call[name[__salt__]][constant[dig.NS]], parameter[name[domain], name[resolve], name[nameserver]]]]
return[constant[This function requires dig, which is not currently available]] | keyword[def] identifier[NS] ( identifier[domain] , identifier[resolve] = keyword[True] , identifier[nameserver] = keyword[None] ):
literal[string]
keyword[if] identifier[_has_dig] ():
keyword[return] identifier[__salt__] [ literal[string] ]( identifier[domain] , identifier[resolve] , identifier[nameserver] )
keyword[return] literal[string] | def NS(domain, resolve=True, nameserver=None):
"""
Return a list of IPs of the nameservers for ``domain``
If 'resolve' is False, don't resolve names.
CLI Example:
.. code-block:: bash
salt ns1 dnsutil.NS google.com
"""
if _has_dig():
return __salt__['dig.NS'](domain, resolve, nameserver) # depends on [control=['if'], data=[]]
return 'This function requires dig, which is not currently available' |
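A hedged sketch of calling this from another Salt execution module, where the __salt__ loader dict is injected at runtime (it is not importable directly):

def nameserver_ips(domain):
    # returns a list of IPs when dig is available; otherwise the
    # "requires dig" message string is returned instead
    return __salt__['dnsutil.NS'](domain, resolve=True, nameserver='8.8.8.8')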
def start(self, ccallbacks=None):
"""Establish and maintain connections."""
self.__manage_g = gevent.spawn(self.__manage_connections, ccallbacks)
self.__ready_ev.wait() | def function[start, parameter[self, ccallbacks]]:
constant[Establish and maintain connections.]
name[self].__manage_g assign[=] call[name[gevent].spawn, parameter[name[self].__manage_connections, name[ccallbacks]]]
call[name[self].__ready_ev.wait, parameter[]] | keyword[def] identifier[start] ( identifier[self] , identifier[ccallbacks] = keyword[None] ):
literal[string]
identifier[self] . identifier[__manage_g] = identifier[gevent] . identifier[spawn] ( identifier[self] . identifier[__manage_connections] , identifier[ccallbacks] )
identifier[self] . identifier[__ready_ev] . identifier[wait] () | def start(self, ccallbacks=None):
"""Establish and maintain connections."""
self.__manage_g = gevent.spawn(self.__manage_connections, ccallbacks)
self.__ready_ev.wait() |
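A minimal sketch of the spawn-then-wait pattern used by start(), with a toy manager greenlet (assumes gevent is installed):

import gevent
from gevent.event import Event

ready = Event()

def manage_connections():
    ready.set()      # signal readiness once connections are established
    gevent.sleep(0)  # stand-in for the maintenance loop

g = gevent.spawn(manage_connections)
ready.wait()         # like start(), proceed only after the manager signals readiness
g.join()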
def save_load(jid, load, minions=None):
'''
    Save the load to the specified job id (jid)
'''
with _get_serv(commit=True) as cur:
sql = '''INSERT INTO `jids` (`jid`, `load`) VALUES (%s, %s)'''
try:
cur.execute(sql, (jid, salt.utils.json.dumps(load)))
except MySQLdb.IntegrityError:
# https://github.com/saltstack/salt/issues/22171
# Without this try/except we get tons of duplicate entry errors
# which result in job returns not being stored properly
pass | def function[save_load, parameter[jid, load, minions]]:
constant[
    Save the load to the specified job id (jid)
]
with call[name[_get_serv], parameter[]] begin[:]
variable[sql] assign[=] constant[INSERT INTO `jids` (`jid`, `load`) VALUES (%s, %s)]
<ast.Try object at 0x7da2044c2ec0> | keyword[def] identifier[save_load] ( identifier[jid] , identifier[load] , identifier[minions] = keyword[None] ):
literal[string]
keyword[with] identifier[_get_serv] ( identifier[commit] = keyword[True] ) keyword[as] identifier[cur] :
identifier[sql] = literal[string]
keyword[try] :
identifier[cur] . identifier[execute] ( identifier[sql] ,( identifier[jid] , identifier[salt] . identifier[utils] . identifier[json] . identifier[dumps] ( identifier[load] )))
keyword[except] identifier[MySQLdb] . identifier[IntegrityError] :
keyword[pass] | def save_load(jid, load, minions=None):
"""
    Save the load to the specified job id (jid)
"""
with _get_serv(commit=True) as cur:
sql = 'INSERT INTO `jids` (`jid`, `load`) VALUES (%s, %s)'
try:
cur.execute(sql, (jid, salt.utils.json.dumps(load))) # depends on [control=['try'], data=[]]
except MySQLdb.IntegrityError:
# https://github.com/saltstack/salt/issues/22171
# Without this try/except we get tons of duplicate entry errors
# which result in job returns not being stored properly
pass # depends on [control=['except'], data=[]] # depends on [control=['with'], data=['cur']] |
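A sketch of the statement the function executes; the cursor call is commented out because it needs a live MySQL connection:

import json

jid = '20230101120000000000'
load = {'fun': 'test.ping', 'tgt': '*'}
sql = 'INSERT INTO `jids` (`jid`, `load`) VALUES (%s, %s)'
params = (jid, json.dumps(load))  # salt.utils.json wraps the stdlib json module
# cur.execute(sql, params)
# a second execute with the same jid raises MySQLdb.IntegrityError, which
# save_load() deliberately swallows so duplicate returns do not abort the run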
def search_objects(self, query):
"""
        Return a list of objects which match all properties that are set
        (``not None``), applying the AND operator across all of them.
Example:
result = storage_handler.search_objects(
DBPublication(isbn="azgabash")
)
Args:
            query (obj): Object implementing the proper interface with some of the
properties set.
Returns:
list: List of matching objects or ``[]`` if no match was found.
Raises:
InvalidType: When the `query` doesn't implement required
properties.
"""
self._check_obj_properties(query, "query")
# AND operator between results
final_result = None
for result in self._get_subset_matches(query):
if final_result is None:
final_result = result
continue
final_result = intersection(final_result, result)
# if no result is found, `final_result` is None, and I want []
if not final_result:
return []
return list(final_result) | def function[search_objects, parameter[self, query]]:
constant[
        Return a list of objects which match all properties that are set
        (``not None``), applying the AND operator across all of them.
Example:
result = storage_handler.search_objects(
DBPublication(isbn="azgabash")
)
Args:
            query (obj): Object implementing the proper interface with some of the
properties set.
Returns:
list: List of matching objects or ``[]`` if no match was found.
Raises:
InvalidType: When the `query` doesn't implement required
properties.
]
call[name[self]._check_obj_properties, parameter[name[query], constant[query]]]
variable[final_result] assign[=] constant[None]
for taget[name[result]] in starred[call[name[self]._get_subset_matches, parameter[name[query]]]] begin[:]
if compare[name[final_result] is constant[None]] begin[:]
variable[final_result] assign[=] name[result]
continue
variable[final_result] assign[=] call[name[intersection], parameter[name[final_result], name[result]]]
if <ast.UnaryOp object at 0x7da1b26ad9c0> begin[:]
return[list[[]]]
return[call[name[list], parameter[name[final_result]]]] | keyword[def] identifier[search_objects] ( identifier[self] , identifier[query] ):
literal[string]
identifier[self] . identifier[_check_obj_properties] ( identifier[query] , literal[string] )
identifier[final_result] = keyword[None]
keyword[for] identifier[result] keyword[in] identifier[self] . identifier[_get_subset_matches] ( identifier[query] ):
keyword[if] identifier[final_result] keyword[is] keyword[None] :
identifier[final_result] = identifier[result]
keyword[continue]
identifier[final_result] = identifier[intersection] ( identifier[final_result] , identifier[result] )
keyword[if] keyword[not] identifier[final_result] :
keyword[return] []
keyword[return] identifier[list] ( identifier[final_result] ) | def search_objects(self, query):
"""
        Return a list of objects which match all properties that are set
        (``not None``), applying the AND operator across all of them.
Example:
result = storage_handler.search_objects(
DBPublication(isbn="azgabash")
)
Args:
            query (obj): Object implementing the proper interface with some of the
properties set.
Returns:
list: List of matching objects or ``[]`` if no match was found.
Raises:
InvalidType: When the `query` doesn't implement required
properties.
"""
self._check_obj_properties(query, 'query')
# AND operator between results
final_result = None
for result in self._get_subset_matches(query):
if final_result is None:
final_result = result
continue # depends on [control=['if'], data=['final_result']]
final_result = intersection(final_result, result) # depends on [control=['for'], data=['result']]
# if no result is found, `final_result` is None, and I want []
if not final_result:
return [] # depends on [control=['if'], data=[]]
return list(final_result) |
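A toy illustration of the AND-combination step, assuming _get_subset_matches yields one hit set per non-None query property and intersection() behaves like set intersection:

from functools import reduce

def intersection(a, b):  # stand-in for the module's helper
    return set(a) & set(b)

matches_by_property = [{1, 2, 3}, {2, 3}, {3, 4}]  # hypothetical per-property hits
final = reduce(intersection, matches_by_property)
print(list(final) if final else [])  # [3] -- only objects matching every property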
def font(self, prefix, size):
"""Return a QFont corresponding to the given prefix and size."""
font = QFont(self.fontname[prefix])
font.setPixelSize(size)
if prefix[-1] == 's': # solid style
font.setStyleName('Solid')
return font | def function[font, parameter[self, prefix, size]]:
constant[Return a QFont corresponding to the given prefix and size.]
variable[font] assign[=] call[name[QFont], parameter[call[name[self].fontname][name[prefix]]]]
call[name[font].setPixelSize, parameter[name[size]]]
if compare[call[name[prefix]][<ast.UnaryOp object at 0x7da1b16206a0>] equal[==] constant[s]] begin[:]
call[name[font].setStyleName, parameter[constant[Solid]]]
return[name[font]] | keyword[def] identifier[font] ( identifier[self] , identifier[prefix] , identifier[size] ):
literal[string]
identifier[font] = identifier[QFont] ( identifier[self] . identifier[fontname] [ identifier[prefix] ])
identifier[font] . identifier[setPixelSize] ( identifier[size] )
keyword[if] identifier[prefix] [- literal[int] ]== literal[string] :
identifier[font] . identifier[setStyleName] ( literal[string] )
keyword[return] identifier[font] | def font(self, prefix, size):
"""Return a QFont corresponding to the given prefix and size."""
font = QFont(self.fontname[prefix])
font.setPixelSize(size)
if prefix[-1] == 's': # solid style
font.setStyleName('Solid') # depends on [control=['if'], data=[]]
return font |
def run_mutect(job, tumor_bam, normal_bam, univ_options, mutect_options, chrom):
"""
This module will run mutect on the DNA bams
ARGUMENTS
1. tumor_bam: REFER ARGUMENTS of spawn_mutect()
2. normal_bam: REFER ARGUMENTS of spawn_mutect()
3. univ_options: REFER ARGUMENTS of spawn_mutect()
4. mutect_options: REFER ARGUMENTS of spawn_mutect()
5. chrom: String containing chromosome name with chr appended
RETURN VALUES
1. output_files: Dict of results of mutect for chromosome
output_files
|- 'mutect_CHROM.vcf': <JSid>
+- 'mutect_CHROM.out': <JSid>
This module corresponds to node 12 on the tree
"""
job.fileStore.logToMaster('Running mutect on %s:%s' % (univ_options['patient'], chrom))
work_dir = job.fileStore.getLocalTempDir()
input_files = {
'tumor.bam': tumor_bam['tumor_dna_fix_pg_sorted.bam'],
'tumor.bam.bai': tumor_bam['tumor_dna_fix_pg_sorted.bam.bai'],
'normal.bam': normal_bam['normal_dna_fix_pg_sorted.bam'],
'normal.bam.bai': normal_bam['normal_dna_fix_pg_sorted.bam.bai'],
'genome.fa': mutect_options['genome_fasta'],
'genome.fa.fai': mutect_options['genome_fai'],
'genome.dict': mutect_options['genome_dict'],
'cosmic.vcf': mutect_options['cosmic_vcf'],
'cosmic.vcf.idx': mutect_options['cosmic_idx'],
'dbsnp.vcf': mutect_options['dbsnp_vcf'],
'dbsnp.vcf.idx': mutect_options['dbsnp_idx']}
input_files = get_files_from_filestore(job, input_files, work_dir,
docker=True)
mutout = ''.join([work_dir, '/mutect_', chrom, '.out'])
mutvcf = ''.join([work_dir, '/mutect_', chrom, '.vcf'])
parameters = ['-R', input_files['genome.fa'],
'--cosmic', input_files['cosmic.vcf'],
'--dbsnp', input_files['dbsnp.vcf'],
'--input_file:normal', input_files['normal.bam'],
'--input_file:tumor', input_files['tumor.bam'],
#'--tumor_lod', str(10),
#'--initial_tumor_lod', str(4.0),
'-L', chrom,
'--out', docker_path(mutout),
'--vcf', docker_path(mutvcf)
]
Xmx = mutect_options['java_Xmx'] if mutect_options['java_Xmx'] else univ_options['java_Xmx']
docker_call(tool='mutect:1.1.7', tool_parameters=parameters, work_dir=work_dir,
dockerhub=univ_options['dockerhub'], java_opts=Xmx)
output_files = defaultdict()
for mutect_file in [mutout, mutvcf]:
output_files[os.path.basename(mutect_file)] = job.fileStore.writeGlobalFile(mutect_file)
return output_files | def function[run_mutect, parameter[job, tumor_bam, normal_bam, univ_options, mutect_options, chrom]]:
constant[
This module will run mutect on the DNA bams
ARGUMENTS
1. tumor_bam: REFER ARGUMENTS of spawn_mutect()
2. normal_bam: REFER ARGUMENTS of spawn_mutect()
3. univ_options: REFER ARGUMENTS of spawn_mutect()
4. mutect_options: REFER ARGUMENTS of spawn_mutect()
5. chrom: String containing chromosome name with chr appended
RETURN VALUES
1. output_files: Dict of results of mutect for chromosome
output_files
|- 'mutect_CHROM.vcf': <JSid>
+- 'mutect_CHROM.out': <JSid>
This module corresponds to node 12 on the tree
]
call[name[job].fileStore.logToMaster, parameter[binary_operation[constant[Running mutect on %s:%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Subscript object at 0x7da18bc700d0>, <ast.Name object at 0x7da18bc72e30>]]]]]
variable[work_dir] assign[=] call[name[job].fileStore.getLocalTempDir, parameter[]]
variable[input_files] assign[=] dictionary[[<ast.Constant object at 0x7da18bc70790>, <ast.Constant object at 0x7da18bc70820>, <ast.Constant object at 0x7da18bc73af0>, <ast.Constant object at 0x7da18bc70eb0>, <ast.Constant object at 0x7da18bc72b90>, <ast.Constant object at 0x7da18bc70040>, <ast.Constant object at 0x7da18bc70460>, <ast.Constant object at 0x7da18bc71720>, <ast.Constant object at 0x7da18bc73d90>, <ast.Constant object at 0x7da18bc70550>, <ast.Constant object at 0x7da18bc70190>], [<ast.Subscript object at 0x7da18bc72620>, <ast.Subscript object at 0x7da18bc70940>, <ast.Subscript object at 0x7da18bc71d20>, <ast.Subscript object at 0x7da18bc719c0>, <ast.Subscript object at 0x7da18bc70430>, <ast.Subscript object at 0x7da18bc71bd0>, <ast.Subscript object at 0x7da18bc727d0>, <ast.Subscript object at 0x7da18bc72830>, <ast.Subscript object at 0x7da18bc70ca0>, <ast.Subscript object at 0x7da18bc71000>, <ast.Subscript object at 0x7da18bc70af0>]]
variable[input_files] assign[=] call[name[get_files_from_filestore], parameter[name[job], name[input_files], name[work_dir]]]
variable[mutout] assign[=] call[constant[].join, parameter[list[[<ast.Name object at 0x7da18bc73a60>, <ast.Constant object at 0x7da18bc70280>, <ast.Name object at 0x7da18bc737c0>, <ast.Constant object at 0x7da18bc71600>]]]]
variable[mutvcf] assign[=] call[constant[].join, parameter[list[[<ast.Name object at 0x7da18bc725c0>, <ast.Constant object at 0x7da18bc714e0>, <ast.Name object at 0x7da18bc73ca0>, <ast.Constant object at 0x7da18bc73430>]]]]
variable[parameters] assign[=] list[[<ast.Constant object at 0x7da18bc725f0>, <ast.Subscript object at 0x7da18bc719f0>, <ast.Constant object at 0x7da18bc730d0>, <ast.Subscript object at 0x7da18bc70a30>, <ast.Constant object at 0x7da18bc704f0>, <ast.Subscript object at 0x7da18bc739d0>, <ast.Constant object at 0x7da18bc73280>, <ast.Subscript object at 0x7da18bc71690>, <ast.Constant object at 0x7da18bc72fb0>, <ast.Subscript object at 0x7da18bc710c0>, <ast.Constant object at 0x7da18bc72650>, <ast.Name object at 0x7da18bc737f0>, <ast.Constant object at 0x7da18bc70c70>, <ast.Call object at 0x7da18bc71f60>, <ast.Constant object at 0x7da18bc70880>, <ast.Call object at 0x7da18bc73dc0>]]
variable[Xmx] assign[=] <ast.IfExp object at 0x7da18bc70220>
call[name[docker_call], parameter[]]
variable[output_files] assign[=] call[name[defaultdict], parameter[]]
for taget[name[mutect_file]] in starred[list[[<ast.Name object at 0x7da18bc713f0>, <ast.Name object at 0x7da18bc711b0>]]] begin[:]
call[name[output_files]][call[name[os].path.basename, parameter[name[mutect_file]]]] assign[=] call[name[job].fileStore.writeGlobalFile, parameter[name[mutect_file]]]
return[name[output_files]] | keyword[def] identifier[run_mutect] ( identifier[job] , identifier[tumor_bam] , identifier[normal_bam] , identifier[univ_options] , identifier[mutect_options] , identifier[chrom] ):
literal[string]
identifier[job] . identifier[fileStore] . identifier[logToMaster] ( literal[string] %( identifier[univ_options] [ literal[string] ], identifier[chrom] ))
identifier[work_dir] = identifier[job] . identifier[fileStore] . identifier[getLocalTempDir] ()
identifier[input_files] ={
literal[string] : identifier[tumor_bam] [ literal[string] ],
literal[string] : identifier[tumor_bam] [ literal[string] ],
literal[string] : identifier[normal_bam] [ literal[string] ],
literal[string] : identifier[normal_bam] [ literal[string] ],
literal[string] : identifier[mutect_options] [ literal[string] ],
literal[string] : identifier[mutect_options] [ literal[string] ],
literal[string] : identifier[mutect_options] [ literal[string] ],
literal[string] : identifier[mutect_options] [ literal[string] ],
literal[string] : identifier[mutect_options] [ literal[string] ],
literal[string] : identifier[mutect_options] [ literal[string] ],
literal[string] : identifier[mutect_options] [ literal[string] ]}
identifier[input_files] = identifier[get_files_from_filestore] ( identifier[job] , identifier[input_files] , identifier[work_dir] ,
identifier[docker] = keyword[True] )
identifier[mutout] = literal[string] . identifier[join] ([ identifier[work_dir] , literal[string] , identifier[chrom] , literal[string] ])
identifier[mutvcf] = literal[string] . identifier[join] ([ identifier[work_dir] , literal[string] , identifier[chrom] , literal[string] ])
identifier[parameters] =[ literal[string] , identifier[input_files] [ literal[string] ],
literal[string] , identifier[input_files] [ literal[string] ],
literal[string] , identifier[input_files] [ literal[string] ],
literal[string] , identifier[input_files] [ literal[string] ],
literal[string] , identifier[input_files] [ literal[string] ],
literal[string] , identifier[chrom] ,
literal[string] , identifier[docker_path] ( identifier[mutout] ),
literal[string] , identifier[docker_path] ( identifier[mutvcf] )
]
identifier[Xmx] = identifier[mutect_options] [ literal[string] ] keyword[if] identifier[mutect_options] [ literal[string] ] keyword[else] identifier[univ_options] [ literal[string] ]
identifier[docker_call] ( identifier[tool] = literal[string] , identifier[tool_parameters] = identifier[parameters] , identifier[work_dir] = identifier[work_dir] ,
identifier[dockerhub] = identifier[univ_options] [ literal[string] ], identifier[java_opts] = identifier[Xmx] )
identifier[output_files] = identifier[defaultdict] ()
keyword[for] identifier[mutect_file] keyword[in] [ identifier[mutout] , identifier[mutvcf] ]:
identifier[output_files] [ identifier[os] . identifier[path] . identifier[basename] ( identifier[mutect_file] )]= identifier[job] . identifier[fileStore] . identifier[writeGlobalFile] ( identifier[mutect_file] )
keyword[return] identifier[output_files] | def run_mutect(job, tumor_bam, normal_bam, univ_options, mutect_options, chrom):
"""
This module will run mutect on the DNA bams
ARGUMENTS
1. tumor_bam: REFER ARGUMENTS of spawn_mutect()
2. normal_bam: REFER ARGUMENTS of spawn_mutect()
3. univ_options: REFER ARGUMENTS of spawn_mutect()
4. mutect_options: REFER ARGUMENTS of spawn_mutect()
5. chrom: String containing chromosome name with chr appended
RETURN VALUES
1. output_files: Dict of results of mutect for chromosome
output_files
|- 'mutect_CHROM.vcf': <JSid>
+- 'mutect_CHROM.out': <JSid>
This module corresponds to node 12 on the tree
"""
job.fileStore.logToMaster('Running mutect on %s:%s' % (univ_options['patient'], chrom))
work_dir = job.fileStore.getLocalTempDir()
input_files = {'tumor.bam': tumor_bam['tumor_dna_fix_pg_sorted.bam'], 'tumor.bam.bai': tumor_bam['tumor_dna_fix_pg_sorted.bam.bai'], 'normal.bam': normal_bam['normal_dna_fix_pg_sorted.bam'], 'normal.bam.bai': normal_bam['normal_dna_fix_pg_sorted.bam.bai'], 'genome.fa': mutect_options['genome_fasta'], 'genome.fa.fai': mutect_options['genome_fai'], 'genome.dict': mutect_options['genome_dict'], 'cosmic.vcf': mutect_options['cosmic_vcf'], 'cosmic.vcf.idx': mutect_options['cosmic_idx'], 'dbsnp.vcf': mutect_options['dbsnp_vcf'], 'dbsnp.vcf.idx': mutect_options['dbsnp_idx']}
input_files = get_files_from_filestore(job, input_files, work_dir, docker=True)
mutout = ''.join([work_dir, '/mutect_', chrom, '.out'])
mutvcf = ''.join([work_dir, '/mutect_', chrom, '.vcf'])
#'--tumor_lod', str(10),
#'--initial_tumor_lod', str(4.0),
parameters = ['-R', input_files['genome.fa'], '--cosmic', input_files['cosmic.vcf'], '--dbsnp', input_files['dbsnp.vcf'], '--input_file:normal', input_files['normal.bam'], '--input_file:tumor', input_files['tumor.bam'], '-L', chrom, '--out', docker_path(mutout), '--vcf', docker_path(mutvcf)]
Xmx = mutect_options['java_Xmx'] if mutect_options['java_Xmx'] else univ_options['java_Xmx']
docker_call(tool='mutect:1.1.7', tool_parameters=parameters, work_dir=work_dir, dockerhub=univ_options['dockerhub'], java_opts=Xmx)
output_files = defaultdict()
for mutect_file in [mutout, mutvcf]:
output_files[os.path.basename(mutect_file)] = job.fileStore.writeGlobalFile(mutect_file) # depends on [control=['for'], data=['mutect_file']]
return output_files |
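A sketch of the per-chromosome fan-out this job is written for; the Toil wiring below is hypothetical and therefore commented out:

chromosomes = ['chr%s' % c for c in list(range(1, 23)) + ['X', 'Y']]
# for chrom in chromosomes:
#     job.addChildJobFn(run_mutect, tumor_bam, normal_bam,
#                       univ_options, mutect_options, chrom)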
def create_data_and_metadata_from_data(self, data: numpy.ndarray, intensity_calibration: Calibration.Calibration=None, dimensional_calibrations: typing.List[Calibration.Calibration]=None, metadata: dict=None, timestamp: str=None) -> DataAndMetadata.DataAndMetadata:
"""Create a data_and_metadata object from data.
.. versionadded:: 1.0
.. deprecated:: 1.1
Use :py:meth:`~nion.swift.Facade.DataItem.create_data_and_metadata` instead.
Scriptable: No
"""
... | def function[create_data_and_metadata_from_data, parameter[self, data, intensity_calibration, dimensional_calibrations, metadata, timestamp]]:
constant[Create a data_and_metadata object from data.
.. versionadded:: 1.0
.. deprecated:: 1.1
Use :py:meth:`~nion.swift.Facade.DataItem.create_data_and_metadata` instead.
Scriptable: No
]
constant[Ellipsis] | keyword[def] identifier[create_data_and_metadata_from_data] ( identifier[self] , identifier[data] : identifier[numpy] . identifier[ndarray] , identifier[intensity_calibration] : identifier[Calibration] . identifier[Calibration] = keyword[None] , identifier[dimensional_calibrations] : identifier[typing] . identifier[List] [ identifier[Calibration] . identifier[Calibration] ]= keyword[None] , identifier[metadata] : identifier[dict] = keyword[None] , identifier[timestamp] : identifier[str] = keyword[None] )-> identifier[DataAndMetadata] . identifier[DataAndMetadata] :
literal[string]
... | def create_data_and_metadata_from_data(self, data: numpy.ndarray, intensity_calibration: Calibration.Calibration=None, dimensional_calibrations: typing.List[Calibration.Calibration]=None, metadata: dict=None, timestamp: str=None) -> DataAndMetadata.DataAndMetadata:
"""Create a data_and_metadata object from data.
.. versionadded:: 1.0
.. deprecated:: 1.1
Use :py:meth:`~nion.swift.Facade.DataItem.create_data_and_metadata` instead.
Scriptable: No
"""
... |
def number_occurences(self, proc):
"""
        Returns the number of occurrences of commands that contain the given text
        Returns:
            int: The number of occurrences of commands with the given text
.. note::
'proc' can match anywhere in the command path, name or arguments.
"""
return len([True for row in self.data if proc in row[self.command_name]]) | def function[number_occurences, parameter[self, proc]]:
constant[
        Returns the number of occurrences of commands that contain the given text
        Returns:
            int: The number of occurrences of commands with the given text
.. note::
'proc' can match anywhere in the command path, name or arguments.
]
return[call[name[len], parameter[<ast.ListComp object at 0x7da18dc9b250>]]] | keyword[def] identifier[number_occurences] ( identifier[self] , identifier[proc] ):
literal[string]
keyword[return] identifier[len] ([ keyword[True] keyword[for] identifier[row] keyword[in] identifier[self] . identifier[data] keyword[if] identifier[proc] keyword[in] identifier[row] [ identifier[self] . identifier[command_name] ]]) | def number_occurences(self, proc):
"""
        Returns the number of occurrences of commands that contain the given text
        Returns:
            int: The number of occurrences of commands with the given text
.. note::
'proc' can match anywhere in the command path, name or arguments.
"""
return len([True for row in self.data if proc in row[self.command_name]]) |
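A toy run of the counting logic, assuming self.data rows are dicts keyed by the command column:

class Ps:
    command_name = 'COMMAND'
    data = [{'COMMAND': '/usr/sbin/sshd -D'},
            {'COMMAND': 'bash'},
            {'COMMAND': '/usr/bin/ssh-agent'}]

    def number_occurences(self, proc):
        return len([True for row in self.data if proc in row[self.command_name]])

print(Ps().number_occurences('ssh'))  # 2 -- matches sshd and ssh-agent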
def duplicate_files(self):
'''
Search for duplicates of submission file uploads for this assignment.
        This includes searching in other courses, whether inactive or not.
        Returns a list of lists, where each inner list is a set of duplicate submissions
        with at least one of them belonging to this assignment.
'''
result=list()
files = SubmissionFile.valid_ones.order_by('md5')
for key, dup_group in groupby(files, lambda f: f.md5):
file_list=[entry for entry in dup_group]
if len(file_list)>1:
for entry in file_list:
if entry.submissions.filter(assignment=self).count()>0:
result.append([key, file_list])
break
return result | def function[duplicate_files, parameter[self]]:
constant[
Search for duplicates of submission file uploads for this assignment.
        This includes searching in other courses, whether inactive or not.
        Returns a list of lists, where each inner list is a set of duplicate submissions
        with at least one of them belonging to this assignment.
]
variable[result] assign[=] call[name[list], parameter[]]
variable[files] assign[=] call[name[SubmissionFile].valid_ones.order_by, parameter[constant[md5]]]
for taget[tuple[[<ast.Name object at 0x7da18ede6ec0>, <ast.Name object at 0x7da18ede5540>]]] in starred[call[name[groupby], parameter[name[files], <ast.Lambda object at 0x7da18ede7760>]]] begin[:]
variable[file_list] assign[=] <ast.ListComp object at 0x7da18ede5600>
if compare[call[name[len], parameter[name[file_list]]] greater[>] constant[1]] begin[:]
for taget[name[entry]] in starred[name[file_list]] begin[:]
if compare[call[call[name[entry].submissions.filter, parameter[]].count, parameter[]] greater[>] constant[0]] begin[:]
call[name[result].append, parameter[list[[<ast.Name object at 0x7da18ede4100>, <ast.Name object at 0x7da18ede4e80>]]]]
break
return[name[result]] | keyword[def] identifier[duplicate_files] ( identifier[self] ):
literal[string]
identifier[result] = identifier[list] ()
identifier[files] = identifier[SubmissionFile] . identifier[valid_ones] . identifier[order_by] ( literal[string] )
keyword[for] identifier[key] , identifier[dup_group] keyword[in] identifier[groupby] ( identifier[files] , keyword[lambda] identifier[f] : identifier[f] . identifier[md5] ):
identifier[file_list] =[ identifier[entry] keyword[for] identifier[entry] keyword[in] identifier[dup_group] ]
keyword[if] identifier[len] ( identifier[file_list] )> literal[int] :
keyword[for] identifier[entry] keyword[in] identifier[file_list] :
keyword[if] identifier[entry] . identifier[submissions] . identifier[filter] ( identifier[assignment] = identifier[self] ). identifier[count] ()> literal[int] :
identifier[result] . identifier[append] ([ identifier[key] , identifier[file_list] ])
keyword[break]
keyword[return] identifier[result] | def duplicate_files(self):
"""
Search for duplicates of submission file uploads for this assignment.
        This includes searching in other courses, whether inactive or not.
        Returns a list of lists, where each inner list is a set of duplicate submissions
        with at least one of them belonging to this assignment.
"""
result = list()
files = SubmissionFile.valid_ones.order_by('md5')
for (key, dup_group) in groupby(files, lambda f: f.md5):
file_list = [entry for entry in dup_group]
if len(file_list) > 1:
for entry in file_list:
if entry.submissions.filter(assignment=self).count() > 0:
result.append([key, file_list])
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['entry']] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
return result |
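A toy illustration of the md5 grouping step; itertools.groupby only groups adjacent items, which is why the query above orders by md5 first:

from itertools import groupby

files = [('aaa', 'f1'), ('aaa', 'f2'), ('bbb', 'f3')]  # (md5, name) pairs, pre-sorted by md5
for key, dup_group in groupby(files, lambda f: f[0]):
    file_list = list(dup_group)
    if len(file_list) > 1:
        print(key, [name for _, name in file_list])  # aaa ['f1', 'f2']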
def add_path_segment(self, value):
"""
Add a new path segment to the end of the current string
:param string value: the new path segment to use
Example::
>>> u = URL('http://example.com/foo/')
>>> u.add_path_segment('bar').as_string()
'http://example.com/foo/bar'
"""
segments = self.path_segments() + (to_unicode(value),)
return self.path_segments(segments) | def function[add_path_segment, parameter[self, value]]:
constant[
Add a new path segment to the end of the current string
:param string value: the new path segment to use
Example::
>>> u = URL('http://example.com/foo/')
>>> u.add_path_segment('bar').as_string()
'http://example.com/foo/bar'
]
variable[segments] assign[=] binary_operation[call[name[self].path_segments, parameter[]] + tuple[[<ast.Call object at 0x7da1b0fccd00>]]]
return[call[name[self].path_segments, parameter[name[segments]]]] | keyword[def] identifier[add_path_segment] ( identifier[self] , identifier[value] ):
literal[string]
identifier[segments] = identifier[self] . identifier[path_segments] ()+( identifier[to_unicode] ( identifier[value] ),)
keyword[return] identifier[self] . identifier[path_segments] ( identifier[segments] ) | def add_path_segment(self, value):
"""
Add a new path segment to the end of the current string
:param string value: the new path segment to use
Example::
>>> u = URL('http://example.com/foo/')
>>> u.add_path_segment('bar').as_string()
'http://example.com/foo/bar'
"""
segments = self.path_segments() + (to_unicode(value),)
return self.path_segments(segments) |
def assess(model, reaction, flux_coefficient_cutoff=0.001, solver=None):
"""Assesses production capacity.
Assesses the capacity of the model to produce the precursors for the
reaction and absorb the production of the reaction while the reaction is
operating at, or above, the specified cutoff.
Parameters
----------
model : cobra.Model
The cobra model to assess production capacity for
reaction : reaction identifier or cobra.Reaction
The reaction to assess
flux_coefficient_cutoff : float
The minimum flux that reaction must carry to be considered active.
solver : basestring
Solver name. If None, the default solver will be used.
Returns
-------
bool or dict
True if the model can produce the precursors and absorb the products
for the reaction operating at, or above, flux_coefficient_cutoff.
Otherwise, a dictionary of {'precursor': Status, 'product': Status}.
Where Status is the results from assess_precursors and
assess_products, respectively.
"""
reaction = model.reactions.get_by_any(reaction)[0]
with model as m:
m.objective = reaction
if _optimize_or_value(m, solver=solver) >= flux_coefficient_cutoff:
return True
else:
results = dict()
results['precursors'] = assess_component(
model, reaction, 'reactants', flux_coefficient_cutoff)
results['products'] = assess_component(
model, reaction, 'products', flux_coefficient_cutoff)
return results | def function[assess, parameter[model, reaction, flux_coefficient_cutoff, solver]]:
constant[Assesses production capacity.
Assesses the capacity of the model to produce the precursors for the
reaction and absorb the production of the reaction while the reaction is
operating at, or above, the specified cutoff.
Parameters
----------
model : cobra.Model
The cobra model to assess production capacity for
reaction : reaction identifier or cobra.Reaction
The reaction to assess
flux_coefficient_cutoff : float
The minimum flux that reaction must carry to be considered active.
solver : basestring
Solver name. If None, the default solver will be used.
Returns
-------
bool or dict
True if the model can produce the precursors and absorb the products
for the reaction operating at, or above, flux_coefficient_cutoff.
Otherwise, a dictionary of {'precursor': Status, 'product': Status}.
Where Status is the results from assess_precursors and
assess_products, respectively.
]
variable[reaction] assign[=] call[call[name[model].reactions.get_by_any, parameter[name[reaction]]]][constant[0]]
with name[model] begin[:]
name[m].objective assign[=] name[reaction]
if compare[call[name[_optimize_or_value], parameter[name[m]]] greater_or_equal[>=] name[flux_coefficient_cutoff]] begin[:]
return[constant[True]] | keyword[def] identifier[assess] ( identifier[model] , identifier[reaction] , identifier[flux_coefficient_cutoff] = literal[int] , identifier[solver] = keyword[None] ):
literal[string]
identifier[reaction] = identifier[model] . identifier[reactions] . identifier[get_by_any] ( identifier[reaction] )[ literal[int] ]
keyword[with] identifier[model] keyword[as] identifier[m] :
identifier[m] . identifier[objective] = identifier[reaction]
keyword[if] identifier[_optimize_or_value] ( identifier[m] , identifier[solver] = identifier[solver] )>= identifier[flux_coefficient_cutoff] :
keyword[return] keyword[True]
keyword[else] :
identifier[results] = identifier[dict] ()
identifier[results] [ literal[string] ]= identifier[assess_component] (
identifier[model] , identifier[reaction] , literal[string] , identifier[flux_coefficient_cutoff] )
identifier[results] [ literal[string] ]= identifier[assess_component] (
identifier[model] , identifier[reaction] , literal[string] , identifier[flux_coefficient_cutoff] )
keyword[return] identifier[results] | def assess(model, reaction, flux_coefficient_cutoff=0.001, solver=None):
"""Assesses production capacity.
Assesses the capacity of the model to produce the precursors for the
reaction and absorb the production of the reaction while the reaction is
operating at, or above, the specified cutoff.
Parameters
----------
model : cobra.Model
The cobra model to assess production capacity for
reaction : reaction identifier or cobra.Reaction
The reaction to assess
flux_coefficient_cutoff : float
The minimum flux that reaction must carry to be considered active.
solver : basestring
Solver name. If None, the default solver will be used.
Returns
-------
bool or dict
True if the model can produce the precursors and absorb the products
for the reaction operating at, or above, flux_coefficient_cutoff.
Otherwise, a dictionary of {'precursor': Status, 'product': Status}.
Where Status is the results from assess_precursors and
assess_products, respectively.
"""
reaction = model.reactions.get_by_any(reaction)[0]
with model as m:
m.objective = reaction
if _optimize_or_value(m, solver=solver) >= flux_coefficient_cutoff:
return True # depends on [control=['if'], data=[]]
else:
results = dict()
results['precursors'] = assess_component(model, reaction, 'reactants', flux_coefficient_cutoff)
results['products'] = assess_component(model, reaction, 'products', flux_coefficient_cutoff)
return results # depends on [control=['with'], data=['m']] |
def coordinates(x0, y0, distance, angle):
""" Returns the location of a point by rotating around origin (x0,y0).
"""
return (x0 + cos(radians(angle)) * distance,
y0 + sin(radians(angle)) * distance) | def function[coordinates, parameter[x0, y0, distance, angle]]:
constant[ Returns the location of a point by rotating around origin (x0,y0).
]
return[tuple[[<ast.BinOp object at 0x7da1b004e140>, <ast.BinOp object at 0x7da1b004fcd0>]]] | keyword[def] identifier[coordinates] ( identifier[x0] , identifier[y0] , identifier[distance] , identifier[angle] ):
literal[string]
keyword[return] ( identifier[x0] + identifier[cos] ( identifier[radians] ( identifier[angle] ))* identifier[distance] ,
identifier[y0] + identifier[sin] ( identifier[radians] ( identifier[angle] ))* identifier[distance] ) | def coordinates(x0, y0, distance, angle):
""" Returns the location of a point by rotating around origin (x0,y0).
"""
return (x0 + cos(radians(angle)) * distance, y0 + sin(radians(angle)) * distance) |
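A quick sanity check for coordinates; the function assumes
`from math import cos, sin, radians` is in scope.
from math import cos, sin, radians

# Rotating a point 10 units from the origin by 90 degrees lands on the y-axis.
x, y = coordinates(0.0, 0.0, 10.0, 90.0)
assert abs(x) < 1e-9 and abs(y - 10.0) < 1e-9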
def node_pairing(self):
"""if "node" then test current node and next one
if "tag", then create tests for every pair of the current tag.
"""
value = self.attributes['node_pairing']
if value not in IMB.NODE_PAIRING:
msg = 'Unexpected {0} value: got "{1}" but valid values are {2}'
msg = msg.format('node_pairing', value, IMB.NODE_PAIRING)
raise ValueError(msg)
return value | def function[node_pairing, parameter[self]]:
constant[if "node" then test current node and next one
if "tag", then create tests for every pair of the current tag.
]
variable[value] assign[=] call[name[self].attributes][constant[node_pairing]]
if compare[name[value] <ast.NotIn object at 0x7da2590d7190> name[IMB].NODE_PAIRING] begin[:]
variable[msg] assign[=] constant[Unexpected {0} value: got "{1}" but valid values are {2}]
variable[msg] assign[=] call[name[msg].format, parameter[constant[node_pairing], name[value], name[IMB].NODE_PAIRING]]
<ast.Raise object at 0x7da20c7c98a0>
return[name[value]] | keyword[def] identifier[node_pairing] ( identifier[self] ):
literal[string]
identifier[value] = identifier[self] . identifier[attributes] [ literal[string] ]
keyword[if] identifier[value] keyword[not] keyword[in] identifier[IMB] . identifier[NODE_PAIRING] :
identifier[msg] = literal[string]
identifier[msg] = identifier[msg] . identifier[format] ( literal[string] , identifier[value] , identifier[IMB] . identifier[NODE_PAIRING] )
keyword[raise] identifier[ValueError] ( identifier[msg] )
keyword[return] identifier[value] | def node_pairing(self):
"""if "node" then test current node and next one
if "tag", then create tests for every pair of the current tag.
"""
value = self.attributes['node_pairing']
if value not in IMB.NODE_PAIRING:
msg = 'Unexpected {0} value: got "{1}" but valid values are {2}'
msg = msg.format('node_pairing', value, IMB.NODE_PAIRING)
raise ValueError(msg) # depends on [control=['if'], data=['value']]
return value |
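A minimal self-contained sketch of the validation above; the IMB class and its
NODE_PAIRING constant are assumptions, with ('node', 'tag') an illustrative guess
at the valid values.
class IMB:
    NODE_PAIRING = ('node', 'tag')  # assumed valid values, for illustration

class Benchmark:
    def __init__(self, attributes):
        self.attributes = attributes

    @property
    def node_pairing(self):
        value = self.attributes['node_pairing']
        if value not in IMB.NODE_PAIRING:
            msg = 'Unexpected {0} value: got "{1}" but valid values are {2}'
            raise ValueError(msg.format('node_pairing', value, IMB.NODE_PAIRING))
        return value

print(Benchmark({'node_pairing': 'tag'}).node_pairing)  # -> tag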
def update_pypsa_generator_import(network):
"""
Translate graph based grid representation to PyPSA Network
For details from a user perspective see API documentation of
:meth:`~.grid.network.EDisGo.analyze` of the API class
:class:`~.grid.network.EDisGo`.
Translating eDisGo's grid topology to PyPSA representation is structured
into translating the topology and adding time series for components of the
grid. In both cases translation of MV grid only (`mode='mv'`), LV grid only
(`mode='lv'`), MV and LV (`mode=None`) share some code. The
code is organized as follows:
* Medium-voltage only (`mode='mv'`): All medium-voltage grid components are
    exported by :func:`mv_to_pypsa` including the LV station. LV grid load
    and generation are considered using :func:`add_aggregated_lv_components`.
    Time series are collected by `_pypsa_load_timeseries` (as an example
    for loads; generators and buses are handled analogously), specifying
    `mode='mv'`. Time series for aggregated load/generation at substations
    are determined individually.
    * Low-voltage only (`mode='lv'`): LV grid topology including the MV-LV
    transformer is exported. The slack is defined at the primary side of the
    MV-LV transformer.
    * Both levels MV+LV (`mode=None`): The entire grid topology is translated to
    PyPSA in order to perform a complete power flow analysis in both levels
    together. First, both grid levels are translated separately using
    :func:`mv_to_pypsa` and :func:`lv_to_pypsa`. These are merged by
    :func:`combine_mv_and_lv`. Time series are obtained at once for both grid
    levels.
    This PyPSA interface is aware of translation errors and performs these checks
    on the integrity of data converted to the PyPSA grid representation:
    * Sub-graphs/ Sub-networks: It is ensured the grid has no islanded parts
    * Completeness of time series: It is ensured each component has a time
    series
    * Buses available: Each component (load, generator, line, transformer) is
    connected to a bus. The PyPSA representation is checked for completeness of
    buses.
* Duplicate labels in components DataFrames and components' time series
DataFrames
Parameters
----------
network : :class:`~.grid.network.Network`
eDisGo grid container
mode : str
Determines grid levels that are translated to
`PyPSA grid representation
<https://www.pypsa.org/doc/components.html#network>`_. Specify
* None to export MV and LV grid levels. None is the default.
* ('mv' to export MV grid level only. This includes cumulative load and
generation from underlying LV grid aggregated at respective LV
station. This option is implemented, though the rest of edisgo does
not handle it yet.)
* ('lv' to export LV grid level only. This option is not yet
implemented)
timesteps : :pandas:`pandas.DatetimeIndex<datetimeindex>` or \
:pandas:`pandas.Timestamp<timestamp>`
Timesteps specifies which time steps to export to pypsa representation
and use in power flow analysis.
Returns
-------
:pypsa:`pypsa.Network<network>`
The `PyPSA network
<https://www.pypsa.org/doc/components.html#network>`_ container.
"""
# get topology and time series data
if network.pypsa.edisgo_mode is None:
mv_components = mv_to_pypsa(network)
lv_components = lv_to_pypsa(network)
components = combine_mv_and_lv(mv_components, lv_components)
    elif network.pypsa.edisgo_mode == 'mv':
raise NotImplementedError
    elif network.pypsa.edisgo_mode == 'lv':
raise NotImplementedError
else:
raise ValueError("Provide proper mode or leave it empty to export "
"entire grid topology.")
# check topology
_check_topology(components)
# create power flow problem
pypsa_network = PyPSANetwork()
pypsa_network.edisgo_mode = network.pypsa.edisgo_mode
pypsa_network.set_snapshots(network.pypsa.snapshots)
# import grid topology to PyPSA network
# buses are created first to avoid warnings
pypsa_network.import_components_from_dataframe(components['Bus'], 'Bus')
for k, comps in components.items():
        if k != 'Bus' and not comps.empty:
pypsa_network.import_components_from_dataframe(comps, k)
# import time series to PyPSA network
pypsa_network.generators_t.p_set = network.pypsa.generators_t.p_set
pypsa_network.generators_t.q_set = network.pypsa.generators_t.q_set
pypsa_network.loads_t.p_set = network.pypsa.loads_t.p_set
pypsa_network.loads_t.q_set = network.pypsa.loads_t.q_set
pypsa_network.storage_units_t.p_set = network.pypsa.storage_units_t.p_set
pypsa_network.storage_units_t.q_set = network.pypsa.storage_units_t.q_set
pypsa_network.buses_t.v_mag_pu_set = network.pypsa.buses_t.v_mag_pu_set
network.pypsa = pypsa_network
if len(list(components['Generator'].index.values)) > 1:
update_pypsa_generator_timeseries(network)
if list(components['Bus'].index.values):
update_pypsa_bus_timeseries(network)
if len(list(components['StorageUnit'].index.values)) > 0:
update_pypsa_storage_timeseries(network)
_check_integrity_of_pypsa(pypsa_network) | def function[update_pypsa_generator_import, parameter[network]]:
constant[
Translate graph based grid representation to PyPSA Network
For details from a user perspective see API documentation of
:meth:`~.grid.network.EDisGo.analyze` of the API class
:class:`~.grid.network.EDisGo`.
Translating eDisGo's grid topology to PyPSA representation is structured
into translating the topology and adding time series for components of the
grid. In both cases translation of MV grid only (`mode='mv'`), LV grid only
(`mode='lv'`), MV and LV (`mode=None`) share some code. The
code is organized as follows:
* Medium-voltage only (`mode='mv'`): All medium-voltage grid components are
exported by :func:`mv_to_pypsa` including the LV station. LV grid load
and generation are considered using :func:`add_aggregated_lv_components`.
Time series are collected by `_pypsa_load_timeseries` (as an example
for loads; generators and buses are handled analogously), specifying
`mode='mv'`. Time series for aggregated load/generation at substations
are determined individually.
* Low-voltage only (`mode='lv'`): LV grid topology including the MV-LV
transformer is exported. The slack is defined at the primary side of the
MV-LV transformer.
* Both levels MV+LV (`mode=None`): The entire grid topology is translated to
PyPSA in order to perform a complete power flow analysis in both levels
together. First, both grid levels are translated separately using
:func:`mv_to_pypsa` and :func:`lv_to_pypsa`. These are merged by
:func:`combine_mv_and_lv`. Time series are obtained at once for both grid
levels.
This PyPSA interface is aware of translation errors and performs these checks
on the integrity of data converted to the PyPSA grid representation:
* Sub-graphs/ Sub-networks: It is ensured the grid has no islanded parts
* Completeness of time series: It is ensured each component has a time
series
* Buses available: Each component (load, generator, line, transformer) is
connected to a bus. The PyPSA representation is checked for completeness of
buses.
* Duplicate labels in components DataFrames and components' time series
DataFrames
Parameters
----------
network : :class:`~.grid.network.Network`
eDisGo grid container
mode : str
Determines grid levels that are translated to
`PyPSA grid representation
<https://www.pypsa.org/doc/components.html#network>`_. Specify
* None to export MV and LV grid levels. None is the default.
* ('mv' to export MV grid level only. This includes cumulative load and
generation from underlying LV grid aggregated at respective LV
station. This option is implemented, though the rest of edisgo does
not handle it yet.)
* ('lv' to export LV grid level only. This option is not yet
implemented)
timesteps : :pandas:`pandas.DatetimeIndex<datetimeindex>` or :pandas:`pandas.Timestamp<timestamp>`
Timesteps specifies which time steps to export to pypsa representation
and use in power flow analysis.
Returns
-------
:pypsa:`pypsa.Network<network>`
The `PyPSA network
<https://www.pypsa.org/doc/components.html#network>`_ container.
]
if compare[name[network].pypsa.edisgo_mode is constant[None]] begin[:]
variable[mv_components] assign[=] call[name[mv_to_pypsa], parameter[name[network]]]
variable[lv_components] assign[=] call[name[lv_to_pypsa], parameter[name[network]]]
variable[components] assign[=] call[name[combine_mv_and_lv], parameter[name[mv_components], name[lv_components]]]
call[name[_check_topology], parameter[name[components]]]
variable[pypsa_network] assign[=] call[name[PyPSANetwork], parameter[]]
name[pypsa_network].edisgo_mode assign[=] name[network].pypsa.edisgo_mode
call[name[pypsa_network].set_snapshots, parameter[name[network].pypsa.snapshots]]
call[name[pypsa_network].import_components_from_dataframe, parameter[call[name[components]][constant[Bus]], constant[Bus]]]
for taget[tuple[[<ast.Name object at 0x7da1b03e01f0>, <ast.Name object at 0x7da1b03e3d60>]]] in starred[call[name[components].items, parameter[]]] begin[:]
if <ast.BoolOp object at 0x7da1b03e3b50> begin[:]
call[name[pypsa_network].import_components_from_dataframe, parameter[name[comps], name[k]]]
name[pypsa_network].generators_t.p_set assign[=] name[network].pypsa.generators_t.p_set
name[pypsa_network].generators_t.q_set assign[=] name[network].pypsa.generators_t.q_set
name[pypsa_network].loads_t.p_set assign[=] name[network].pypsa.loads_t.p_set
name[pypsa_network].loads_t.q_set assign[=] name[network].pypsa.loads_t.q_set
name[pypsa_network].storage_units_t.p_set assign[=] name[network].pypsa.storage_units_t.p_set
name[pypsa_network].storage_units_t.q_set assign[=] name[network].pypsa.storage_units_t.q_set
name[pypsa_network].buses_t.v_mag_pu_set assign[=] name[network].pypsa.buses_t.v_mag_pu_set
name[network].pypsa assign[=] name[pypsa_network]
if compare[call[name[len], parameter[call[name[list], parameter[call[name[components]][constant[Generator]].index.values]]]] greater[>] constant[1]] begin[:]
call[name[update_pypsa_generator_timeseries], parameter[name[network]]]
if call[name[list], parameter[call[name[components]][constant[Bus]].index.values]] begin[:]
call[name[update_pypsa_bus_timeseries], parameter[name[network]]]
if compare[call[name[len], parameter[call[name[list], parameter[call[name[components]][constant[StorageUnit]].index.values]]]] greater[>] constant[0]] begin[:]
call[name[update_pypsa_storage_timeseries], parameter[name[network]]]
call[name[_check_integrity_of_pypsa], parameter[name[pypsa_network]]] | keyword[def] identifier[update_pypsa_generator_import] ( identifier[network] ):
literal[string]
keyword[if] identifier[network] . identifier[pypsa] . identifier[edisgo_mode] keyword[is] keyword[None] :
identifier[mv_components] = identifier[mv_to_pypsa] ( identifier[network] )
identifier[lv_components] = identifier[lv_to_pypsa] ( identifier[network] )
identifier[components] = identifier[combine_mv_and_lv] ( identifier[mv_components] , identifier[lv_components] )
keyword[elif] identifier[network] . identifier[pypsa] . identifier[edisgo_mode] keyword[is] literal[string] :
keyword[raise] identifier[NotImplementedError]
keyword[elif] identifier[network] . identifier[pypsa] . identifier[edisgo_mode] keyword[is] literal[string] :
keyword[raise] identifier[NotImplementedError]
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string]
literal[string] )
identifier[_check_topology] ( identifier[components] )
identifier[pypsa_network] = identifier[PyPSANetwork] ()
identifier[pypsa_network] . identifier[edisgo_mode] = identifier[network] . identifier[pypsa] . identifier[edisgo_mode]
identifier[pypsa_network] . identifier[set_snapshots] ( identifier[network] . identifier[pypsa] . identifier[snapshots] )
identifier[pypsa_network] . identifier[import_components_from_dataframe] ( identifier[components] [ literal[string] ], literal[string] )
keyword[for] identifier[k] , identifier[comps] keyword[in] identifier[components] . identifier[items] ():
keyword[if] identifier[k] keyword[is] keyword[not] literal[string] keyword[and] keyword[not] identifier[comps] . identifier[empty] :
identifier[pypsa_network] . identifier[import_components_from_dataframe] ( identifier[comps] , identifier[k] )
identifier[pypsa_network] . identifier[generators_t] . identifier[p_set] = identifier[network] . identifier[pypsa] . identifier[generators_t] . identifier[p_set]
identifier[pypsa_network] . identifier[generators_t] . identifier[q_set] = identifier[network] . identifier[pypsa] . identifier[generators_t] . identifier[q_set]
identifier[pypsa_network] . identifier[loads_t] . identifier[p_set] = identifier[network] . identifier[pypsa] . identifier[loads_t] . identifier[p_set]
identifier[pypsa_network] . identifier[loads_t] . identifier[q_set] = identifier[network] . identifier[pypsa] . identifier[loads_t] . identifier[q_set]
identifier[pypsa_network] . identifier[storage_units_t] . identifier[p_set] = identifier[network] . identifier[pypsa] . identifier[storage_units_t] . identifier[p_set]
identifier[pypsa_network] . identifier[storage_units_t] . identifier[q_set] = identifier[network] . identifier[pypsa] . identifier[storage_units_t] . identifier[q_set]
identifier[pypsa_network] . identifier[buses_t] . identifier[v_mag_pu_set] = identifier[network] . identifier[pypsa] . identifier[buses_t] . identifier[v_mag_pu_set]
identifier[network] . identifier[pypsa] = identifier[pypsa_network]
keyword[if] identifier[len] ( identifier[list] ( identifier[components] [ literal[string] ]. identifier[index] . identifier[values] ))> literal[int] :
identifier[update_pypsa_generator_timeseries] ( identifier[network] )
keyword[if] identifier[list] ( identifier[components] [ literal[string] ]. identifier[index] . identifier[values] ):
identifier[update_pypsa_bus_timeseries] ( identifier[network] )
keyword[if] identifier[len] ( identifier[list] ( identifier[components] [ literal[string] ]. identifier[index] . identifier[values] ))> literal[int] :
identifier[update_pypsa_storage_timeseries] ( identifier[network] )
identifier[_check_integrity_of_pypsa] ( identifier[pypsa_network] ) | def update_pypsa_generator_import(network):
"""
Translate graph based grid representation to PyPSA Network
For details from a user perspective see API documentation of
:meth:`~.grid.network.EDisGo.analyze` of the API class
:class:`~.grid.network.EDisGo`.
Translating eDisGo's grid topology to PyPSA representation is structured
into translating the topology and adding time series for components of the
grid. In both cases translation of MV grid only (`mode='mv'`), LV grid only
(`mode='lv'`), MV and LV (`mode=None`) share some code. The
code is organized as follows:
* Medium-voltage only (`mode='mv'`): All medium-voltage grid components are
    exported by :func:`mv_to_pypsa` including the LV station. LV grid load
    and generation are considered using :func:`add_aggregated_lv_components`.
    Time series are collected by `_pypsa_load_timeseries` (as an example
    for loads; generators and buses are handled analogously), specifying
    `mode='mv'`. Time series for aggregated load/generation at substations
    are determined individually.
    * Low-voltage only (`mode='lv'`): LV grid topology including the MV-LV
    transformer is exported. The slack is defined at the primary side of the
    MV-LV transformer.
    * Both levels MV+LV (`mode=None`): The entire grid topology is translated to
    PyPSA in order to perform a complete power flow analysis in both levels
    together. First, both grid levels are translated separately using
    :func:`mv_to_pypsa` and :func:`lv_to_pypsa`. These are merged by
    :func:`combine_mv_and_lv`. Time series are obtained at once for both grid
    levels.
    This PyPSA interface is aware of translation errors and performs these checks
    on the integrity of data converted to the PyPSA grid representation:
    * Sub-graphs/ Sub-networks: It is ensured the grid has no islanded parts
    * Completeness of time series: It is ensured each component has a time
    series
    * Buses available: Each component (load, generator, line, transformer) is
    connected to a bus. The PyPSA representation is checked for completeness of
    buses.
* Duplicate labels in components DataFrames and components' time series
DataFrames
Parameters
----------
network : :class:`~.grid.network.Network`
eDisGo grid container
mode : str
Determines grid levels that are translated to
`PyPSA grid representation
<https://www.pypsa.org/doc/components.html#network>`_. Specify
* None to export MV and LV grid levels. None is the default.
* ('mv' to export MV grid level only. This includes cumulative load and
generation from underlying LV grid aggregated at respective LV
station. This option is implemented, though the rest of edisgo does
not handle it yet.)
* ('lv' to export LV grid level only. This option is not yet
implemented)
timesteps : :pandas:`pandas.DatetimeIndex<datetimeindex>` or :pandas:`pandas.Timestamp<timestamp>`
Timesteps specifies which time steps to export to pypsa representation
and use in power flow analysis.
Returns
-------
:pypsa:`pypsa.Network<network>`
The `PyPSA network
<https://www.pypsa.org/doc/components.html#network>`_ container.
"""
# get topology and time series data
if network.pypsa.edisgo_mode is None:
mv_components = mv_to_pypsa(network)
lv_components = lv_to_pypsa(network)
components = combine_mv_and_lv(mv_components, lv_components) # depends on [control=['if'], data=[]]
    elif network.pypsa.edisgo_mode == 'mv':
raise NotImplementedError # depends on [control=['if'], data=[]]
    elif network.pypsa.edisgo_mode == 'lv':
raise NotImplementedError # depends on [control=['if'], data=[]]
else:
raise ValueError('Provide proper mode or leave it empty to export entire grid topology.')
# check topology
_check_topology(components)
# create power flow problem
pypsa_network = PyPSANetwork()
pypsa_network.edisgo_mode = network.pypsa.edisgo_mode
pypsa_network.set_snapshots(network.pypsa.snapshots)
# import grid topology to PyPSA network
# buses are created first to avoid warnings
pypsa_network.import_components_from_dataframe(components['Bus'], 'Bus')
for (k, comps) in components.items():
        if k != 'Bus' and (not comps.empty):
pypsa_network.import_components_from_dataframe(comps, k) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
# import time series to PyPSA network
pypsa_network.generators_t.p_set = network.pypsa.generators_t.p_set
pypsa_network.generators_t.q_set = network.pypsa.generators_t.q_set
pypsa_network.loads_t.p_set = network.pypsa.loads_t.p_set
pypsa_network.loads_t.q_set = network.pypsa.loads_t.q_set
pypsa_network.storage_units_t.p_set = network.pypsa.storage_units_t.p_set
pypsa_network.storage_units_t.q_set = network.pypsa.storage_units_t.q_set
pypsa_network.buses_t.v_mag_pu_set = network.pypsa.buses_t.v_mag_pu_set
network.pypsa = pypsa_network
if len(list(components['Generator'].index.values)) > 1:
update_pypsa_generator_timeseries(network) # depends on [control=['if'], data=[]]
if list(components['Bus'].index.values):
update_pypsa_bus_timeseries(network) # depends on [control=['if'], data=[]]
if len(list(components['StorageUnit'].index.values)) > 0:
update_pypsa_storage_timeseries(network) # depends on [control=['if'], data=[]]
_check_integrity_of_pypsa(pypsa_network) |
def uuid(self):
""" Return UUID of logical volume
:return: str
"""
uuid_file = '/sys/block/%s/dm/uuid' % os.path.basename(os.path.realpath(self.volume_path()))
lv_uuid = open(uuid_file).read().strip()
    if lv_uuid.startswith('LVM-'):
return lv_uuid[4:]
return lv_uuid | def function[uuid, parameter[self]]:
constant[ Return UUID of logical volume
:return: str
]
variable[uuid_file] assign[=] binary_operation[constant[/sys/block/%s/dm/uuid] <ast.Mod object at 0x7da2590d6920> call[name[os].path.basename, parameter[call[name[os].path.realpath, parameter[call[name[self].volume_path, parameter[]]]]]]]
variable[lv_uuid] assign[=] call[call[call[name[open], parameter[name[uuid_file]]].read, parameter[]].strip, parameter[]]
if compare[call[name[lv_uuid].startswith, parameter[constant[LVM-]]] is constant[True]] begin[:]
return[call[name[lv_uuid]][<ast.Slice object at 0x7da20c6c7c40>]]
return[name[lv_uuid]] | keyword[def] identifier[uuid] ( identifier[self] ):
literal[string]
identifier[uuid_file] = literal[string] % identifier[os] . identifier[path] . identifier[basename] ( identifier[os] . identifier[path] . identifier[realpath] ( identifier[self] . identifier[volume_path] ()))
identifier[lv_uuid] = identifier[open] ( identifier[uuid_file] ). identifier[read] (). identifier[strip] ()
keyword[if] identifier[lv_uuid] . identifier[startswith] ( literal[string] ) keyword[is] keyword[True] :
keyword[return] identifier[lv_uuid] [ literal[int] :]
keyword[return] identifier[lv_uuid] | def uuid(self):
""" Return UUID of logical volume
:return: str
"""
uuid_file = '/sys/block/%s/dm/uuid' % os.path.basename(os.path.realpath(self.volume_path()))
lv_uuid = open(uuid_file).read().strip()
    if lv_uuid.startswith('LVM-'):
return lv_uuid[4:] # depends on [control=['if'], data=[]]
return lv_uuid |
def transform(self, data):
"""
:param data:
:type data: dict
:return:
:rtype: dict
"""
out = {}
for key, val in data.items():
if key in self._transform_map:
target = self._transform_map[key]
key, val = target(key, val) if type(target) == types.FunctionType else (key, target)
out[key] = val
return out | def function[transform, parameter[self, data]]:
constant[
:param data:
:type data: dict
:return:
:rtype: dict
]
variable[out] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da18dc07250>, <ast.Name object at 0x7da18dc05a80>]]] in starred[call[name[data].items, parameter[]]] begin[:]
if compare[name[key] in name[self]._transform_map] begin[:]
variable[target] assign[=] call[name[self]._transform_map][name[key]]
<ast.Tuple object at 0x7da18dc06170> assign[=] <ast.IfExp object at 0x7da18dc06980>
call[name[out]][name[key]] assign[=] name[val]
return[name[out]] | keyword[def] identifier[transform] ( identifier[self] , identifier[data] ):
literal[string]
identifier[out] ={}
keyword[for] identifier[key] , identifier[val] keyword[in] identifier[data] . identifier[items] ():
keyword[if] identifier[key] keyword[in] identifier[self] . identifier[_transform_map] :
identifier[target] = identifier[self] . identifier[_transform_map] [ identifier[key] ]
identifier[key] , identifier[val] = identifier[target] ( identifier[key] , identifier[val] ) keyword[if] identifier[type] ( identifier[target] )== identifier[types] . identifier[FunctionType] keyword[else] ( identifier[key] , identifier[target] )
identifier[out] [ identifier[key] ]= identifier[val]
keyword[return] identifier[out] | def transform(self, data):
"""
:param data:
:type data: dict
:return:
:rtype: dict
"""
out = {}
for (key, val) in data.items():
if key in self._transform_map:
target = self._transform_map[key]
(key, val) = target(key, val) if type(target) == types.FunctionType else (key, target) # depends on [control=['if'], data=['key']]
out[key] = val # depends on [control=['for'], data=[]]
return out |
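A self-contained sketch of the mapping semantics: entries in _transform_map that
are plain functions rewrite (key, value) pairs, while any other entry replaces the
value outright. The Renamer class and its map are invented for illustration.
import types

class Renamer:
    _transform_map = {
        'name': lambda key, val: ('full_name', val.title()),  # rewrites key and value
        'status': 'active',                                   # constant replaces the value
    }

    def transform(self, data):
        out = {}
        for key, val in data.items():
            if key in self._transform_map:
                target = self._transform_map[key]
                key, val = target(key, val) if type(target) == types.FunctionType else (key, target)
            out[key] = val
        return out

print(Renamer().transform({'name': 'ada lovelace', 'status': 'x', 'id': 7}))
# -> {'full_name': 'Ada Lovelace', 'status': 'active', 'id': 7}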
def schema_delete_field(cls, key):
"""Deletes a field."""
root = '/'.join([API_ROOT, 'schemas', cls.__name__])
payload = {
'className': cls.__name__,
'fields': {
key: {
'__op': 'Delete'
}
}
}
cls.PUT(root, **payload) | def function[schema_delete_field, parameter[cls, key]]:
constant[Deletes a field.]
variable[root] assign[=] call[constant[/].join, parameter[list[[<ast.Name object at 0x7da1b0510eb0>, <ast.Constant object at 0x7da1b05112d0>, <ast.Attribute object at 0x7da1b05114b0>]]]]
variable[payload] assign[=] dictionary[[<ast.Constant object at 0x7da1b0510d60>, <ast.Constant object at 0x7da1b0511030>], [<ast.Attribute object at 0x7da1b0510970>, <ast.Dict object at 0x7da20c9936d0>]]
call[name[cls].PUT, parameter[name[root]]] | keyword[def] identifier[schema_delete_field] ( identifier[cls] , identifier[key] ):
literal[string]
identifier[root] = literal[string] . identifier[join] ([ identifier[API_ROOT] , literal[string] , identifier[cls] . identifier[__name__] ])
identifier[payload] ={
literal[string] : identifier[cls] . identifier[__name__] ,
literal[string] :{
identifier[key] :{
literal[string] : literal[string]
}
}
}
identifier[cls] . identifier[PUT] ( identifier[root] ,** identifier[payload] ) | def schema_delete_field(cls, key):
"""Deletes a field."""
root = '/'.join([API_ROOT, 'schemas', cls.__name__])
payload = {'className': cls.__name__, 'fields': {key: {'__op': 'Delete'}}}
cls.PUT(root, **payload) |
def revise(self, data):
"""
    Revise attribute values with dictionary data.
    Update this document with the data from a dictionary; a value is
    applied if and only if it is not None.
"""
if not isinstance(data, dict):
raise TypeError("`data` has to be a dict!")
for key, value in data.items():
if value is not None:
setattr(self, key, deepcopy(value)) | def function[revise, parameter[self, data]]:
constant[
Revise attribute values with dictionary data.
Update this document with the data from a dictionary; a value is
applied if and only if it is not None.
]
if <ast.UnaryOp object at 0x7da20e963820> begin[:]
<ast.Raise object at 0x7da20e962f80>
for taget[tuple[[<ast.Name object at 0x7da20e962050>, <ast.Name object at 0x7da20e963070>]]] in starred[call[name[data].items, parameter[]]] begin[:]
if compare[name[value] is_not constant[None]] begin[:]
call[name[setattr], parameter[name[self], name[key], call[name[deepcopy], parameter[name[value]]]]] | keyword[def] identifier[revise] ( identifier[self] , identifier[data] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[data] , identifier[dict] ):
keyword[raise] identifier[TypeError] ( literal[string] )
keyword[for] identifier[key] , identifier[value] keyword[in] identifier[data] . identifier[items] ():
keyword[if] identifier[value] keyword[is] keyword[not] keyword[None] :
identifier[setattr] ( identifier[self] , identifier[key] , identifier[deepcopy] ( identifier[value] )) | def revise(self, data):
"""
    Revise attribute values with dictionary data.
    Update this document with the data from a dictionary; a value is
    applied if and only if it is not None.
"""
if not isinstance(data, dict):
raise TypeError('`data` has to be a dict!') # depends on [control=['if'], data=[]]
for (key, value) in data.items():
if value is not None:
setattr(self, key, deepcopy(value)) # depends on [control=['if'], data=['value']] # depends on [control=['for'], data=[]] |
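A minimal usage sketch; the Document host class is hypothetical, and revise itself
requires `from copy import deepcopy`.
from copy import deepcopy

class Document:
    def revise(self, data):
        if not isinstance(data, dict):
            raise TypeError("`data` has to be a dict!")
        for key, value in data.items():
            if value is not None:
                setattr(self, key, deepcopy(value))

doc = Document()
doc.title = "draft"
doc.revise({"title": "final", "author": None})
assert doc.title == "final"        # non-None values are applied
assert not hasattr(doc, "author")  # None values are skipped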
def get_repo(name, config_path=_DEFAULT_CONFIG_PATH, with_packages=False):
'''
Get detailed information about a local package repository.
:param str name: The name of the local repository.
:param str config_path: The path to the configuration file for the aptly instance.
:param bool with_packages: Return a list of packages in the repo.
:return: A dictionary containing information about the repository.
:rtype: dict
CLI Example:
.. code-block:: bash
salt '*' aptly.get_repo name="test-repo"
'''
_validate_config(config_path)
with_packages = six.text_type(bool(with_packages)).lower()
ret = dict()
cmd = ['repo', 'show', '-config={}'.format(config_path),
'-with-packages={}'.format(with_packages), name]
cmd_ret = _cmd_run(cmd)
ret = _parse_show_output(cmd_ret=cmd_ret)
if ret:
log.debug('Found repository: %s', name)
else:
log.debug('Unable to find repository: %s', name)
return ret | def function[get_repo, parameter[name, config_path, with_packages]]:
constant[
Get detailed information about a local package repository.
:param str name: The name of the local repository.
:param str config_path: The path to the configuration file for the aptly instance.
:param bool with_packages: Return a list of packages in the repo.
:return: A dictionary containing information about the repository.
:rtype: dict
CLI Example:
.. code-block:: bash
salt '*' aptly.get_repo name="test-repo"
]
call[name[_validate_config], parameter[name[config_path]]]
variable[with_packages] assign[=] call[call[name[six].text_type, parameter[call[name[bool], parameter[name[with_packages]]]]].lower, parameter[]]
variable[ret] assign[=] call[name[dict], parameter[]]
variable[cmd] assign[=] list[[<ast.Constant object at 0x7da18f58cd00>, <ast.Constant object at 0x7da18f58dcc0>, <ast.Call object at 0x7da18f58ebf0>, <ast.Call object at 0x7da18f58c520>, <ast.Name object at 0x7da18f58df30>]]
variable[cmd_ret] assign[=] call[name[_cmd_run], parameter[name[cmd]]]
variable[ret] assign[=] call[name[_parse_show_output], parameter[]]
if name[ret] begin[:]
call[name[log].debug, parameter[constant[Found repository: %s], name[name]]]
return[name[ret]] | keyword[def] identifier[get_repo] ( identifier[name] , identifier[config_path] = identifier[_DEFAULT_CONFIG_PATH] , identifier[with_packages] = keyword[False] ):
literal[string]
identifier[_validate_config] ( identifier[config_path] )
identifier[with_packages] = identifier[six] . identifier[text_type] ( identifier[bool] ( identifier[with_packages] )). identifier[lower] ()
identifier[ret] = identifier[dict] ()
identifier[cmd] =[ literal[string] , literal[string] , literal[string] . identifier[format] ( identifier[config_path] ),
literal[string] . identifier[format] ( identifier[with_packages] ), identifier[name] ]
identifier[cmd_ret] = identifier[_cmd_run] ( identifier[cmd] )
identifier[ret] = identifier[_parse_show_output] ( identifier[cmd_ret] = identifier[cmd_ret] )
keyword[if] identifier[ret] :
identifier[log] . identifier[debug] ( literal[string] , identifier[name] )
keyword[else] :
identifier[log] . identifier[debug] ( literal[string] , identifier[name] )
keyword[return] identifier[ret] | def get_repo(name, config_path=_DEFAULT_CONFIG_PATH, with_packages=False):
"""
Get detailed information about a local package repository.
:param str name: The name of the local repository.
:param str config_path: The path to the configuration file for the aptly instance.
:param bool with_packages: Return a list of packages in the repo.
:return: A dictionary containing information about the repository.
:rtype: dict
CLI Example:
.. code-block:: bash
salt '*' aptly.get_repo name="test-repo"
"""
_validate_config(config_path)
with_packages = six.text_type(bool(with_packages)).lower()
ret = dict()
cmd = ['repo', 'show', '-config={}'.format(config_path), '-with-packages={}'.format(with_packages), name]
cmd_ret = _cmd_run(cmd)
ret = _parse_show_output(cmd_ret=cmd_ret)
if ret:
log.debug('Found repository: %s', name) # depends on [control=['if'], data=[]]
else:
log.debug('Unable to find repository: %s', name)
return ret |
def delete_empty_children(self):
"""
Walk through the children of this node and delete any that are empty.
"""
    for child in list(self.children):  # iterate over a copy; the list is mutated below
child.delete_empty_children()
try:
if os.path.exists(child.full_path):
os.rmdir(child.full_path)
        except OSError:
            pass
        else:
            self.children.remove(child)
constant[
Walk through the children of this node and delete any that are empty.
]
for taget[name[child]] in starred[name[self].children] begin[:]
call[name[child].delete_empty_children, parameter[]]
<ast.Try object at 0x7da1b053a9b0> | keyword[def] identifier[delete_empty_children] ( identifier[self] ):
literal[string]
keyword[for] identifier[child] keyword[in] identifier[self] . identifier[children] :
identifier[child] . identifier[delete_empty_children] ()
keyword[try] :
keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[child] . identifier[full_path] ):
identifier[os] . identifier[rmdir] ( identifier[child] . identifier[full_path] )
keyword[except] identifier[OSError] : keyword[pass]
keyword[else] : identifier[self] . identifier[children] . identifier[remove] ( identifier[child] ) | def delete_empty_children(self):
"""
Walk through the children of this node and delete any that are empty.
"""
    for child in list(self.children):  # iterate over a copy; the list is mutated below
child.delete_empty_children()
try:
if os.path.exists(child.full_path):
os.rmdir(child.full_path) # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except OSError:
pass # depends on [control=['except'], data=[]]
else:
self.children.remove(child) # depends on [control=['for'], data=['child']] |
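A runnable check of the copy-while-iterating fix above, with a minimal Node
stand-in for the real tree class (Node and its attributes are assumed from the
method's usage).
import os
import tempfile

class Node:
    def __init__(self, full_path, children=None):
        self.full_path = full_path
        self.children = children or []

    def delete_empty_children(self):
        for child in list(self.children):  # copy: self.children is mutated below
            child.delete_empty_children()
            try:
                if os.path.exists(child.full_path):
                    os.rmdir(child.full_path)
            except OSError:
                pass  # directory not empty: keep the child
            else:
                self.children.remove(child)

root = tempfile.mkdtemp()
empty = os.path.join(root, 'empty')
os.mkdir(empty)
tree = Node(root, [Node(empty)])
tree.delete_empty_children()
assert tree.children == [] and not os.path.exists(empty)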
def _fetch_index_package_info(self, package_name, current_version):
"""
:type package_name: str
:type current_version: version.Version
"""
try:
package_canonical_name = package_name
if self.PYPI_API_TYPE == 'simple_html':
package_canonical_name = canonicalize_name(package_name)
response = requests.get(self.PYPI_API_URL.format(package=package_canonical_name), timeout=15)
except HTTPError as e: # pragma: nocover
        return False, str(e)
if not response.ok: # pragma: nocover
return False, 'API error: {}'.format(response.reason)
if self.PYPI_API_TYPE == 'pypi_json':
return self._parse_pypi_json_package_info(package_name, current_version, response)
elif self.PYPI_API_TYPE == 'simple_html':
return self._parse_simple_html_package_info(package_name, current_version, response)
else: # pragma: nocover
        raise NotImplementedError('This PYPI_API_TYPE is not supported')
constant[
:type package_name: str
:type current_version: version.Version
]
<ast.Try object at 0x7da1b038ac50>
if <ast.UnaryOp object at 0x7da1b038b490> begin[:]
return[tuple[[<ast.Constant object at 0x7da1b038a980>, <ast.Call object at 0x7da1b038b6d0>]]]
if compare[name[self].PYPI_API_TYPE equal[==] constant[pypi_json]] begin[:]
return[call[name[self]._parse_pypi_json_package_info, parameter[name[package_name], name[current_version], name[response]]]] | keyword[def] identifier[_fetch_index_package_info] ( identifier[self] , identifier[package_name] , identifier[current_version] ):
literal[string]
keyword[try] :
identifier[package_canonical_name] = identifier[package_name]
keyword[if] identifier[self] . identifier[PYPI_API_TYPE] == literal[string] :
identifier[package_canonical_name] = identifier[canonicalize_name] ( identifier[package_name] )
identifier[response] = identifier[requests] . identifier[get] ( identifier[self] . identifier[PYPI_API_URL] . identifier[format] ( identifier[package] = identifier[package_canonical_name] ), identifier[timeout] = literal[int] )
keyword[except] identifier[HTTPError] keyword[as] identifier[e] :
keyword[return] keyword[False] , identifier[e] . identifier[message]
keyword[if] keyword[not] identifier[response] . identifier[ok] :
keyword[return] keyword[False] , literal[string] . identifier[format] ( identifier[response] . identifier[reason] )
keyword[if] identifier[self] . identifier[PYPI_API_TYPE] == literal[string] :
keyword[return] identifier[self] . identifier[_parse_pypi_json_package_info] ( identifier[package_name] , identifier[current_version] , identifier[response] )
keyword[elif] identifier[self] . identifier[PYPI_API_TYPE] == literal[string] :
keyword[return] identifier[self] . identifier[_parse_simple_html_package_info] ( identifier[package_name] , identifier[current_version] , identifier[response] )
keyword[else] :
keyword[raise] identifier[NotImplementedError] ( literal[string] ) | def _fetch_index_package_info(self, package_name, current_version):
"""
:type package_name: str
:type current_version: version.Version
"""
try:
package_canonical_name = package_name
if self.PYPI_API_TYPE == 'simple_html':
package_canonical_name = canonicalize_name(package_name) # depends on [control=['if'], data=[]]
response = requests.get(self.PYPI_API_URL.format(package=package_canonical_name), timeout=15) # depends on [control=['try'], data=[]]
except HTTPError as e: # pragma: nocover
        return (False, str(e)) # depends on [control=['except'], data=['e']]
if not response.ok: # pragma: nocover
return (False, 'API error: {}'.format(response.reason)) # depends on [control=['if'], data=[]]
if self.PYPI_API_TYPE == 'pypi_json':
return self._parse_pypi_json_package_info(package_name, current_version, response) # depends on [control=['if'], data=[]]
elif self.PYPI_API_TYPE == 'simple_html':
return self._parse_simple_html_package_info(package_name, current_version, response) # depends on [control=['if'], data=[]]
else: # pragma: nocover
        raise NotImplementedError('This PYPI_API_TYPE is not supported')
def url_signature(url: str) -> Optional[Tuple]:
"""
    Return an identity signature for url
:param url: item to get signature for
:return: tuple containing last modified, length and, if present, etag
"""
request = urllib.request.Request(url)
request.get_method = lambda: 'HEAD'
response = None
try:
response = urllib.request.urlopen(request)
except urllib.error.HTTPError:
return None
return response.info()['Last-Modified'], response.info()['Content-Length'], response.info().get('ETag') | def function[url_signature, parameter[url]]:
constant[
Return an identity signature for url
:param url: item to get signature for
:return: tuple containing last modified, length and, if present, etag
]
variable[request] assign[=] call[name[urllib].request.Request, parameter[name[url]]]
name[request].get_method assign[=] <ast.Lambda object at 0x7da18c4cf130>
variable[response] assign[=] constant[None]
<ast.Try object at 0x7da18c4cca60>
return[tuple[[<ast.Subscript object at 0x7da18c4cf100>, <ast.Subscript object at 0x7da18c4cf160>, <ast.Call object at 0x7da18c4ce110>]]] | keyword[def] identifier[url_signature] ( identifier[url] : identifier[str] )-> identifier[Optional] [ identifier[Tuple] ]:
literal[string]
identifier[request] = identifier[urllib] . identifier[request] . identifier[Request] ( identifier[url] )
identifier[request] . identifier[get_method] = keyword[lambda] : literal[string]
identifier[response] = keyword[None]
keyword[try] :
identifier[response] = identifier[urllib] . identifier[request] . identifier[urlopen] ( identifier[request] )
keyword[except] identifier[urllib] . identifier[error] . identifier[HTTPError] :
keyword[return] keyword[None]
keyword[return] identifier[response] . identifier[info] ()[ literal[string] ], identifier[response] . identifier[info] ()[ literal[string] ], identifier[response] . identifier[info] (). identifier[get] ( literal[string] ) | def url_signature(url: str) -> Optional[Tuple]:
"""
    Return an identity signature for url
:param url: item to get signature for
:return: tuple containing last modified, length and, if present, etag
"""
request = urllib.request.Request(url)
request.get_method = lambda : 'HEAD'
response = None
try:
response = urllib.request.urlopen(request) # depends on [control=['try'], data=[]]
except urllib.error.HTTPError:
return None # depends on [control=['except'], data=[]]
return (response.info()['Last-Modified'], response.info()['Content-Length'], response.info().get('ETag')) |
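A hedged usage sketch; this performs a real HEAD request, and the URL is
illustrative only.
sig = url_signature("https://example.com/")
if sig is None:
    print("resource unavailable")
else:
    last_modified, content_length, etag = sig
    print(last_modified, content_length, etag)  # etag may be None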
async def get(self, request):
"""Gets the user_id for the request.
Gets the ticket for the request using the get_ticket() function, and
authenticates the ticket.
Args:
request: aiohttp Request object.
Returns:
The userid for the request, or None if the ticket is not
authenticated.
"""
ticket = await self.get_ticket(request)
if ticket is None:
return None
try:
# Returns a tuple of (user_id, token, userdata, validuntil)
now = time.time()
fields = self._ticket.validate(ticket, self._get_ip(request), now)
# Check if we need to reissue a ticket
if (self._reissue_time is not None and
now >= (fields.valid_until - self._reissue_time)):
# Reissue our ticket, and save it in our request.
request[_REISSUE_KEY] = self._new_ticket(request, fields.user_id)
return fields.user_id
    except TicketError:
return None | <ast.AsyncFunctionDef object at 0x7da18ede6a10> | keyword[async] keyword[def] identifier[get] ( identifier[self] , identifier[request] ):
literal[string]
identifier[ticket] = keyword[await] identifier[self] . identifier[get_ticket] ( identifier[request] )
keyword[if] identifier[ticket] keyword[is] keyword[None] :
keyword[return] keyword[None]
keyword[try] :
identifier[now] = identifier[time] . identifier[time] ()
identifier[fields] = identifier[self] . identifier[_ticket] . identifier[validate] ( identifier[ticket] , identifier[self] . identifier[_get_ip] ( identifier[request] ), identifier[now] )
keyword[if] ( identifier[self] . identifier[_reissue_time] keyword[is] keyword[not] keyword[None] keyword[and]
identifier[now] >=( identifier[fields] . identifier[valid_until] - identifier[self] . identifier[_reissue_time] )):
identifier[request] [ identifier[_REISSUE_KEY] ]= identifier[self] . identifier[_new_ticket] ( identifier[request] , identifier[fields] . identifier[user_id] )
keyword[return] identifier[fields] . identifier[user_id]
keyword[except] identifier[TicketError] keyword[as] identifier[e] :
keyword[return] keyword[None] | async def get(self, request):
"""Gets the user_id for the request.
Gets the ticket for the request using the get_ticket() function, and
authenticates the ticket.
Args:
request: aiohttp Request object.
Returns:
The userid for the request, or None if the ticket is not
authenticated.
"""
ticket = await self.get_ticket(request)
if ticket is None:
return None # depends on [control=['if'], data=[]]
try:
# Returns a tuple of (user_id, token, userdata, validuntil)
now = time.time()
fields = self._ticket.validate(ticket, self._get_ip(request), now)
# Check if we need to reissue a ticket
if self._reissue_time is not None and now >= fields.valid_until - self._reissue_time:
# Reissue our ticket, and save it in our request.
request[_REISSUE_KEY] = self._new_ticket(request, fields.user_id) # depends on [control=['if'], data=[]]
return fields.user_id # depends on [control=['try'], data=[]]
    except TicketError:
return None # depends on [control=['except'], data=[]] |
def get(self, request, *args, **kwargs):
"""
Method for handling GET requests. Passes the
following arguments to the context:
* **versions** - The versions available for this object.\
These will be instances of the inner version class, and \
will not have access to the fields on the base model.
* **done_url** - The result of the `get_done_url` method.
"""
versions = self._get_versions()
return self.render(request, obj=self.object, versions=versions,
done_url=self.get_done_url()) | def function[get, parameter[self, request]]:
constant[
Method for handling GET requests. Passes the
following arguments to the context:
* **versions** - The versions available for this object. These will be instances of the inner version class, and will not have access to the fields on the base model.
* **done_url** - The result of the `get_done_url` method.
]
variable[versions] assign[=] call[name[self]._get_versions, parameter[]]
return[call[name[self].render, parameter[name[request]]]] | keyword[def] identifier[get] ( identifier[self] , identifier[request] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
identifier[versions] = identifier[self] . identifier[_get_versions] ()
keyword[return] identifier[self] . identifier[render] ( identifier[request] , identifier[obj] = identifier[self] . identifier[object] , identifier[versions] = identifier[versions] ,
identifier[done_url] = identifier[self] . identifier[get_done_url] ()) | def get(self, request, *args, **kwargs):
"""
Method for handling GET requests. Passes the
following arguments to the context:
* **versions** - The versions available for this object. These will be instances of the inner version class, and will not have access to the fields on the base model.
* **done_url** - The result of the `get_done_url` method.
"""
versions = self._get_versions()
return self.render(request, obj=self.object, versions=versions, done_url=self.get_done_url()) |
def _build_cache():
"""Preprocess collection queries."""
query = current_app.config['COLLECTIONS_DELETED_RECORDS']
for collection in Collection.query.filter(
Collection.dbquery.isnot(None)).all():
yield collection.name, dict(
query=query.format(dbquery=collection.dbquery),
ancestors=set(_ancestors(collection)),
        ) | def function[_build_cache, parameter[]]:
constant[Preprocess collection queries.]
variable[query] assign[=] call[name[current_app].config][constant[COLLECTIONS_DELETED_RECORDS]]
for taget[name[collection]] in starred[call[call[name[Collection].query.filter, parameter[call[name[Collection].dbquery.isnot, parameter[constant[None]]]]].all, parameter[]]] begin[:]
<ast.Yield object at 0x7da1b0bdb5b0> | keyword[def] identifier[_build_cache] ():
literal[string]
identifier[query] = identifier[current_app] . identifier[config] [ literal[string] ]
keyword[for] identifier[collection] keyword[in] identifier[Collection] . identifier[query] . identifier[filter] (
identifier[Collection] . identifier[dbquery] . identifier[isnot] ( keyword[None] )). identifier[all] ():
keyword[yield] identifier[collection] . identifier[name] , identifier[dict] (
identifier[query] = identifier[query] . identifier[format] ( identifier[dbquery] = identifier[collection] . identifier[dbquery] ),
identifier[ancestors] = identifier[set] ( identifier[_ancestors] ( identifier[collection] )),
) | def _build_cache():
"""Preprocess collection queries."""
query = current_app.config['COLLECTIONS_DELETED_RECORDS']
for collection in Collection.query.filter(Collection.dbquery.isnot(None)).all():
yield (collection.name, dict(query=query.format(dbquery=collection.dbquery), ancestors=set(_ancestors(collection)))) # depends on [control=['for'], data=['collection']]
def _ngrams(segment, n):
"""Extracts n-grams from an input segment.
Parameters
----------
segment: list
Text segment from which n-grams will be extracted.
n: int
Order of n-gram.
Returns
-------
ngram_counts: Counter
    Contains every n-gram of order n in segment, with a count of how many times each occurred.
"""
ngram_counts = Counter()
for i in range(0, len(segment) - n + 1):
ngram = tuple(segment[i:i + n])
ngram_counts[ngram] += 1
return ngram_counts | def function[_ngrams, parameter[segment, n]]:
constant[Extracts n-grams from an input segment.
Parameters
----------
segment: list
Text segment from which n-grams will be extracted.
n: int
Order of n-gram.
Returns
-------
ngram_counts: Counter
Contains every n-gram of order n in segment, with a count of how many times each occurred.
]
variable[ngram_counts] assign[=] call[name[Counter], parameter[]]
for taget[name[i]] in starred[call[name[range], parameter[constant[0], binary_operation[binary_operation[call[name[len], parameter[name[segment]]] - name[n]] + constant[1]]]]] begin[:]
variable[ngram] assign[=] call[name[tuple], parameter[call[name[segment]][<ast.Slice object at 0x7da1b212cf40>]]]
<ast.AugAssign object at 0x7da1b212cee0>
return[name[ngram_counts]] | keyword[def] identifier[_ngrams] ( identifier[segment] , identifier[n] ):
literal[string]
identifier[ngram_counts] = identifier[Counter] ()
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[len] ( identifier[segment] )- identifier[n] + literal[int] ):
identifier[ngram] = identifier[tuple] ( identifier[segment] [ identifier[i] : identifier[i] + identifier[n] ])
identifier[ngram_counts] [ identifier[ngram] ]+= literal[int]
keyword[return] identifier[ngram_counts] | def _ngrams(segment, n):
"""Extracts n-grams from an input segment.
Parameters
----------
segment: list
Text segment from which n-grams will be extracted.
n: int
Order of n-gram.
Returns
-------
ngram_counts: Counter
    Contains every n-gram of order n in segment, with a count of how many times each occurred.
"""
ngram_counts = Counter()
for i in range(0, len(segment) - n + 1):
ngram = tuple(segment[i:i + n])
ngram_counts[ngram] += 1 # depends on [control=['for'], data=['i']]
return ngram_counts |
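A quick check of _ngrams on a tokenized sentence; Counter comes from
`from collections import Counter`, which the function assumes.
from collections import Counter

tokens = ['the', 'cat', 'sat', 'on', 'the', 'cat']
print(_ngrams(tokens, 2))
# Counter({('the', 'cat'): 2, ('cat', 'sat'): 1, ('sat', 'on'): 1, ('on', 'the'): 1})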
def purge_configs():
"""
    Delete any configs found in either the current directory or the
    user's home directory.
"""
user_config = path(CONFIG_FILE_NAME, root=USER)
inplace_config = path(CONFIG_FILE_NAME)
if os.path.isfile(user_config):
os.remove(user_config)
if os.path.isfile(inplace_config):
os.remove(inplace_config) | def function[purge_configs, parameter[]]:
constant[
Delete any configs found in either the current directory or the
user's home directory.
]
variable[user_config] assign[=] call[name[path], parameter[name[CONFIG_FILE_NAME]]]
variable[inplace_config] assign[=] call[name[path], parameter[name[CONFIG_FILE_NAME]]]
if call[name[os].path.isfile, parameter[name[user_config]]] begin[:]
call[name[os].remove, parameter[name[user_config]]]
if call[name[os].path.isfile, parameter[name[inplace_config]]] begin[:]
call[name[os].remove, parameter[name[inplace_config]]] | keyword[def] identifier[purge_configs] ():
literal[string]
identifier[user_config] = identifier[path] ( identifier[CONFIG_FILE_NAME] , identifier[root] = identifier[USER] )
identifier[inplace_config] = identifier[path] ( identifier[CONFIG_FILE_NAME] )
keyword[if] identifier[os] . identifier[path] . identifier[isfile] ( identifier[user_config] ):
identifier[os] . identifier[remove] ( identifier[user_config] )
keyword[if] identifier[os] . identifier[path] . identifier[isfile] ( identifier[inplace_config] ):
identifier[os] . identifier[remove] ( identifier[inplace_config] ) | def purge_configs():
"""
    Delete any configs found in either the current directory or the
    user's home directory.
"""
user_config = path(CONFIG_FILE_NAME, root=USER)
inplace_config = path(CONFIG_FILE_NAME)
if os.path.isfile(user_config):
os.remove(user_config) # depends on [control=['if'], data=[]]
if os.path.isfile(inplace_config):
os.remove(inplace_config) # depends on [control=['if'], data=[]] |
def on(event, *args, **kwargs):
"""
Event method wrapper for bot mixins. When a bot is constructed,
its metaclass inspects all members of all base classes, and
looks for methods marked with an event attribute which is assigned
via this wrapper. It then stores all the methods in a dict
that maps event names to lists of these methods, which are each
called when the event occurs.
"""
def wrapper(func):
        for i, arg in enumerate(args):  # merge positional args into kwargs, keyed by position
kwargs[i] = arg
func.event = Event(event, kwargs)
return func
return wrapper | def function[on, parameter[event]]:
constant[
Event method wrapper for bot mixins. When a bot is constructed,
its metaclass inspects all members of all base classes, and
looks for methods marked with an event attribute which is assigned
via this wrapper. It then stores all the methods in a dict
that maps event names to lists of these methods, which are each
called when the event occurs.
]
def function[wrapper, parameter[func]]:
for taget[tuple[[<ast.Name object at 0x7da1b0f5b1f0>, <ast.Name object at 0x7da1b0f58b80>]]] in starred[name[args]] begin[:]
call[name[kwargs]][name[i]] assign[=] name[arg]
name[func].event assign[=] call[name[Event], parameter[name[event], name[kwargs]]]
return[name[func]]
return[name[wrapper]] | keyword[def] identifier[on] ( identifier[event] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
keyword[def] identifier[wrapper] ( identifier[func] ):
keyword[for] identifier[i] , identifier[arg] keyword[in] identifier[args] :
identifier[kwargs] [ identifier[i] ]= identifier[arg]
identifier[func] . identifier[event] = identifier[Event] ( identifier[event] , identifier[kwargs] )
keyword[return] identifier[func]
keyword[return] identifier[wrapper] | def on(event, *args, **kwargs):
"""
Event method wrapper for bot mixins. When a bot is constructed,
its metaclass inspects all members of all base classes, and
looks for methods marked with an event attribute which is assigned
via this wrapper. It then stores all the methods in a dict
that maps event names to lists of these methods, which are each
called when the event occurs.
"""
def wrapper(func):
        for (i, arg) in enumerate(args):
kwargs[i] = arg # depends on [control=['for'], data=[]]
func.event = Event(event, kwargs)
return func
return wrapper |
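A self-contained sketch of the decorator in use; the Event record here is a
stand-in for the real class (only its (name, kwargs) shape is taken from the
code above).
from collections import namedtuple

Event = namedtuple('Event', ['name', 'kwargs'])  # stand-in for the real Event class

def on(event, *args, **kwargs):
    def wrapper(func):
        for i, arg in enumerate(args):  # positional args become index-keyed entries
            kwargs[i] = arg
        func.event = Event(event, kwargs)
        return func
    return wrapper

class GreeterMixin:
    @on('message', channel='#general')
    def greet(self, msg):
        return 'hello'

print(GreeterMixin.greet.event)
# -> Event(name='message', kwargs={'channel': '#general'})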
def merge_errors(self, errors_local, errors_remote):
"""
Merge errors
Recursively traverses error graph to merge remote errors into local
errors to return a new joined graph.
:param errors_local: dict, local errors, will be updated
:param errors_remote: dict, remote errors, provides updates
:return: dict
"""
for prop in errors_remote:
# create if doesn't exist
if prop not in errors_local:
errors_local[prop] = errors_remote[prop]
continue
local = errors_local[prop]
local = local.errors if isinstance(local, Result) else local
remote = errors_remote[prop]
remote = remote.errors if isinstance(remote, Result) else remote
# check compatibility
if not isinstance(local, type(remote)):
msg = 'Type mismatch on property [{}] when merging errors. '
msg += 'Unable to merge [{}] into [{}]'
raise x.UnableToMergeResultsType(msg.format(
prop,
type(errors_remote[prop]),
type(self.errors[prop])
))
mismatch = 'Unable to merge nested entity errors with nested '
mismatch += 'collection errors on property [{}]'
if 'schema' in local and 'collection' in remote:
raise x.UnableToMergeResultsType(mismatch.format(prop))
if 'collection' in local and 'schema' in remote:
raise x.UnableToMergeResultsType(mismatch.format(prop))
# merge simple & state
if type(remote) is list:
errors_local[prop].extend(remote)
continue
# merge direct errors on nested entities and collection
if 'direct' in remote and 'direct' in local:
errors_local[prop]['direct'].extend(remote['direct'])
# merge nested schema errors
if 'schema' in remote and 'schema' in local:
errors_local[prop]['schema'] = self.merge_errors(
errors_local[prop]['schema'],
remote['schema']
)
# merge nested collections errors
if 'collection' in remote and 'collection' in local:
for index, result in remote['collection'].items():
if index not in local['collection']:
errors_local[prop]['collection'][index] = result
else:
merged = self.merge_errors(
errors_local[prop]['collection'][index].errors,
errors_remote[prop]['collection'][index].errors,
)
errors_local[prop]['collection'][index] = merged
# and return
return errors_local | def function[merge_errors, parameter[self, errors_local, errors_remote]]:
constant[
Merge errors
Recursively traverses error graph to merge remote errors into local
errors to return a new joined graph.
:param errors_local: dict, local errors, will be updated
:param errors_remote: dict, remote errors, provides updates
:return: dict
]
for taget[name[prop]] in starred[name[errors_remote]] begin[:]
if compare[name[prop] <ast.NotIn object at 0x7da2590d7190> name[errors_local]] begin[:]
call[name[errors_local]][name[prop]] assign[=] call[name[errors_remote]][name[prop]]
continue
variable[local] assign[=] call[name[errors_local]][name[prop]]
variable[local] assign[=] <ast.IfExp object at 0x7da20c990f70>
variable[remote] assign[=] call[name[errors_remote]][name[prop]]
variable[remote] assign[=] <ast.IfExp object at 0x7da20c993a90>
if <ast.UnaryOp object at 0x7da20c990bb0> begin[:]
variable[msg] assign[=] constant[Type mismatch on property [{}] when merging errors. ]
<ast.AugAssign object at 0x7da20c993bb0>
<ast.Raise object at 0x7da20c990130>
variable[mismatch] assign[=] constant[Unable to merge nested entity errors with nested ]
<ast.AugAssign object at 0x7da20c990eb0>
if <ast.BoolOp object at 0x7da20c990fd0> begin[:]
<ast.Raise object at 0x7da20c992b30>
if <ast.BoolOp object at 0x7da20c992770> begin[:]
<ast.Raise object at 0x7da20c993610>
if compare[call[name[type], parameter[name[remote]]] is name[list]] begin[:]
call[call[name[errors_local]][name[prop]].extend, parameter[name[remote]]]
continue
if <ast.BoolOp object at 0x7da20c992650> begin[:]
call[call[call[name[errors_local]][name[prop]]][constant[direct]].extend, parameter[call[name[remote]][constant[direct]]]]
if <ast.BoolOp object at 0x7da20c991840> begin[:]
call[call[name[errors_local]][name[prop]]][constant[schema]] assign[=] call[name[self].merge_errors, parameter[call[call[name[errors_local]][name[prop]]][constant[schema]], call[name[remote]][constant[schema]]]]
if <ast.BoolOp object at 0x7da20c992710> begin[:]
for taget[tuple[[<ast.Name object at 0x7da20c991510>, <ast.Name object at 0x7da20c9938e0>]]] in starred[call[call[name[remote]][constant[collection]].items, parameter[]]] begin[:]
if compare[name[index] <ast.NotIn object at 0x7da2590d7190> call[name[local]][constant[collection]]] begin[:]
call[call[call[name[errors_local]][name[prop]]][constant[collection]]][name[index]] assign[=] name[result]
return[name[errors_local]] | keyword[def] identifier[merge_errors] ( identifier[self] , identifier[errors_local] , identifier[errors_remote] ):
literal[string]
keyword[for] identifier[prop] keyword[in] identifier[errors_remote] :
keyword[if] identifier[prop] keyword[not] keyword[in] identifier[errors_local] :
identifier[errors_local] [ identifier[prop] ]= identifier[errors_remote] [ identifier[prop] ]
keyword[continue]
identifier[local] = identifier[errors_local] [ identifier[prop] ]
identifier[local] = identifier[local] . identifier[errors] keyword[if] identifier[isinstance] ( identifier[local] , identifier[Result] ) keyword[else] identifier[local]
identifier[remote] = identifier[errors_remote] [ identifier[prop] ]
identifier[remote] = identifier[remote] . identifier[errors] keyword[if] identifier[isinstance] ( identifier[remote] , identifier[Result] ) keyword[else] identifier[remote]
keyword[if] keyword[not] identifier[isinstance] ( identifier[local] , identifier[type] ( identifier[remote] )):
identifier[msg] = literal[string]
identifier[msg] += literal[string]
keyword[raise] identifier[x] . identifier[UnableToMergeResultsType] ( identifier[msg] . identifier[format] (
identifier[prop] ,
identifier[type] ( identifier[errors_remote] [ identifier[prop] ]),
identifier[type] ( identifier[self] . identifier[errors] [ identifier[prop] ])
))
identifier[mismatch] = literal[string]
identifier[mismatch] += literal[string]
keyword[if] literal[string] keyword[in] identifier[local] keyword[and] literal[string] keyword[in] identifier[remote] :
keyword[raise] identifier[x] . identifier[UnableToMergeResultsType] ( identifier[mismatch] . identifier[format] ( identifier[prop] ))
keyword[if] literal[string] keyword[in] identifier[local] keyword[and] literal[string] keyword[in] identifier[remote] :
keyword[raise] identifier[x] . identifier[UnableToMergeResultsType] ( identifier[mismatch] . identifier[format] ( identifier[prop] ))
keyword[if] identifier[type] ( identifier[remote] ) keyword[is] identifier[list] :
identifier[errors_local] [ identifier[prop] ]. identifier[extend] ( identifier[remote] )
keyword[continue]
keyword[if] literal[string] keyword[in] identifier[remote] keyword[and] literal[string] keyword[in] identifier[local] :
identifier[errors_local] [ identifier[prop] ][ literal[string] ]. identifier[extend] ( identifier[remote] [ literal[string] ])
keyword[if] literal[string] keyword[in] identifier[remote] keyword[and] literal[string] keyword[in] identifier[local] :
identifier[errors_local] [ identifier[prop] ][ literal[string] ]= identifier[self] . identifier[merge_errors] (
identifier[errors_local] [ identifier[prop] ][ literal[string] ],
identifier[remote] [ literal[string] ]
)
keyword[if] literal[string] keyword[in] identifier[remote] keyword[and] literal[string] keyword[in] identifier[local] :
keyword[for] identifier[index] , identifier[result] keyword[in] identifier[remote] [ literal[string] ]. identifier[items] ():
keyword[if] identifier[index] keyword[not] keyword[in] identifier[local] [ literal[string] ]:
identifier[errors_local] [ identifier[prop] ][ literal[string] ][ identifier[index] ]= identifier[result]
keyword[else] :
identifier[merged] = identifier[self] . identifier[merge_errors] (
identifier[errors_local] [ identifier[prop] ][ literal[string] ][ identifier[index] ]. identifier[errors] ,
identifier[errors_remote] [ identifier[prop] ][ literal[string] ][ identifier[index] ]. identifier[errors] ,
)
identifier[errors_local] [ identifier[prop] ][ literal[string] ][ identifier[index] ]= identifier[merged]
keyword[return] identifier[errors_local] | def merge_errors(self, errors_local, errors_remote):
"""
Merge errors
    Recursively traverses the error graph to merge remote errors into
    local errors and returns the joined graph.
:param errors_local: dict, local errors, will be updated
:param errors_remote: dict, remote errors, provides updates
:return: dict
"""
for prop in errors_remote:
# create if doesn't exist
if prop not in errors_local:
errors_local[prop] = errors_remote[prop]
continue # depends on [control=['if'], data=['prop', 'errors_local']]
local = errors_local[prop]
local = local.errors if isinstance(local, Result) else local
remote = errors_remote[prop]
remote = remote.errors if isinstance(remote, Result) else remote
# check compatibility
if not isinstance(local, type(remote)):
msg = 'Type mismatch on property [{}] when merging errors. '
msg += 'Unable to merge [{}] into [{}]'
raise x.UnableToMergeResultsType(msg.format(prop, type(errors_remote[prop]), type(self.errors[prop]))) # depends on [control=['if'], data=[]]
mismatch = 'Unable to merge nested entity errors with nested '
mismatch += 'collection errors on property [{}]'
if 'schema' in local and 'collection' in remote:
raise x.UnableToMergeResultsType(mismatch.format(prop)) # depends on [control=['if'], data=[]]
if 'collection' in local and 'schema' in remote:
raise x.UnableToMergeResultsType(mismatch.format(prop)) # depends on [control=['if'], data=[]]
# merge simple & state
if type(remote) is list:
errors_local[prop].extend(remote)
continue # depends on [control=['if'], data=[]]
# merge direct errors on nested entities and collection
if 'direct' in remote and 'direct' in local:
errors_local[prop]['direct'].extend(remote['direct']) # depends on [control=['if'], data=[]]
# merge nested schema errors
if 'schema' in remote and 'schema' in local:
errors_local[prop]['schema'] = self.merge_errors(errors_local[prop]['schema'], remote['schema']) # depends on [control=['if'], data=[]]
# merge nested collections errors
if 'collection' in remote and 'collection' in local:
for (index, result) in remote['collection'].items():
if index not in local['collection']:
errors_local[prop]['collection'][index] = result # depends on [control=['if'], data=['index']]
else:
merged = self.merge_errors(errors_local[prop]['collection'][index].errors, errors_remote[prop]['collection'][index].errors)
errors_local[prop]['collection'][index] = merged # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['prop']]
# and return
return errors_local |
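# Sketch of the simple-list branch. ``Result`` is a stand-in mirroring the
# module's result type (only ``.errors`` is ever read), and the method is
# called unbound since ``self`` is only touched when raising a mismatch.
class Result(object):
    def __init__(self, errors):
        self.errors = errors

local = {'name': ['required'], 'age': ['too small']}
remote = {'name': ['too short'], 'email': ['invalid']}
merged = merge_errors(None, local, remote)
assert merged == {'name': ['required', 'too short'],
                  'age': ['too small'],
                  'email': ['invalid']}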
def import_from_file(request):
"""
Import a part of a source site's page tree via an import of a JSON file
exported to a user's filesystem from the source site's Wagtail Admin
    The source site's base url and the source page id of the point in the
    tree to import define what to import, and the destination parent page
    defines where to import it to.
"""
if request.method == 'POST':
form = ImportFromFileForm(request.POST, request.FILES)
if form.is_valid():
import_data = json.loads(form.cleaned_data['file'].read().decode('utf-8-sig'))
parent_page = form.cleaned_data['parent_page']
try:
page_count = import_pages(import_data, parent_page)
except LookupError as e:
messages.error(request, _(
"Import failed: %(reason)s") % {'reason': e}
)
else:
messages.success(request, ungettext(
"%(count)s page imported.",
"%(count)s pages imported.",
page_count) % {'count': page_count}
)
return redirect('wagtailadmin_explore', parent_page.pk)
else:
form = ImportFromFileForm()
return render(request, 'wagtailimportexport/import_from_file.html', {
'form': form,
}) | def function[import_from_file, parameter[request]]:
constant[
Import a part of a source site's page tree via an import of a JSON file
exported to a user's filesystem from the source site's Wagtail Admin
The source site's base url and the source page id of the point in the
tree to import defined what to import and the destination parent page
defines where to import it to.
]
if compare[name[request].method equal[==] constant[POST]] begin[:]
variable[form] assign[=] call[name[ImportFromFileForm], parameter[name[request].POST, name[request].FILES]]
if call[name[form].is_valid, parameter[]] begin[:]
variable[import_data] assign[=] call[name[json].loads, parameter[call[call[call[name[form].cleaned_data][constant[file]].read, parameter[]].decode, parameter[constant[utf-8-sig]]]]]
variable[parent_page] assign[=] call[name[form].cleaned_data][constant[parent_page]]
<ast.Try object at 0x7da1b152a920>
return[call[name[redirect], parameter[constant[wagtailadmin_explore], name[parent_page].pk]]]
return[call[name[render], parameter[name[request], constant[wagtailimportexport/import_from_file.html], dictionary[[<ast.Constant object at 0x7da18fe92ef0>], [<ast.Name object at 0x7da18fe93be0>]]]]] | keyword[def] identifier[import_from_file] ( identifier[request] ):
literal[string]
keyword[if] identifier[request] . identifier[method] == literal[string] :
identifier[form] = identifier[ImportFromFileForm] ( identifier[request] . identifier[POST] , identifier[request] . identifier[FILES] )
keyword[if] identifier[form] . identifier[is_valid] ():
identifier[import_data] = identifier[json] . identifier[loads] ( identifier[form] . identifier[cleaned_data] [ literal[string] ]. identifier[read] (). identifier[decode] ( literal[string] ))
identifier[parent_page] = identifier[form] . identifier[cleaned_data] [ literal[string] ]
keyword[try] :
identifier[page_count] = identifier[import_pages] ( identifier[import_data] , identifier[parent_page] )
keyword[except] identifier[LookupError] keyword[as] identifier[e] :
identifier[messages] . identifier[error] ( identifier[request] , identifier[_] (
literal[string] )%{ literal[string] : identifier[e] }
)
keyword[else] :
identifier[messages] . identifier[success] ( identifier[request] , identifier[ungettext] (
literal[string] ,
literal[string] ,
identifier[page_count] )%{ literal[string] : identifier[page_count] }
)
keyword[return] identifier[redirect] ( literal[string] , identifier[parent_page] . identifier[pk] )
keyword[else] :
identifier[form] = identifier[ImportFromFileForm] ()
keyword[return] identifier[render] ( identifier[request] , literal[string] ,{
literal[string] : identifier[form] ,
}) | def import_from_file(request):
"""
Import a part of a source site's page tree via an import of a JSON file
exported to a user's filesystem from the source site's Wagtail Admin
The source site's base url and the source page id of the point in the
tree to import defined what to import and the destination parent page
defines where to import it to.
"""
if request.method == 'POST':
form = ImportFromFileForm(request.POST, request.FILES)
if form.is_valid():
import_data = json.loads(form.cleaned_data['file'].read().decode('utf-8-sig'))
parent_page = form.cleaned_data['parent_page']
try:
page_count = import_pages(import_data, parent_page) # depends on [control=['try'], data=[]]
except LookupError as e:
messages.error(request, _('Import failed: %(reason)s') % {'reason': e}) # depends on [control=['except'], data=['e']]
else:
messages.success(request, ungettext('%(count)s page imported.', '%(count)s pages imported.', page_count) % {'count': page_count})
return redirect('wagtailadmin_explore', parent_page.pk) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
form = ImportFromFileForm()
return render(request, 'wagtailimportexport/import_from_file.html', {'form': form}) |
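# Hypothetical wiring sketch for the view above, in the style of a Wagtail
# admin urls module (URL path and name are made up for illustration).
from django.urls import path

urlpatterns = [
    path('import-from-file/', import_from_file,
         name='wagtailimportexport_import_from_file'),
]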
def get_stats(self):
"""
        It returns name/value pairs of the control files
        which are categorised in the stats group.
"""
stats = {}
for name, cls in self.stats.items():
path = self.paths[name]
if os.path.exists(path):
try:
stats[name] = self._PARSERS[cls](fileops.read(path))
except IOError as e:
# XXX: we have to distinguish unexpected errors from the expected ones
if e.errno == errno.EOPNOTSUPP:
# Since 3.5 memory.memsw.* are always created even if disabled.
# If disabled we will get EOPNOTSUPP when read or write them.
# See commit af36f906c0f4c2ffa0482ecdf856a33dc88ae8c5 of the kernel.
pass
                    elif e.errno == errno.EIO:
# memory.kmem.slabinfo throws EIO until limit_in_bytes is set.
pass
else:
raise
return stats | def function[get_stats, parameter[self]]:
constant[
It returns a name and a value pairs of control files
which are categorised in the stats group.
]
variable[stats] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da20e9b1600>, <ast.Name object at 0x7da20e9b07f0>]]] in starred[call[name[self].stats.items, parameter[]]] begin[:]
variable[path] assign[=] call[name[self].paths][name[name]]
if call[name[os].path.exists, parameter[name[path]]] begin[:]
<ast.Try object at 0x7da20e9b2650>
return[name[stats]] | keyword[def] identifier[get_stats] ( identifier[self] ):
literal[string]
identifier[stats] ={}
keyword[for] identifier[name] , identifier[cls] keyword[in] identifier[self] . identifier[stats] . identifier[items] ():
identifier[path] = identifier[self] . identifier[paths] [ identifier[name] ]
keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[path] ):
keyword[try] :
identifier[stats] [ identifier[name] ]= identifier[self] . identifier[_PARSERS] [ identifier[cls] ]( identifier[fileops] . identifier[read] ( identifier[path] ))
keyword[except] identifier[IOError] keyword[as] identifier[e] :
keyword[if] identifier[e] . identifier[errno] == identifier[errno] . identifier[EOPNOTSUPP] :
keyword[pass]
keyword[if] identifier[e] . identifier[errno] == identifier[errno] . identifier[EIO] :
keyword[pass]
keyword[else] :
keyword[raise]
keyword[return] identifier[stats] | def get_stats(self):
"""
    It returns name/value pairs of the control files
    which are categorised in the stats group.
"""
stats = {}
for (name, cls) in self.stats.items():
path = self.paths[name]
if os.path.exists(path):
try:
stats[name] = self._PARSERS[cls](fileops.read(path)) # depends on [control=['try'], data=[]]
except IOError as e:
# XXX: we have to distinguish unexpected errors from the expected ones
if e.errno == errno.EOPNOTSUPP:
# Since 3.5 memory.memsw.* are always created even if disabled.
# If disabled we will get EOPNOTSUPP when read or write them.
# See commit af36f906c0f4c2ffa0482ecdf856a33dc88ae8c5 of the kernel.
pass # depends on [control=['if'], data=[]]
if e.errno == errno.EIO:
# memory.kmem.slabinfo throws EIO until limit_in_bytes is set.
pass # depends on [control=['if'], data=[]]
else:
raise # depends on [control=['except'], data=['e']] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
return stats |
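# Standalone sketch of the attribute shapes ``get_stats`` expects. The
# parser registry, control file and path are illustrative stand-ins for
# what the cgroup subsystem classes normally provide.
class fileops(object):            # stand-in for the module's file helper
    @staticmethod
    def read(path):
        with open(path) as f:
            return f.read()

class MemoryGroup(object):
    _PARSERS = {int: int}         # parser callables keyed by value type
    stats = {'memory.usage_in_bytes': int}
    paths = {'memory.usage_in_bytes':
             '/sys/fs/cgroup/memory/memory.usage_in_bytes'}
    get_stats = get_stats         # reuse the function above as a method

print(MemoryGroup().get_stats())  # {} unless run on a cgroup v1 host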
def getOutput(self):
"""
Returns the combined output of stdout and stderr
"""
output = self.stdout
if self.stdout:
output += '\r\n'
output += self.stderr
return output | def function[getOutput, parameter[self]]:
constant[
Returns the combined output of stdout and stderr
]
variable[output] assign[=] name[self].stdout
if name[self].stdout begin[:]
<ast.AugAssign object at 0x7da1b23e5c00>
<ast.AugAssign object at 0x7da1b23e56c0>
return[name[output]] | keyword[def] identifier[getOutput] ( identifier[self] ):
literal[string]
identifier[output] = identifier[self] . identifier[stdout]
keyword[if] identifier[self] . identifier[stdout] :
identifier[output] += literal[string]
identifier[output] += identifier[self] . identifier[stderr]
keyword[return] identifier[output] | def getOutput(self):
"""
Returns the combined output of stdout and stderr
"""
output = self.stdout
if self.stdout:
output += '\r\n' # depends on [control=['if'], data=[]]
output += self.stderr
return output |
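# Minimal sketch of the combining behaviour, using a hypothetical result
# holder with ``stdout``/``stderr`` string attributes.
class FakeResult(object):
    stdout = 'build ok'
    stderr = 'warning: deprecated flag'
    getOutput = getOutput         # reuse the method above

print(repr(FakeResult().getOutput()))   # 'build ok\r\nwarning: deprecated flag'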
def get_conf_filename():
"""
    The configuration file either lives in ~/.peri.json or is specified via
    the environment variable PERI_CONF_FILE
"""
default = os.path.join(os.path.expanduser("~"), ".peri.json")
return os.environ.get('PERI_CONF_FILE', default) | def function[get_conf_filename, parameter[]]:
constant[
The configuration file either lives in ~/.peri.json or is specified on the
command line via the environment variables PERI_CONF_FILE
]
variable[default] assign[=] call[name[os].path.join, parameter[call[name[os].path.expanduser, parameter[constant[~]]], constant[.peri.json]]]
return[call[name[os].environ.get, parameter[constant[PERI_CONF_FILE], name[default]]]] | keyword[def] identifier[get_conf_filename] ():
literal[string]
identifier[default] = identifier[os] . identifier[path] . identifier[join] ( identifier[os] . identifier[path] . identifier[expanduser] ( literal[string] ), literal[string] )
keyword[return] identifier[os] . identifier[environ] . identifier[get] ( literal[string] , identifier[default] ) | def get_conf_filename():
"""
    The configuration file either lives in ~/.peri.json or is specified via
    the environment variable PERI_CONF_FILE
"""
default = os.path.join(os.path.expanduser('~'), '.peri.json')
return os.environ.get('PERI_CONF_FILE', default) |
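# Quick check of the override behaviour (the override path is made up);
# assumes ``os`` is imported as in the surrounding module.
import os

print(get_conf_filename())                       # ~/.peri.json by default
os.environ['PERI_CONF_FILE'] = '/tmp/peri.json'
print(get_conf_filename())                       # /tmp/peri.json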
def user_credentials(self):
"""
Provides the credentials required to authenticate the user for
login.
"""
credentials = {}
login = self.cleaned_data["login"]
if app_settings.AUTHENTICATION_METHOD == AuthenticationMethod.EMAIL:
credentials["email"] = login
elif (
app_settings.AUTHENTICATION_METHOD ==
AuthenticationMethod.USERNAME):
credentials["username"] = login
else:
if self._is_login_email(login):
credentials["email"] = login
credentials["username"] = login
credentials["password"] = self.cleaned_data["password"]
return credentials | def function[user_credentials, parameter[self]]:
constant[
Provides the credentials required to authenticate the user for
login.
]
variable[credentials] assign[=] dictionary[[], []]
variable[login] assign[=] call[name[self].cleaned_data][constant[login]]
if compare[name[app_settings].AUTHENTICATION_METHOD equal[==] name[AuthenticationMethod].EMAIL] begin[:]
call[name[credentials]][constant[email]] assign[=] name[login]
call[name[credentials]][constant[password]] assign[=] call[name[self].cleaned_data][constant[password]]
return[name[credentials]] | keyword[def] identifier[user_credentials] ( identifier[self] ):
literal[string]
identifier[credentials] ={}
identifier[login] = identifier[self] . identifier[cleaned_data] [ literal[string] ]
keyword[if] identifier[app_settings] . identifier[AUTHENTICATION_METHOD] == identifier[AuthenticationMethod] . identifier[EMAIL] :
identifier[credentials] [ literal[string] ]= identifier[login]
keyword[elif] (
identifier[app_settings] . identifier[AUTHENTICATION_METHOD] ==
identifier[AuthenticationMethod] . identifier[USERNAME] ):
identifier[credentials] [ literal[string] ]= identifier[login]
keyword[else] :
keyword[if] identifier[self] . identifier[_is_login_email] ( identifier[login] ):
identifier[credentials] [ literal[string] ]= identifier[login]
identifier[credentials] [ literal[string] ]= identifier[login]
identifier[credentials] [ literal[string] ]= identifier[self] . identifier[cleaned_data] [ literal[string] ]
keyword[return] identifier[credentials] | def user_credentials(self):
"""
Provides the credentials required to authenticate the user for
login.
"""
credentials = {}
login = self.cleaned_data['login']
if app_settings.AUTHENTICATION_METHOD == AuthenticationMethod.EMAIL:
credentials['email'] = login # depends on [control=['if'], data=[]]
elif app_settings.AUTHENTICATION_METHOD == AuthenticationMethod.USERNAME:
credentials['username'] = login # depends on [control=['if'], data=[]]
else:
if self._is_login_email(login):
credentials['email'] = login # depends on [control=['if'], data=[]]
credentials['username'] = login
credentials['password'] = self.cleaned_data['password']
return credentials |
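# Sketch with stand-ins for allauth's settings machinery, showing the
# EMAIL branch of ``user_credentials`` (values are made up).
class AuthenticationMethod(object):              # mirrors allauth's choices
    EMAIL, USERNAME = 'email', 'username'

class app_settings(object):                      # stand-in for the settings
    AUTHENTICATION_METHOD = AuthenticationMethod.EMAIL

class FakeLoginForm(object):
    cleaned_data = {'login': 'alice@example.org', 'password': 's3cret'}
    user_credentials = user_credentials

print(FakeLoginForm().user_credentials())
# {'email': 'alice@example.org', 'password': 's3cret'}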
def weight_list_to_tuple(data, attr_name):
'''
    Converts a list of values and corresponding weights to a list of
    (value, weight) tuples
'''
if len(data['Value']) != len(data['Weight']):
raise ValueError('Number of weights do not correspond to number of '
'attributes in %s' % attr_name)
weight = np.array(data['Weight'])
if fabs(np.sum(weight) - 1.) > 1E-7:
raise ValueError('Weights do not sum to 1.0 in %s' % attr_name)
data_tuple = []
for iloc, value in enumerate(data['Value']):
data_tuple.append((value, weight[iloc]))
return data_tuple | def function[weight_list_to_tuple, parameter[data, attr_name]]:
constant[
Converts a list of values and corresponding weights to a tuple of values
]
if compare[call[name[len], parameter[call[name[data]][constant[Value]]]] not_equal[!=] call[name[len], parameter[call[name[data]][constant[Weight]]]]] begin[:]
<ast.Raise object at 0x7da20c7963e0>
variable[weight] assign[=] call[name[np].array, parameter[call[name[data]][constant[Weight]]]]
if compare[call[name[fabs], parameter[binary_operation[call[name[np].sum, parameter[name[weight]]] - constant[1.0]]]] greater[>] constant[1e-07]] begin[:]
<ast.Raise object at 0x7da2054a4460>
variable[data_tuple] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da2054a4a00>, <ast.Name object at 0x7da2054a6e90>]]] in starred[call[name[enumerate], parameter[call[name[data]][constant[Value]]]]] begin[:]
call[name[data_tuple].append, parameter[tuple[[<ast.Name object at 0x7da2054a6260>, <ast.Subscript object at 0x7da2054a40a0>]]]]
return[name[data_tuple]] | keyword[def] identifier[weight_list_to_tuple] ( identifier[data] , identifier[attr_name] ):
literal[string]
keyword[if] identifier[len] ( identifier[data] [ literal[string] ])!= identifier[len] ( identifier[data] [ literal[string] ]):
keyword[raise] identifier[ValueError] ( literal[string]
literal[string] % identifier[attr_name] )
identifier[weight] = identifier[np] . identifier[array] ( identifier[data] [ literal[string] ])
keyword[if] identifier[fabs] ( identifier[np] . identifier[sum] ( identifier[weight] )- literal[int] )> literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] % identifier[attr_name] )
identifier[data_tuple] =[]
keyword[for] identifier[iloc] , identifier[value] keyword[in] identifier[enumerate] ( identifier[data] [ literal[string] ]):
identifier[data_tuple] . identifier[append] (( identifier[value] , identifier[weight] [ identifier[iloc] ]))
keyword[return] identifier[data_tuple] | def weight_list_to_tuple(data, attr_name):
"""
    Converts a list of values and corresponding weights to a list of
    (value, weight) tuples
"""
if len(data['Value']) != len(data['Weight']):
raise ValueError('Number of weights do not correspond to number of attributes in %s' % attr_name) # depends on [control=['if'], data=[]]
weight = np.array(data['Weight'])
if fabs(np.sum(weight) - 1.0) > 1e-07:
raise ValueError('Weights do not sum to 1.0 in %s' % attr_name) # depends on [control=['if'], data=[]]
data_tuple = []
for (iloc, value) in enumerate(data['Value']):
data_tuple.append((value, weight[iloc])) # depends on [control=['for'], data=[]]
return data_tuple |
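# Worked example; assumes ``numpy as np`` and ``math.fabs`` are imported as
# in the source module.
import numpy as np
from math import fabs

data = {'Value': [5.0, 6.0, 7.0], 'Weight': [0.2, 0.5, 0.3]}
print(weight_list_to_tuple(data, 'magnitude'))   # pairs of (value, weight)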
def is_instance_throughput_too_low(self, inst_id):
"""
        Return whether the throughput ratio of the given instance is lower
        than the acceptable threshold (Delta)
"""
r = self.instance_throughput_ratio(inst_id)
if r is None:
logger.debug("{} instance {} throughput is not "
"measurable.".format(self, inst_id))
return None
too_low = r < self.Delta
if too_low:
logger.display("{}{} instance {} throughput ratio {} is lower than Delta {}.".
format(MONITORING_PREFIX, self, inst_id, r, self.Delta))
else:
logger.trace("{} instance {} throughput ratio {} is acceptable.".
format(self, inst_id, r))
return too_low | def function[is_instance_throughput_too_low, parameter[self, inst_id]]:
constant[
Return whether the throughput of the master instance is greater than the
acceptable threshold
]
variable[r] assign[=] call[name[self].instance_throughput_ratio, parameter[name[inst_id]]]
if compare[name[r] is constant[None]] begin[:]
call[name[logger].debug, parameter[call[constant[{} instance {} throughput is not measurable.].format, parameter[name[self], name[inst_id]]]]]
return[constant[None]]
variable[too_low] assign[=] compare[name[r] less[<] name[self].Delta]
if name[too_low] begin[:]
call[name[logger].display, parameter[call[constant[{}{} instance {} throughput ratio {} is lower than Delta {}.].format, parameter[name[MONITORING_PREFIX], name[self], name[inst_id], name[r], name[self].Delta]]]]
return[name[too_low]] | keyword[def] identifier[is_instance_throughput_too_low] ( identifier[self] , identifier[inst_id] ):
literal[string]
identifier[r] = identifier[self] . identifier[instance_throughput_ratio] ( identifier[inst_id] )
keyword[if] identifier[r] keyword[is] keyword[None] :
identifier[logger] . identifier[debug] ( literal[string]
literal[string] . identifier[format] ( identifier[self] , identifier[inst_id] ))
keyword[return] keyword[None]
identifier[too_low] = identifier[r] < identifier[self] . identifier[Delta]
keyword[if] identifier[too_low] :
identifier[logger] . identifier[display] ( literal[string] .
identifier[format] ( identifier[MONITORING_PREFIX] , identifier[self] , identifier[inst_id] , identifier[r] , identifier[self] . identifier[Delta] ))
keyword[else] :
identifier[logger] . identifier[trace] ( literal[string] .
identifier[format] ( identifier[self] , identifier[inst_id] , identifier[r] ))
keyword[return] identifier[too_low] | def is_instance_throughput_too_low(self, inst_id):
"""
    Return whether the throughput ratio of the given instance is lower
    than the acceptable threshold (Delta)
"""
r = self.instance_throughput_ratio(inst_id)
if r is None:
logger.debug('{} instance {} throughput is not measurable.'.format(self, inst_id))
return None # depends on [control=['if'], data=[]]
too_low = r < self.Delta
if too_low:
logger.display('{}{} instance {} throughput ratio {} is lower than Delta {}.'.format(MONITORING_PREFIX, self, inst_id, r, self.Delta)) # depends on [control=['if'], data=[]]
else:
logger.trace('{} instance {} throughput ratio {} is acceptable.'.format(self, inst_id, r))
return too_low |
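# Stub sketch (all values made up): a monitor whose ratio sits below Delta,
# so the check reports the instance as too slow. ``logger`` and the prefix
# stand in for the module globals the method uses.
MONITORING_PREFIX = '[MONITORING] '

class logger(object):                            # stand-in module logger
    debug = display = trace = staticmethod(print)

class StubMonitor(object):
    Delta = 0.4
    instance_throughput_ratio = lambda self, inst_id: 0.25
    is_instance_throughput_too_low = is_instance_throughput_too_low

print(StubMonitor().is_instance_throughput_too_low(0))   # True (0.25 < 0.4)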
def similar_items(self, itemid, N=10):
""" Returns a list of the most similar other items """
if itemid >= self.similarity.shape[0]:
return []
return sorted(list(nonzeros(self.similarity, itemid)), key=lambda x: -x[1])[:N] | def function[similar_items, parameter[self, itemid, N]]:
constant[ Returns a list of the most similar other items ]
if compare[name[itemid] greater_or_equal[>=] call[name[self].similarity.shape][constant[0]]] begin[:]
return[list[[]]]
return[call[call[name[sorted], parameter[call[name[list], parameter[call[name[nonzeros], parameter[name[self].similarity, name[itemid]]]]]]]][<ast.Slice object at 0x7da1b23460b0>]] | keyword[def] identifier[similar_items] ( identifier[self] , identifier[itemid] , identifier[N] = literal[int] ):
literal[string]
keyword[if] identifier[itemid] >= identifier[self] . identifier[similarity] . identifier[shape] [ literal[int] ]:
keyword[return] []
keyword[return] identifier[sorted] ( identifier[list] ( identifier[nonzeros] ( identifier[self] . identifier[similarity] , identifier[itemid] )), identifier[key] = keyword[lambda] identifier[x] :- identifier[x] [ literal[int] ])[: identifier[N] ] | def similar_items(self, itemid, N=10):
""" Returns a list of the most similar other items """
if itemid >= self.similarity.shape[0]:
return [] # depends on [control=['if'], data=[]]
return sorted(list(nonzeros(self.similarity, itemid)), key=lambda x: -x[1])[:N] |
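# Sketch with a tiny CSR similarity matrix; ``nonzeros`` below mirrors the
# helper the source library uses to walk one row of a sparse matrix.
import scipy.sparse as sp

def nonzeros(m, row):
    for idx in range(m.indptr[row], m.indptr[row + 1]):
        yield m.indices[idx], m.data[idx]

class StubModel(object):
    similarity = sp.csr_matrix([[1.0, 0.3, 0.8],
                                [0.3, 1.0, 0.1],
                                [0.8, 0.1, 1.0]])
    similar_items = similar_items

print(StubModel().similar_items(0, N=2))   # [(0, 1.0), (2, 0.8)]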
def get_message_actions(current):
"""
Returns applicable actions for current user for given message key
.. code-block:: python
# request:
{
'view':'_zops_get_message_actions',
'key': key,
}
# response:
{
'actions':[('name_string', 'cmd_string'),]
'status': string, # 'OK' for success
'code': int, # 200 for success
}
"""
current.output = {'status': 'OK',
'code': 200,
'actions': Message.objects.get(
current.input['key']).get_actions_for(current.user)} | def function[get_message_actions, parameter[current]]:
constant[
Returns applicable actions for current user for given message key
.. code-block:: python
# request:
{
'view':'_zops_get_message_actions',
'key': key,
}
# response:
{
'actions':[('name_string', 'cmd_string'),]
'status': string, # 'OK' for success
'code': int, # 200 for success
}
]
name[current].output assign[=] dictionary[[<ast.Constant object at 0x7da20c991d80>, <ast.Constant object at 0x7da20c991750>, <ast.Constant object at 0x7da20c9909d0>], [<ast.Constant object at 0x7da20c993d90>, <ast.Constant object at 0x7da20c992fe0>, <ast.Call object at 0x7da20c991ff0>]] | keyword[def] identifier[get_message_actions] ( identifier[current] ):
literal[string]
identifier[current] . identifier[output] ={ literal[string] : literal[string] ,
literal[string] : literal[int] ,
literal[string] : identifier[Message] . identifier[objects] . identifier[get] (
identifier[current] . identifier[input] [ literal[string] ]). identifier[get_actions_for] ( identifier[current] . identifier[user] )} | def get_message_actions(current):
"""
Returns applicable actions for current user for given message key
.. code-block:: python
# request:
{
'view':'_zops_get_message_actions',
'key': key,
}
# response:
{
'actions':[('name_string', 'cmd_string'),]
'status': string, # 'OK' for success
'code': int, # 200 for success
}
"""
current.output = {'status': 'OK', 'code': 200, 'actions': Message.objects.get(current.input['key']).get_actions_for(current.user)} |
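# Stub sketch (all names hypothetical): exercises the view contract with an
# in-memory message instead of the real Message model.
class StubMessage(object):
    def get_actions_for(self, user):
        return [('Delete', '_zops_delete_message')]

class Message(object):                       # stand-in for the ORM model
    class objects(object):
        get = staticmethod(lambda key: StubMessage())

class Current(object):
    input = {'key': 'MSG_KEY'}
    user = 'alice'

cur = Current()
get_message_actions(cur)
print(cur.output)   # {'status': 'OK', 'code': 200, 'actions': [...]}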
def add_translations(
self, module_name, translations_dir="translations", domain="messages"
):
"""Add translations from external module.
For example::
babel.add_translations('abilian.core')
Will add translations files from `abilian.core` module.
"""
module = importlib.import_module(module_name)
for path in (Path(p, translations_dir) for p in module.__path__):
if not (path.exists() and path.is_dir()):
continue
if not os.access(str(path), os.R_OK):
self.app.logger.warning(
"Babel translations: read access not allowed {}, skipping."
"".format(repr(str(path).encode("utf-8")))
)
continue
self._translations_paths.append((str(path), domain)) | def function[add_translations, parameter[self, module_name, translations_dir, domain]]:
constant[Add translations from external module.
For example::
babel.add_translations('abilian.core')
Will add translations files from `abilian.core` module.
]
variable[module] assign[=] call[name[importlib].import_module, parameter[name[module_name]]]
for taget[name[path]] in starred[<ast.GeneratorExp object at 0x7da1b26af100>] begin[:]
if <ast.UnaryOp object at 0x7da1b26af400> begin[:]
continue
if <ast.UnaryOp object at 0x7da1b26aceb0> begin[:]
call[name[self].app.logger.warning, parameter[call[constant[Babel translations: read access not allowed {}, skipping.].format, parameter[call[name[repr], parameter[call[call[name[str], parameter[name[path]]].encode, parameter[constant[utf-8]]]]]]]]]
continue
call[name[self]._translations_paths.append, parameter[tuple[[<ast.Call object at 0x7da1b26aef20>, <ast.Name object at 0x7da1b26acf40>]]]] | keyword[def] identifier[add_translations] (
identifier[self] , identifier[module_name] , identifier[translations_dir] = literal[string] , identifier[domain] = literal[string]
):
literal[string]
identifier[module] = identifier[importlib] . identifier[import_module] ( identifier[module_name] )
keyword[for] identifier[path] keyword[in] ( identifier[Path] ( identifier[p] , identifier[translations_dir] ) keyword[for] identifier[p] keyword[in] identifier[module] . identifier[__path__] ):
keyword[if] keyword[not] ( identifier[path] . identifier[exists] () keyword[and] identifier[path] . identifier[is_dir] ()):
keyword[continue]
keyword[if] keyword[not] identifier[os] . identifier[access] ( identifier[str] ( identifier[path] ), identifier[os] . identifier[R_OK] ):
identifier[self] . identifier[app] . identifier[logger] . identifier[warning] (
literal[string]
literal[string] . identifier[format] ( identifier[repr] ( identifier[str] ( identifier[path] ). identifier[encode] ( literal[string] )))
)
keyword[continue]
identifier[self] . identifier[_translations_paths] . identifier[append] (( identifier[str] ( identifier[path] ), identifier[domain] )) | def add_translations(self, module_name, translations_dir='translations', domain='messages'):
"""Add translations from external module.
For example::
babel.add_translations('abilian.core')
Will add translations files from `abilian.core` module.
"""
module = importlib.import_module(module_name)
for path in (Path(p, translations_dir) for p in module.__path__):
if not (path.exists() and path.is_dir()):
continue # depends on [control=['if'], data=[]]
if not os.access(str(path), os.R_OK):
self.app.logger.warning('Babel translations: read access not allowed {}, skipping.'.format(repr(str(path).encode('utf-8'))))
continue # depends on [control=['if'], data=[]]
self._translations_paths.append((str(path), domain)) # depends on [control=['for'], data=['path']] |
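# Usage sketch, assuming ``babel`` is the initialised extension instance;
# the first call is the docstring's own example, the second is made up.
babel.add_translations('abilian.core')
babel.add_translations('myplugin', translations_dir='locale', domain='plugin')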
def fillzip(*l):
"""like zip (for things that have a length), but repeats the last element of all shorter lists such that the result is as long as the longest."""
maximum = max(len(el) for el in l)
return zip(*[el + [el[-1]]*(maximum-len(el)) for el in l]) | def function[fillzip, parameter[]]:
constant[like zip (for things that have a length), but repeats the last element of all shorter lists such that the result is as long as the longest.]
variable[maximum] assign[=] call[name[max], parameter[<ast.GeneratorExp object at 0x7da2054a5b10>]]
return[call[name[zip], parameter[<ast.Starred object at 0x7da2054a6740>]]] | keyword[def] identifier[fillzip] (* identifier[l] ):
literal[string]
identifier[maximum] = identifier[max] ( identifier[len] ( identifier[el] ) keyword[for] identifier[el] keyword[in] identifier[l] )
keyword[return] identifier[zip] (*[ identifier[el] +[ identifier[el] [- literal[int] ]]*( identifier[maximum] - identifier[len] ( identifier[el] )) keyword[for] identifier[el] keyword[in] identifier[l] ]) | def fillzip(*l):
"""like zip (for things that have a length), but repeats the last element of all shorter lists such that the result is as long as the longest."""
maximum = max((len(el) for el in l))
return zip(*[el + [el[-1]] * (maximum - len(el)) for el in l]) |
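# Worked example: the shorter list repeats its last element.
print(list(fillzip([1, 2, 3], ['a'])))
# [(1, 'a'), (2, 'a'), (3, 'a')]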
def auth(nodes, pcsuser='hacluster', pcspasswd='hacluster', extra_args=None):
'''
Authorize nodes to the cluster
nodes
a list of nodes which should be authorized to the cluster
pcsuser
        user for communication with PCS (default: hacluster)
pcspasswd
password for pcsuser (default: hacluster)
extra_args
list of extra option for the \'pcs cluster auth\' command
CLI Example:
.. code-block:: bash
salt '*' pcs.auth nodes='[ node1.example.org node2.example.org ]' pcsuser=hacluster pcspasswd=hoonetorg extra_args="[ '--force' ]"
'''
cmd = ['pcs', 'cluster', 'auth']
if pcsuser:
cmd += ['-u', pcsuser]
if pcspasswd:
cmd += ['-p', pcspasswd]
if isinstance(extra_args, (list, tuple)):
cmd += extra_args
cmd += nodes
return __salt__['cmd.run_all'](cmd, output_loglevel='trace', python_shell=False) | def function[auth, parameter[nodes, pcsuser, pcspasswd, extra_args]]:
constant[
Authorize nodes to the cluster
nodes
a list of nodes which should be authorized to the cluster
pcsuser
user for communitcation with PCS (default: hacluster)
pcspasswd
password for pcsuser (default: hacluster)
extra_args
list of extra option for the 'pcs cluster auth' command
CLI Example:
.. code-block:: bash
salt '*' pcs.auth nodes='[ node1.example.org node2.example.org ]' pcsuser=hacluster pcspasswd=hoonetorg extra_args="[ '--force' ]"
]
variable[cmd] assign[=] list[[<ast.Constant object at 0x7da207f01a80>, <ast.Constant object at 0x7da207f00880>, <ast.Constant object at 0x7da207f00820>]]
if name[pcsuser] begin[:]
<ast.AugAssign object at 0x7da207f013f0>
if name[pcspasswd] begin[:]
<ast.AugAssign object at 0x7da207f037c0>
if call[name[isinstance], parameter[name[extra_args], tuple[[<ast.Name object at 0x7da18f813640>, <ast.Name object at 0x7da18f812ec0>]]]] begin[:]
<ast.AugAssign object at 0x7da18f812260>
<ast.AugAssign object at 0x7da18f812080>
return[call[call[name[__salt__]][constant[cmd.run_all]], parameter[name[cmd]]]] | keyword[def] identifier[auth] ( identifier[nodes] , identifier[pcsuser] = literal[string] , identifier[pcspasswd] = literal[string] , identifier[extra_args] = keyword[None] ):
literal[string]
identifier[cmd] =[ literal[string] , literal[string] , literal[string] ]
keyword[if] identifier[pcsuser] :
identifier[cmd] +=[ literal[string] , identifier[pcsuser] ]
keyword[if] identifier[pcspasswd] :
identifier[cmd] +=[ literal[string] , identifier[pcspasswd] ]
keyword[if] identifier[isinstance] ( identifier[extra_args] ,( identifier[list] , identifier[tuple] )):
identifier[cmd] += identifier[extra_args]
identifier[cmd] += identifier[nodes]
keyword[return] identifier[__salt__] [ literal[string] ]( identifier[cmd] , identifier[output_loglevel] = literal[string] , identifier[python_shell] = keyword[False] ) | def auth(nodes, pcsuser='hacluster', pcspasswd='hacluster', extra_args=None):
"""
Authorize nodes to the cluster
nodes
a list of nodes which should be authorized to the cluster
pcsuser
    user for communication with PCS (default: hacluster)
pcspasswd
password for pcsuser (default: hacluster)
extra_args
    list of extra options for the 'pcs cluster auth' command
CLI Example:
.. code-block:: bash
salt '*' pcs.auth nodes='[ node1.example.org node2.example.org ]' pcsuser=hacluster pcspasswd=hoonetorg extra_args="[ '--force' ]"
"""
cmd = ['pcs', 'cluster', 'auth']
if pcsuser:
cmd += ['-u', pcsuser] # depends on [control=['if'], data=[]]
if pcspasswd:
cmd += ['-p', pcspasswd] # depends on [control=['if'], data=[]]
if isinstance(extra_args, (list, tuple)):
cmd += extra_args # depends on [control=['if'], data=[]]
cmd += nodes
return __salt__['cmd.run_all'](cmd, output_loglevel='trace', python_shell=False) |
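# Hypothetical in-minion call mirroring the CLI example from the docstring;
# ``__salt__`` only exists inside a running Salt minion, and the return
# value is whatever ``cmd.run_all`` produced.
ret = __salt__['pcs.auth'](['node1.example.org', 'node2.example.org'],
                           pcsuser='hacluster',
                           pcspasswd='hoonetorg',
                           extra_args=['--force'])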
def stop(name):
'''
stops a container
'''
if not exists(name):
raise ContainerNotExists("The container (%s) does not exist!" % name)
cmd = ['lxc-stop', '-n', name]
subprocess.check_call(cmd) | def function[stop, parameter[name]]:
constant[
stops a container
]
if <ast.UnaryOp object at 0x7da18dc048e0> begin[:]
<ast.Raise object at 0x7da18dc06cb0>
variable[cmd] assign[=] list[[<ast.Constant object at 0x7da20c6ab8b0>, <ast.Constant object at 0x7da20c6abb20>, <ast.Name object at 0x7da20c6a9c90>]]
call[name[subprocess].check_call, parameter[name[cmd]]] | keyword[def] identifier[stop] ( identifier[name] ):
literal[string]
keyword[if] keyword[not] identifier[exists] ( identifier[name] ):
keyword[raise] identifier[ContainerNotExists] ( literal[string] % identifier[name] )
identifier[cmd] =[ literal[string] , literal[string] , identifier[name] ]
identifier[subprocess] . identifier[check_call] ( identifier[cmd] ) | def stop(name):
"""
stops a container
"""
if not exists(name):
raise ContainerNotExists('The container (%s) does not exist!' % name) # depends on [control=['if'], data=[]]
cmd = ['lxc-stop', '-n', name]
subprocess.check_call(cmd) |
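# Usage sketch: the container name is hypothetical; failures surface as
# ContainerNotExists (from the guard above) or subprocess.CalledProcessError.
try:
    stop('web01')
except ContainerNotExists as exc:
    print(exc)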
def get_annotation_tags(index_page):
"""
    Return list of descriptions parsed from ``<meta>`` tags and Dublin Core
    inlined in ``<meta>`` tags.
    Args:
        index_page (str): HTML content of the page you wish to analyze.
Returns:
list: List of ``SourceString`` objects.
"""
dom = dhtmlparser.parseString(index_page)
descriptions = [
get_html_annotations(dom),
get_dc_annotations(dom),
]
return sum(descriptions, []) | def function[get_annotation_tags, parameter[index_page]]:
constant[
Return list of descriptions parsed from ``<meta>`` tags and dublin core
inlined in ``<meta>`` tags.
Args:
index_page (str): HTML content of the page you wisht to analyze.
Returns:
list: List of ``SourceString`` objects.
]
variable[dom] assign[=] call[name[dhtmlparser].parseString, parameter[name[index_page]]]
variable[descriptions] assign[=] list[[<ast.Call object at 0x7da1b0a82290>, <ast.Call object at 0x7da1b0a82c50>]]
return[call[name[sum], parameter[name[descriptions], list[[]]]]] | keyword[def] identifier[get_annotation_tags] ( identifier[index_page] ):
literal[string]
identifier[dom] = identifier[dhtmlparser] . identifier[parseString] ( identifier[index_page] )
identifier[descriptions] =[
identifier[get_html_annotations] ( identifier[dom] ),
identifier[get_dc_annotations] ( identifier[dom] ),
]
keyword[return] identifier[sum] ( identifier[descriptions] ,[]) | def get_annotation_tags(index_page):
"""
    Return list of descriptions parsed from ``<meta>`` tags and Dublin Core
    inlined in ``<meta>`` tags.
    Args:
        index_page (str): HTML content of the page you wish to analyze.
Returns:
list: List of ``SourceString`` objects.
"""
dom = dhtmlparser.parseString(index_page)
descriptions = [get_html_annotations(dom), get_dc_annotations(dom)]
return sum(descriptions, []) |
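# Quick sketch on an inline page; the meta names follow the usual HTML and
# Dublin Core conventions the two helpers above are expected to pick up.
page = '''<html><head>
<meta name="description" content="A short page description.">
<meta name="DC.Description" content="A Dublin Core description.">
</head></html>'''
print(get_annotation_tags(page))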
def check_manifest(source_tree='.', create=False, update=False,
python=sys.executable):
"""Compare a generated source distribution with list of files in a VCS.
Returns True if the manifest is fine.
"""
all_ok = True
if os.path.sep in python:
python = os.path.abspath(python)
with cd(source_tree):
if not is_package():
raise Failure('This is not a Python project (no setup.py).')
read_config()
read_manifest()
info_begin("listing source files under version control")
all_source_files = sorted(get_vcs_files())
source_files = strip_sdist_extras(all_source_files)
info_continue(": %d files and directories" % len(source_files))
if not all_source_files:
raise Failure('There are no files added to version control!')
info_begin("building an sdist")
with mkdtemp('-sdist') as tempdir:
run([python, 'setup.py', 'sdist', '-d', tempdir])
sdist_filename = get_one_file_in(tempdir)
info_continue(": %s" % os.path.basename(sdist_filename))
sdist_files = sorted(normalize_names(strip_sdist_extras(
strip_toplevel_name(get_archive_file_list(sdist_filename)))))
info_continue(": %d files and directories" % len(sdist_files))
version = extract_version_from_filename(sdist_filename)
existing_source_files = list(filter(os.path.exists, all_source_files))
missing_source_files = sorted(set(all_source_files) - set(existing_source_files))
if missing_source_files:
warning("some files listed as being under source control are missing:\n%s"
% format_list(missing_source_files))
info_begin("copying source files to a temporary directory")
with mkdtemp('-sources') as tempsourcedir:
copy_files(existing_source_files, tempsourcedir)
if os.path.exists('MANIFEST.in') and 'MANIFEST.in' not in source_files:
# See https://github.com/mgedmin/check-manifest/issues/7
                # if we do this, we will emit a warning about MANIFEST.in not
                # being in source control; if we don't, the user
                # gets confused about their new manifest rules being
                # ignored.
copy_files(['MANIFEST.in'], tempsourcedir)
if 'setup.py' not in source_files:
# See https://github.com/mgedmin/check-manifest/issues/46
                # if we do this, we will emit a warning about setup.py not
                # being in source control; if we don't, the user
                # gets a scary error
copy_files(['setup.py'], tempsourcedir)
info_begin("building a clean sdist")
with cd(tempsourcedir):
with mkdtemp('-sdist') as tempdir:
os.environ['SETUPTOOLS_SCM_PRETEND_VERSION'] = version
run([python, 'setup.py', 'sdist', '-d', tempdir])
sdist_filename = get_one_file_in(tempdir)
info_continue(": %s" % os.path.basename(sdist_filename))
clean_sdist_files = sorted(normalize_names(strip_sdist_extras(
strip_toplevel_name(get_archive_file_list(sdist_filename)))))
info_continue(": %d files and directories" % len(clean_sdist_files))
missing_from_manifest = set(source_files) - set(clean_sdist_files)
missing_from_VCS = set(sdist_files + clean_sdist_files) - set(source_files)
if not missing_from_manifest and not missing_from_VCS:
info("lists of files in version control and sdist match")
else:
error("lists of files in version control and sdist do not match!\n%s"
% format_missing(missing_from_VCS, missing_from_manifest,
"VCS", "sdist"))
suggestions, unknowns = find_suggestions(missing_from_manifest)
user_asked_for_help = update or (create and not
os.path.exists('MANIFEST.in'))
if 'MANIFEST.in' not in existing_source_files:
if suggestions and not user_asked_for_help:
info("no MANIFEST.in found; you can run 'check-manifest -c' to create one")
else:
info("no MANIFEST.in found")
if suggestions:
info("suggested MANIFEST.in rules:\n%s"
% format_list(suggestions))
if user_asked_for_help:
existed = os.path.exists('MANIFEST.in')
with open('MANIFEST.in', 'a') as f:
if not existed:
info("creating MANIFEST.in")
else:
info("updating MANIFEST.in")
f.write('\n# added by check_manifest.py\n')
f.write('\n'.join(suggestions) + '\n')
if unknowns:
info("don't know how to come up with rules matching\n%s"
% format_list(unknowns))
elif user_asked_for_help:
info("don't know how to come up with rules"
" matching any of the files, sorry!")
all_ok = False
bad_ideas = find_bad_ideas(all_source_files)
filtered_bad_ideas = [bad_idea for bad_idea in bad_ideas
if not file_matches(bad_idea, IGNORE_BAD_IDEAS)]
if filtered_bad_ideas:
warning("you have %s in source control!\nthat's a bad idea:"
" auto-generated files should not be versioned"
% filtered_bad_ideas[0])
if len(filtered_bad_ideas) > 1:
warning("this also applies to the following:\n%s"
% format_list(filtered_bad_ideas[1:]))
all_ok = False
return all_ok | def function[check_manifest, parameter[source_tree, create, update, python]]:
constant[Compare a generated source distribution with list of files in a VCS.
Returns True if the manifest is fine.
]
variable[all_ok] assign[=] constant[True]
if compare[name[os].path.sep in name[python]] begin[:]
variable[python] assign[=] call[name[os].path.abspath, parameter[name[python]]]
with call[name[cd], parameter[name[source_tree]]] begin[:]
if <ast.UnaryOp object at 0x7da1b26acf70> begin[:]
<ast.Raise object at 0x7da1b26ad5a0>
call[name[read_config], parameter[]]
call[name[read_manifest], parameter[]]
call[name[info_begin], parameter[constant[listing source files under version control]]]
variable[all_source_files] assign[=] call[name[sorted], parameter[call[name[get_vcs_files], parameter[]]]]
variable[source_files] assign[=] call[name[strip_sdist_extras], parameter[name[all_source_files]]]
call[name[info_continue], parameter[binary_operation[constant[: %d files and directories] <ast.Mod object at 0x7da2590d6920> call[name[len], parameter[name[source_files]]]]]]
if <ast.UnaryOp object at 0x7da1b26af2b0> begin[:]
<ast.Raise object at 0x7da1b26ae830>
call[name[info_begin], parameter[constant[building an sdist]]]
with call[name[mkdtemp], parameter[constant[-sdist]]] begin[:]
call[name[run], parameter[list[[<ast.Name object at 0x7da1b26acaf0>, <ast.Constant object at 0x7da1b26ac0d0>, <ast.Constant object at 0x7da1b26ad480>, <ast.Constant object at 0x7da1b26acd60>, <ast.Name object at 0x7da1b26aedd0>]]]]
variable[sdist_filename] assign[=] call[name[get_one_file_in], parameter[name[tempdir]]]
call[name[info_continue], parameter[binary_operation[constant[: %s] <ast.Mod object at 0x7da2590d6920> call[name[os].path.basename, parameter[name[sdist_filename]]]]]]
variable[sdist_files] assign[=] call[name[sorted], parameter[call[name[normalize_names], parameter[call[name[strip_sdist_extras], parameter[call[name[strip_toplevel_name], parameter[call[name[get_archive_file_list], parameter[name[sdist_filename]]]]]]]]]]]
call[name[info_continue], parameter[binary_operation[constant[: %d files and directories] <ast.Mod object at 0x7da2590d6920> call[name[len], parameter[name[sdist_files]]]]]]
variable[version] assign[=] call[name[extract_version_from_filename], parameter[name[sdist_filename]]]
variable[existing_source_files] assign[=] call[name[list], parameter[call[name[filter], parameter[name[os].path.exists, name[all_source_files]]]]]
variable[missing_source_files] assign[=] call[name[sorted], parameter[binary_operation[call[name[set], parameter[name[all_source_files]]] - call[name[set], parameter[name[existing_source_files]]]]]]
if name[missing_source_files] begin[:]
call[name[warning], parameter[binary_operation[constant[some files listed as being under source control are missing:
%s] <ast.Mod object at 0x7da2590d6920> call[name[format_list], parameter[name[missing_source_files]]]]]]
call[name[info_begin], parameter[constant[copying source files to a temporary directory]]]
with call[name[mkdtemp], parameter[constant[-sources]]] begin[:]
call[name[copy_files], parameter[name[existing_source_files], name[tempsourcedir]]]
if <ast.BoolOp object at 0x7da1b26ad780> begin[:]
call[name[copy_files], parameter[list[[<ast.Constant object at 0x7da1b13a6bc0>]], name[tempsourcedir]]]
if compare[constant[setup.py] <ast.NotIn object at 0x7da2590d7190> name[source_files]] begin[:]
call[name[copy_files], parameter[list[[<ast.Constant object at 0x7da1b13a62f0>]], name[tempsourcedir]]]
call[name[info_begin], parameter[constant[building a clean sdist]]]
with call[name[cd], parameter[name[tempsourcedir]]] begin[:]
with call[name[mkdtemp], parameter[constant[-sdist]]] begin[:]
call[name[os].environ][constant[SETUPTOOLS_SCM_PRETEND_VERSION]] assign[=] name[version]
call[name[run], parameter[list[[<ast.Name object at 0x7da1b13a72b0>, <ast.Constant object at 0x7da1b13a7340>, <ast.Constant object at 0x7da1b13a79a0>, <ast.Constant object at 0x7da1b13a7a00>, <ast.Name object at 0x7da1b13a6a40>]]]]
variable[sdist_filename] assign[=] call[name[get_one_file_in], parameter[name[tempdir]]]
call[name[info_continue], parameter[binary_operation[constant[: %s] <ast.Mod object at 0x7da2590d6920> call[name[os].path.basename, parameter[name[sdist_filename]]]]]]
variable[clean_sdist_files] assign[=] call[name[sorted], parameter[call[name[normalize_names], parameter[call[name[strip_sdist_extras], parameter[call[name[strip_toplevel_name], parameter[call[name[get_archive_file_list], parameter[name[sdist_filename]]]]]]]]]]]
call[name[info_continue], parameter[binary_operation[constant[: %d files and directories] <ast.Mod object at 0x7da2590d6920> call[name[len], parameter[name[clean_sdist_files]]]]]]
variable[missing_from_manifest] assign[=] binary_operation[call[name[set], parameter[name[source_files]]] - call[name[set], parameter[name[clean_sdist_files]]]]
variable[missing_from_VCS] assign[=] binary_operation[call[name[set], parameter[binary_operation[name[sdist_files] + name[clean_sdist_files]]]] - call[name[set], parameter[name[source_files]]]]
if <ast.BoolOp object at 0x7da1b13a5c00> begin[:]
call[name[info], parameter[constant[lists of files in version control and sdist match]]]
variable[bad_ideas] assign[=] call[name[find_bad_ideas], parameter[name[all_source_files]]]
variable[filtered_bad_ideas] assign[=] <ast.ListComp object at 0x7da1b13a7dc0>
if name[filtered_bad_ideas] begin[:]
call[name[warning], parameter[binary_operation[constant[you have %s in source control!
that's a bad idea: auto-generated files should not be versioned] <ast.Mod object at 0x7da2590d6920> call[name[filtered_bad_ideas]][constant[0]]]]]
if compare[call[name[len], parameter[name[filtered_bad_ideas]]] greater[>] constant[1]] begin[:]
call[name[warning], parameter[binary_operation[constant[this also applies to the following:
%s] <ast.Mod object at 0x7da2590d6920> call[name[format_list], parameter[call[name[filtered_bad_ideas]][<ast.Slice object at 0x7da1b13a7b80>]]]]]]
variable[all_ok] assign[=] constant[False]
return[name[all_ok]] | keyword[def] identifier[check_manifest] ( identifier[source_tree] = literal[string] , identifier[create] = keyword[False] , identifier[update] = keyword[False] ,
identifier[python] = identifier[sys] . identifier[executable] ):
literal[string]
identifier[all_ok] = keyword[True]
keyword[if] identifier[os] . identifier[path] . identifier[sep] keyword[in] identifier[python] :
identifier[python] = identifier[os] . identifier[path] . identifier[abspath] ( identifier[python] )
keyword[with] identifier[cd] ( identifier[source_tree] ):
keyword[if] keyword[not] identifier[is_package] ():
keyword[raise] identifier[Failure] ( literal[string] )
identifier[read_config] ()
identifier[read_manifest] ()
identifier[info_begin] ( literal[string] )
identifier[all_source_files] = identifier[sorted] ( identifier[get_vcs_files] ())
identifier[source_files] = identifier[strip_sdist_extras] ( identifier[all_source_files] )
identifier[info_continue] ( literal[string] % identifier[len] ( identifier[source_files] ))
keyword[if] keyword[not] identifier[all_source_files] :
keyword[raise] identifier[Failure] ( literal[string] )
identifier[info_begin] ( literal[string] )
keyword[with] identifier[mkdtemp] ( literal[string] ) keyword[as] identifier[tempdir] :
identifier[run] ([ identifier[python] , literal[string] , literal[string] , literal[string] , identifier[tempdir] ])
identifier[sdist_filename] = identifier[get_one_file_in] ( identifier[tempdir] )
identifier[info_continue] ( literal[string] % identifier[os] . identifier[path] . identifier[basename] ( identifier[sdist_filename] ))
identifier[sdist_files] = identifier[sorted] ( identifier[normalize_names] ( identifier[strip_sdist_extras] (
identifier[strip_toplevel_name] ( identifier[get_archive_file_list] ( identifier[sdist_filename] )))))
identifier[info_continue] ( literal[string] % identifier[len] ( identifier[sdist_files] ))
identifier[version] = identifier[extract_version_from_filename] ( identifier[sdist_filename] )
identifier[existing_source_files] = identifier[list] ( identifier[filter] ( identifier[os] . identifier[path] . identifier[exists] , identifier[all_source_files] ))
identifier[missing_source_files] = identifier[sorted] ( identifier[set] ( identifier[all_source_files] )- identifier[set] ( identifier[existing_source_files] ))
keyword[if] identifier[missing_source_files] :
identifier[warning] ( literal[string]
% identifier[format_list] ( identifier[missing_source_files] ))
identifier[info_begin] ( literal[string] )
keyword[with] identifier[mkdtemp] ( literal[string] ) keyword[as] identifier[tempsourcedir] :
identifier[copy_files] ( identifier[existing_source_files] , identifier[tempsourcedir] )
keyword[if] identifier[os] . identifier[path] . identifier[exists] ( literal[string] ) keyword[and] literal[string] keyword[not] keyword[in] identifier[source_files] :
identifier[copy_files] ([ literal[string] ], identifier[tempsourcedir] )
keyword[if] literal[string] keyword[not] keyword[in] identifier[source_files] :
identifier[copy_files] ([ literal[string] ], identifier[tempsourcedir] )
identifier[info_begin] ( literal[string] )
keyword[with] identifier[cd] ( identifier[tempsourcedir] ):
keyword[with] identifier[mkdtemp] ( literal[string] ) keyword[as] identifier[tempdir] :
identifier[os] . identifier[environ] [ literal[string] ]= identifier[version]
identifier[run] ([ identifier[python] , literal[string] , literal[string] , literal[string] , identifier[tempdir] ])
identifier[sdist_filename] = identifier[get_one_file_in] ( identifier[tempdir] )
identifier[info_continue] ( literal[string] % identifier[os] . identifier[path] . identifier[basename] ( identifier[sdist_filename] ))
identifier[clean_sdist_files] = identifier[sorted] ( identifier[normalize_names] ( identifier[strip_sdist_extras] (
identifier[strip_toplevel_name] ( identifier[get_archive_file_list] ( identifier[sdist_filename] )))))
identifier[info_continue] ( literal[string] % identifier[len] ( identifier[clean_sdist_files] ))
identifier[missing_from_manifest] = identifier[set] ( identifier[source_files] )- identifier[set] ( identifier[clean_sdist_files] )
identifier[missing_from_VCS] = identifier[set] ( identifier[sdist_files] + identifier[clean_sdist_files] )- identifier[set] ( identifier[source_files] )
keyword[if] keyword[not] identifier[missing_from_manifest] keyword[and] keyword[not] identifier[missing_from_VCS] :
identifier[info] ( literal[string] )
keyword[else] :
identifier[error] ( literal[string]
% identifier[format_missing] ( identifier[missing_from_VCS] , identifier[missing_from_manifest] ,
literal[string] , literal[string] ))
identifier[suggestions] , identifier[unknowns] = identifier[find_suggestions] ( identifier[missing_from_manifest] )
identifier[user_asked_for_help] = identifier[update] keyword[or] ( identifier[create] keyword[and] keyword[not]
identifier[os] . identifier[path] . identifier[exists] ( literal[string] ))
keyword[if] literal[string] keyword[not] keyword[in] identifier[existing_source_files] :
keyword[if] identifier[suggestions] keyword[and] keyword[not] identifier[user_asked_for_help] :
identifier[info] ( literal[string] )
keyword[else] :
identifier[info] ( literal[string] )
keyword[if] identifier[suggestions] :
identifier[info] ( literal[string]
% identifier[format_list] ( identifier[suggestions] ))
keyword[if] identifier[user_asked_for_help] :
identifier[existed] = identifier[os] . identifier[path] . identifier[exists] ( literal[string] )
keyword[with] identifier[open] ( literal[string] , literal[string] ) keyword[as] identifier[f] :
keyword[if] keyword[not] identifier[existed] :
identifier[info] ( literal[string] )
keyword[else] :
identifier[info] ( literal[string] )
identifier[f] . identifier[write] ( literal[string] )
identifier[f] . identifier[write] ( literal[string] . identifier[join] ( identifier[suggestions] )+ literal[string] )
keyword[if] identifier[unknowns] :
identifier[info] ( literal[string]
% identifier[format_list] ( identifier[unknowns] ))
keyword[elif] identifier[user_asked_for_help] :
identifier[info] ( literal[string]
literal[string] )
identifier[all_ok] = keyword[False]
identifier[bad_ideas] = identifier[find_bad_ideas] ( identifier[all_source_files] )
identifier[filtered_bad_ideas] =[ identifier[bad_idea] keyword[for] identifier[bad_idea] keyword[in] identifier[bad_ideas]
keyword[if] keyword[not] identifier[file_matches] ( identifier[bad_idea] , identifier[IGNORE_BAD_IDEAS] )]
keyword[if] identifier[filtered_bad_ideas] :
identifier[warning] ( literal[string]
literal[string]
% identifier[filtered_bad_ideas] [ literal[int] ])
keyword[if] identifier[len] ( identifier[filtered_bad_ideas] )> literal[int] :
identifier[warning] ( literal[string]
% identifier[format_list] ( identifier[filtered_bad_ideas] [ literal[int] :]))
identifier[all_ok] = keyword[False]
keyword[return] identifier[all_ok] | def check_manifest(source_tree='.', create=False, update=False, python=sys.executable):
"""Compare a generated source distribution with list of files in a VCS.
Returns True if the manifest is fine.
"""
all_ok = True
if os.path.sep in python:
python = os.path.abspath(python) # depends on [control=['if'], data=['python']]
with cd(source_tree):
if not is_package():
raise Failure('This is not a Python project (no setup.py).') # depends on [control=['if'], data=[]]
read_config()
read_manifest()
info_begin('listing source files under version control')
all_source_files = sorted(get_vcs_files())
source_files = strip_sdist_extras(all_source_files)
info_continue(': %d files and directories' % len(source_files))
if not all_source_files:
raise Failure('There are no files added to version control!') # depends on [control=['if'], data=[]]
info_begin('building an sdist')
with mkdtemp('-sdist') as tempdir:
run([python, 'setup.py', 'sdist', '-d', tempdir])
sdist_filename = get_one_file_in(tempdir)
info_continue(': %s' % os.path.basename(sdist_filename))
sdist_files = sorted(normalize_names(strip_sdist_extras(strip_toplevel_name(get_archive_file_list(sdist_filename)))))
info_continue(': %d files and directories' % len(sdist_files))
version = extract_version_from_filename(sdist_filename) # depends on [control=['with'], data=['tempdir']]
existing_source_files = list(filter(os.path.exists, all_source_files))
missing_source_files = sorted(set(all_source_files) - set(existing_source_files))
if missing_source_files:
warning('some files listed as being under source control are missing:\n%s' % format_list(missing_source_files)) # depends on [control=['if'], data=[]]
info_begin('copying source files to a temporary directory')
with mkdtemp('-sources') as tempsourcedir:
copy_files(existing_source_files, tempsourcedir)
if os.path.exists('MANIFEST.in') and 'MANIFEST.in' not in source_files:
# See https://github.com/mgedmin/check-manifest/issues/7
# if we do this, we will emit a warning about MANIFEST.in not
# being in source control; if we don't do this, the user
# gets confused about their new manifest rules being
# ignored.
copy_files(['MANIFEST.in'], tempsourcedir) # depends on [control=['if'], data=[]]
if 'setup.py' not in source_files:
# See https://github.com/mgedmin/check-manifest/issues/46
# if we do this, we will emit a warning about setup.py not
# being in source control; if we don't do this, the user
# gets a scary error
copy_files(['setup.py'], tempsourcedir) # depends on [control=['if'], data=[]]
info_begin('building a clean sdist')
with cd(tempsourcedir):
with mkdtemp('-sdist') as tempdir:
os.environ['SETUPTOOLS_SCM_PRETEND_VERSION'] = version
run([python, 'setup.py', 'sdist', '-d', tempdir])
sdist_filename = get_one_file_in(tempdir)
info_continue(': %s' % os.path.basename(sdist_filename))
clean_sdist_files = sorted(normalize_names(strip_sdist_extras(strip_toplevel_name(get_archive_file_list(sdist_filename)))))
info_continue(': %d files and directories' % len(clean_sdist_files)) # depends on [control=['with'], data=['tempdir']] # depends on [control=['with'], data=[]] # depends on [control=['with'], data=['mkdtemp', 'tempsourcedir']]
missing_from_manifest = set(source_files) - set(clean_sdist_files)
missing_from_VCS = set(sdist_files + clean_sdist_files) - set(source_files)
if not missing_from_manifest and (not missing_from_VCS):
info('lists of files in version control and sdist match') # depends on [control=['if'], data=[]]
else:
error('lists of files in version control and sdist do not match!\n%s' % format_missing(missing_from_VCS, missing_from_manifest, 'VCS', 'sdist'))
(suggestions, unknowns) = find_suggestions(missing_from_manifest)
user_asked_for_help = update or (create and (not os.path.exists('MANIFEST.in')))
if 'MANIFEST.in' not in existing_source_files:
if suggestions and (not user_asked_for_help):
info("no MANIFEST.in found; you can run 'check-manifest -c' to create one") # depends on [control=['if'], data=[]]
else:
info('no MANIFEST.in found') # depends on [control=['if'], data=[]]
if suggestions:
info('suggested MANIFEST.in rules:\n%s' % format_list(suggestions))
if user_asked_for_help:
existed = os.path.exists('MANIFEST.in')
with open('MANIFEST.in', 'a') as f:
if not existed:
info('creating MANIFEST.in') # depends on [control=['if'], data=[]]
else:
info('updating MANIFEST.in')
f.write('\n# added by check_manifest.py\n')
f.write('\n'.join(suggestions) + '\n') # depends on [control=['with'], data=['f']]
if unknowns:
info("don't know how to come up with rules matching\n%s" % format_list(unknowns)) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif user_asked_for_help:
info("don't know how to come up with rules matching any of the files, sorry!") # depends on [control=['if'], data=[]]
all_ok = False
bad_ideas = find_bad_ideas(all_source_files)
filtered_bad_ideas = [bad_idea for bad_idea in bad_ideas if not file_matches(bad_idea, IGNORE_BAD_IDEAS)]
if filtered_bad_ideas:
warning("you have %s in source control!\nthat's a bad idea: auto-generated files should not be versioned" % filtered_bad_ideas[0])
if len(filtered_bad_ideas) > 1:
warning('this also applies to the following:\n%s' % format_list(filtered_bad_ideas[1:])) # depends on [control=['if'], data=[]]
all_ok = False # depends on [control=['if'], data=[]] # depends on [control=['with'], data=['cd']]
return all_ok |
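A minimal sketch of driving the check_manifest routine above from a script. The signature (source_tree, create, update, python) is taken directly from the function; the exit-code convention is an assumption.

import sys

if __name__ == '__main__':
    # check_manifest returns True when the sdist and VCS file lists agree
    ok = check_manifest(source_tree='.', create=False, update=False,
                        python=sys.executable)
    sys.exit(0 if ok else 1)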
def setup(app):
"""Install the plugin.
:param app: Sphinx application context.
"""
app.info('Initializing GitHub plugin')
app.add_role('ghissue', ghissue_role)
app.add_role('ghpull', ghissue_role)
app.add_role('ghuser', ghuser_role)
app.add_role('ghcommit', ghcommit_role)
app.add_config_value('github_project_url', None, 'env')
return | def function[setup, parameter[app]]:
constant[Install the plugin.
:param app: Sphinx application context.
]
call[name[app].info, parameter[constant[Initializing GitHub plugin]]]
call[name[app].add_role, parameter[constant[ghissue], name[ghissue_role]]]
call[name[app].add_role, parameter[constant[ghpull], name[ghissue_role]]]
call[name[app].add_role, parameter[constant[ghuser], name[ghuser_role]]]
call[name[app].add_role, parameter[constant[ghcommit], name[ghcommit_role]]]
call[name[app].add_config_value, parameter[constant[github_project_url], constant[None], constant[env]]]
return[None] | keyword[def] identifier[setup] ( identifier[app] ):
literal[string]
identifier[app] . identifier[info] ( literal[string] )
identifier[app] . identifier[add_role] ( literal[string] , identifier[ghissue_role] )
identifier[app] . identifier[add_role] ( literal[string] , identifier[ghissue_role] )
identifier[app] . identifier[add_role] ( literal[string] , identifier[ghuser_role] )
identifier[app] . identifier[add_role] ( literal[string] , identifier[ghcommit_role] )
identifier[app] . identifier[add_config_value] ( literal[string] , keyword[None] , literal[string] )
keyword[return] | def setup(app):
"""Install the plugin.
:param app: Sphinx application context.
"""
app.info('Initializing GitHub plugin')
app.add_role('ghissue', ghissue_role)
app.add_role('ghpull', ghissue_role)
app.add_role('ghuser', ghuser_role)
app.add_role('ghcommit', ghcommit_role)
app.add_config_value('github_project_url', None, 'env')
return |
def init_centers_widths(self, R):
"""Initialize prior of centers and widths
Returns
-------
centers : 2D array, with shape [K, n_dim]
Prior of factors' centers.
widths : 1D array, with shape [K, 1]
Prior of factors' widths.
"""
kmeans = KMeans(
init='k-means++',
n_clusters=self.K,
n_init=10,
random_state=100)
kmeans.fit(R)
centers = kmeans.cluster_centers_
widths = self._get_max_sigma(R) * np.ones((self.K, 1))
return centers, widths | def function[init_centers_widths, parameter[self, R]]:
constant[Initialize prior of centers and widths
Returns
-------
centers : 2D array, with shape [K, n_dim]
Prior of factors' centers.
widths : 1D array, with shape [K, 1]
Prior of factors' widths.
]
variable[kmeans] assign[=] call[name[KMeans], parameter[]]
call[name[kmeans].fit, parameter[name[R]]]
variable[centers] assign[=] name[kmeans].cluster_centers_
variable[widths] assign[=] binary_operation[call[name[self]._get_max_sigma, parameter[name[R]]] * call[name[np].ones, parameter[tuple[[<ast.Attribute object at 0x7da18fe91750>, <ast.Constant object at 0x7da18fe91870>]]]]]
return[tuple[[<ast.Name object at 0x7da18fe90f70>, <ast.Name object at 0x7da18fe92500>]]] | keyword[def] identifier[init_centers_widths] ( identifier[self] , identifier[R] ):
literal[string]
identifier[kmeans] = identifier[KMeans] (
identifier[init] = literal[string] ,
identifier[n_clusters] = identifier[self] . identifier[K] ,
identifier[n_init] = literal[int] ,
identifier[random_state] = literal[int] )
identifier[kmeans] . identifier[fit] ( identifier[R] )
identifier[centers] = identifier[kmeans] . identifier[cluster_centers_]
identifier[widths] = identifier[self] . identifier[_get_max_sigma] ( identifier[R] )* identifier[np] . identifier[ones] (( identifier[self] . identifier[K] , literal[int] ))
keyword[return] identifier[centers] , identifier[widths] | def init_centers_widths(self, R):
"""Initialize prior of centers and widths
Returns
-------
centers : 2D array, with shape [K, n_dim]
Prior of factors' centers.
widths : 1D array, with shape [K, 1]
Prior of factors' widths.
"""
kmeans = KMeans(init='k-means++', n_clusters=self.K, n_init=10, random_state=100)
kmeans.fit(R)
centers = kmeans.cluster_centers_
widths = self._get_max_sigma(R) * np.ones((self.K, 1))
return (centers, widths) |
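A hypothetical call to init_centers_widths, assuming a fitted TFA-style object named model that exposes the attribute K used above; the coordinate array R is synthetic.

import numpy as np

R = np.random.rand(500, 3)              # 500 voxel coordinates in 3-D
centers, widths = model.init_centers_widths(R)
# k-means supplies K cluster centers; every width starts at the max sigma
assert centers.shape == (model.K, 3)
assert widths.shape == (model.K, 1)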
def to_pfull_from_phalf(arr, pfull_coord):
"""Compute data at full pressure levels from values at half levels."""
phalf_top = arr.isel(**{internal_names.PHALF_STR: slice(1, None)})
phalf_top = replace_coord(phalf_top, internal_names.PHALF_STR,
internal_names.PFULL_STR, pfull_coord)
phalf_bot = arr.isel(**{internal_names.PHALF_STR: slice(None, -1)})
phalf_bot = replace_coord(phalf_bot, internal_names.PHALF_STR,
internal_names.PFULL_STR, pfull_coord)
return 0.5*(phalf_bot + phalf_top) | def function[to_pfull_from_phalf, parameter[arr, pfull_coord]]:
constant[Compute data at full pressure levels from values at half levels.]
variable[phalf_top] assign[=] call[name[arr].isel, parameter[]]
variable[phalf_top] assign[=] call[name[replace_coord], parameter[name[phalf_top], name[internal_names].PHALF_STR, name[internal_names].PFULL_STR, name[pfull_coord]]]
variable[phalf_bot] assign[=] call[name[arr].isel, parameter[]]
variable[phalf_bot] assign[=] call[name[replace_coord], parameter[name[phalf_bot], name[internal_names].PHALF_STR, name[internal_names].PFULL_STR, name[pfull_coord]]]
return[binary_operation[constant[0.5] * binary_operation[name[phalf_bot] + name[phalf_top]]]] | keyword[def] identifier[to_pfull_from_phalf] ( identifier[arr] , identifier[pfull_coord] ):
literal[string]
identifier[phalf_top] = identifier[arr] . identifier[isel] (**{ identifier[internal_names] . identifier[PHALF_STR] : identifier[slice] ( literal[int] , keyword[None] )})
identifier[phalf_top] = identifier[replace_coord] ( identifier[phalf_top] , identifier[internal_names] . identifier[PHALF_STR] ,
identifier[internal_names] . identifier[PFULL_STR] , identifier[pfull_coord] )
identifier[phalf_bot] = identifier[arr] . identifier[isel] (**{ identifier[internal_names] . identifier[PHALF_STR] : identifier[slice] ( keyword[None] ,- literal[int] )})
identifier[phalf_bot] = identifier[replace_coord] ( identifier[phalf_bot] , identifier[internal_names] . identifier[PHALF_STR] ,
identifier[internal_names] . identifier[PFULL_STR] , identifier[pfull_coord] )
keyword[return] literal[int] *( identifier[phalf_bot] + identifier[phalf_top] ) | def to_pfull_from_phalf(arr, pfull_coord):
"""Compute data at full pressure levels from values at half levels."""
phalf_top = arr.isel(**{internal_names.PHALF_STR: slice(1, None)})
phalf_top = replace_coord(phalf_top, internal_names.PHALF_STR, internal_names.PFULL_STR, pfull_coord)
phalf_bot = arr.isel(**{internal_names.PHALF_STR: slice(None, -1)})
phalf_bot = replace_coord(phalf_bot, internal_names.PHALF_STR, internal_names.PFULL_STR, pfull_coord)
return 0.5 * (phalf_bot + phalf_top) |
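The heart of to_pfull_from_phalf is pairwise averaging of K+1 half-level values onto K full levels. A dependency-free numpy illustration of that step (the real function works on labelled xarray objects):

import numpy as np

phalf = np.array([0.0, 10.0, 30.0, 70.0])   # K+1 = 4 half-level pressures
pfull = 0.5 * (phalf[:-1] + phalf[1:])      # K = 3 full-level pressures
print(pfull)                                # [ 5. 20. 50.]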
def add_project(project,**kwargs):
"""
Add a new project
returns a project complexmodel
"""
user_id = kwargs.get('user_id')
existing_proj = get_project_by_name(project.name,user_id=user_id)
if len(existing_proj) > 0:
raise HydraError("A Project with the name \"%s\" already exists"%(project.name,))
#check_perm(user_id, 'add_project')
proj_i = Project()
proj_i.name = project.name
proj_i.description = project.description
proj_i.created_by = user_id
attr_map = hdb.add_resource_attributes(proj_i, project.attributes)
db.DBSession.flush() #Needed to get the resource attr's ID
proj_data = _add_project_attribute_data(proj_i, attr_map, project.attribute_data)
proj_i.attribute_data = proj_data
proj_i.set_owner(user_id)
db.DBSession.add(proj_i)
db.DBSession.flush()
return proj_i | def function[add_project, parameter[project]]:
constant[
Add a new project
returns a project complexmodel
]
variable[user_id] assign[=] call[name[kwargs].get, parameter[constant[user_id]]]
variable[existing_proj] assign[=] call[name[get_project_by_name], parameter[name[project].name]]
if compare[call[name[len], parameter[name[existing_proj]]] greater[>] constant[0]] begin[:]
<ast.Raise object at 0x7da20e9546a0>
variable[proj_i] assign[=] call[name[Project], parameter[]]
name[proj_i].name assign[=] name[project].name
name[proj_i].description assign[=] name[project].description
name[proj_i].created_by assign[=] name[user_id]
variable[attr_map] assign[=] call[name[hdb].add_resource_attributes, parameter[name[proj_i], name[project].attributes]]
call[name[db].DBSession.flush, parameter[]]
variable[proj_data] assign[=] call[name[_add_project_attribute_data], parameter[name[proj_i], name[attr_map], name[project].attribute_data]]
name[proj_i].attribute_data assign[=] name[proj_data]
call[name[proj_i].set_owner, parameter[name[user_id]]]
call[name[db].DBSession.add, parameter[name[proj_i]]]
call[name[db].DBSession.flush, parameter[]]
return[name[proj_i]] | keyword[def] identifier[add_project] ( identifier[project] ,** identifier[kwargs] ):
literal[string]
identifier[user_id] = identifier[kwargs] . identifier[get] ( literal[string] )
identifier[existing_proj] = identifier[get_project_by_name] ( identifier[project] . identifier[name] , identifier[user_id] = identifier[user_id] )
keyword[if] identifier[len] ( identifier[existing_proj] )> literal[int] :
keyword[raise] identifier[HydraError] ( literal[string] %( identifier[project] . identifier[name] ,))
identifier[proj_i] = identifier[Project] ()
identifier[proj_i] . identifier[name] = identifier[project] . identifier[name]
identifier[proj_i] . identifier[description] = identifier[project] . identifier[description]
identifier[proj_i] . identifier[created_by] = identifier[user_id]
identifier[attr_map] = identifier[hdb] . identifier[add_resource_attributes] ( identifier[proj_i] , identifier[project] . identifier[attributes] )
identifier[db] . identifier[DBSession] . identifier[flush] ()
identifier[proj_data] = identifier[_add_project_attribute_data] ( identifier[proj_i] , identifier[attr_map] , identifier[project] . identifier[attribute_data] )
identifier[proj_i] . identifier[attribute_data] = identifier[proj_data]
identifier[proj_i] . identifier[set_owner] ( identifier[user_id] )
identifier[db] . identifier[DBSession] . identifier[add] ( identifier[proj_i] )
identifier[db] . identifier[DBSession] . identifier[flush] ()
keyword[return] identifier[proj_i] | def add_project(project, **kwargs):
"""
Add a new project
returns a project complexmodel
"""
user_id = kwargs.get('user_id')
existing_proj = get_project_by_name(project.name, user_id=user_id)
if len(existing_proj) > 0:
raise HydraError('A Project with the name "%s" already exists' % (project.name,)) # depends on [control=['if'], data=[]]
#check_perm(user_id, 'add_project')
proj_i = Project()
proj_i.name = project.name
proj_i.description = project.description
proj_i.created_by = user_id
attr_map = hdb.add_resource_attributes(proj_i, project.attributes)
db.DBSession.flush() #Needed to get the resource attr's ID
proj_data = _add_project_attribute_data(proj_i, attr_map, project.attribute_data)
proj_i.attribute_data = proj_data
proj_i.set_owner(user_id)
db.DBSession.add(proj_i)
db.DBSession.flush()
return proj_i |
def execute(self, query, args=None):
'''Execute a query'''
conn = self._get_db()
while (yield self.nextset()):
pass
if PY2: # Use bytes on Python 2 always
encoding = conn.encoding
def ensure_bytes(x):
if isinstance(x, unicode):
x = x.encode(encoding)
return x
query = ensure_bytes(query)
if args is not None:
if isinstance(args, (tuple, list)):
args = tuple(map(ensure_bytes, args))
elif isinstance(args, dict):
args = dict((ensure_bytes(key), ensure_bytes(val)) for (key, val) in args.items())
else:
args = ensure_bytes(args)
if args is not None:
query = query % self._escape_args(args, conn)
yield self._query(query)
self._executed = query
raise gen.Return(self.rowcount) | def function[execute, parameter[self, query, args]]:
constant[Execute a query]
variable[conn] assign[=] call[name[self]._get_db, parameter[]]
while <ast.Yield object at 0x7da18f09cd60> begin[:]
pass
if name[PY2] begin[:]
variable[encoding] assign[=] name[conn].encoding
def function[ensure_bytes, parameter[x]]:
if call[name[isinstance], parameter[name[x], name[unicode]]] begin[:]
variable[x] assign[=] call[name[x].encode, parameter[name[encoding]]]
return[name[x]]
variable[query] assign[=] call[name[ensure_bytes], parameter[name[query]]]
if compare[name[args] is_not constant[None]] begin[:]
if call[name[isinstance], parameter[name[args], tuple[[<ast.Name object at 0x7da18f09c520>, <ast.Name object at 0x7da18f09e950>]]]] begin[:]
variable[args] assign[=] call[name[tuple], parameter[call[name[map], parameter[name[ensure_bytes], name[args]]]]]
if compare[name[args] is_not constant[None]] begin[:]
variable[query] assign[=] binary_operation[name[query] <ast.Mod object at 0x7da2590d6920> call[name[self]._escape_args, parameter[name[args], name[conn]]]]
<ast.Yield object at 0x7da18eb57d00>
name[self]._executed assign[=] name[query]
<ast.Raise object at 0x7da18eb57400> | keyword[def] identifier[execute] ( identifier[self] , identifier[query] , identifier[args] = keyword[None] ):
literal[string]
identifier[conn] = identifier[self] . identifier[_get_db] ()
keyword[while] ( keyword[yield] identifier[self] . identifier[nextset] ()):
keyword[pass]
keyword[if] identifier[PY2] :
identifier[encoding] = identifier[conn] . identifier[encoding]
keyword[def] identifier[ensure_bytes] ( identifier[x] ):
keyword[if] identifier[isinstance] ( identifier[x] , identifier[unicode] ):
identifier[x] = identifier[x] . identifier[encode] ( identifier[encoding] )
keyword[return] identifier[x]
identifier[query] = identifier[ensure_bytes] ( identifier[query] )
keyword[if] identifier[args] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[isinstance] ( identifier[args] ,( identifier[tuple] , identifier[list] )):
identifier[args] = identifier[tuple] ( identifier[map] ( identifier[ensure_bytes] , identifier[args] ))
keyword[elif] identifier[isinstance] ( identifier[args] , identifier[dict] ):
identifier[args] = identifier[dict] (( identifier[ensure_bytes] ( identifier[key] ), identifier[ensure_bytes] ( identifier[val] )) keyword[for] ( identifier[key] , identifier[val] ) keyword[in] identifier[args] . identifier[items] ())
keyword[else] :
identifier[args] = identifier[ensure_bytes] ( identifier[args] )
keyword[if] identifier[args] keyword[is] keyword[not] keyword[None] :
identifier[query] = identifier[query] % identifier[self] . identifier[_escape_args] ( identifier[args] , identifier[conn] )
keyword[yield] identifier[self] . identifier[_query] ( identifier[query] )
identifier[self] . identifier[_executed] = identifier[query]
keyword[raise] identifier[gen] . identifier[Return] ( identifier[self] . identifier[rowcount] ) | def execute(self, query, args=None):
"""Execute a query"""
conn = self._get_db()
while (yield self.nextset()):
pass # depends on [control=['while'], data=[]]
if PY2: # Use bytes on Python 2 always
encoding = conn.encoding
def ensure_bytes(x):
if isinstance(x, unicode):
x = x.encode(encoding) # depends on [control=['if'], data=[]]
return x
query = ensure_bytes(query)
if args is not None:
if isinstance(args, (tuple, list)):
args = tuple(map(ensure_bytes, args)) # depends on [control=['if'], data=[]]
elif isinstance(args, dict):
args = dict(((ensure_bytes(key), ensure_bytes(val)) for (key, val) in args.items())) # depends on [control=['if'], data=[]]
else:
args = ensure_bytes(args) # depends on [control=['if'], data=['args']] # depends on [control=['if'], data=[]]
if args is not None:
query = query % self._escape_args(args, conn) # depends on [control=['if'], data=['args']]
yield self._query(query)
self._executed = query
raise gen.Return(self.rowcount) |
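A sketch of driving this coroutine-style cursor under Tornado. The connection object, the table, and a synchronous fetchone() over a buffered result set are assumptions about the host library; what is grounded in the code above is that execute() is yielded and resolves to the row count.

from tornado import gen

@gen.coroutine
def fetch_user(conn, user_id):
    cur = conn.cursor()                       # hypothetical connection API
    rowcount = yield cur.execute("SELECT name FROM users WHERE id = %s",
                                 (user_id,))
    raise gen.Return(cur.fetchone() if rowcount else None)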
def get_subnetid(vm_):
'''
Returns the SubnetId to use
'''
subnetid = config.get_cloud_config_value(
'subnetid', vm_, __opts__, search_global=False
)
if subnetid:
return subnetid
subnetname = config.get_cloud_config_value(
'subnetname', vm_, __opts__, search_global=False
)
if subnetname:
return _get_subnetname_id(subnetname)
return None | def function[get_subnetid, parameter[vm_]]:
constant[
Returns the SubnetId to use
]
variable[subnetid] assign[=] call[name[config].get_cloud_config_value, parameter[constant[subnetid], name[vm_], name[__opts__]]]
if name[subnetid] begin[:]
return[name[subnetid]]
variable[subnetname] assign[=] call[name[config].get_cloud_config_value, parameter[constant[subnetname], name[vm_], name[__opts__]]]
if name[subnetname] begin[:]
return[call[name[_get_subnetname_id], parameter[name[subnetname]]]]
return[constant[None]] | keyword[def] identifier[get_subnetid] ( identifier[vm_] ):
literal[string]
identifier[subnetid] = identifier[config] . identifier[get_cloud_config_value] (
literal[string] , identifier[vm_] , identifier[__opts__] , identifier[search_global] = keyword[False]
)
keyword[if] identifier[subnetid] :
keyword[return] identifier[subnetid]
identifier[subnetname] = identifier[config] . identifier[get_cloud_config_value] (
literal[string] , identifier[vm_] , identifier[__opts__] , identifier[search_global] = keyword[False]
)
keyword[if] identifier[subnetname] :
keyword[return] identifier[_get_subnetname_id] ( identifier[subnetname] )
keyword[return] keyword[None] | def get_subnetid(vm_):
"""
Returns the SubnetId to use
"""
subnetid = config.get_cloud_config_value('subnetid', vm_, __opts__, search_global=False)
if subnetid:
return subnetid # depends on [control=['if'], data=[]]
subnetname = config.get_cloud_config_value('subnetname', vm_, __opts__, search_global=False)
if subnetname:
return _get_subnetname_id(subnetname) # depends on [control=['if'], data=[]]
return None |
def cli(ctx, settings, app):
"""Manage Morp application services"""
if app is None and settings is None:
print('Either --app or --settings must be supplied')
ctx.ensure_object(dict)
ctx.obj['app'] = app
ctx.obj['settings'] = settings | def function[cli, parameter[ctx, settings, app]]:
constant[Manage Morp application services]
if <ast.BoolOp object at 0x7da204565120> begin[:]
call[name[print], parameter[constant[Either --app or --settings must be supplied]]]
call[name[ctx].ensure_object, parameter[name[dict]]]
call[name[ctx].obj][constant[app]] assign[=] name[app]
call[name[ctx].obj][constant[settings]] assign[=] name[settings] | keyword[def] identifier[cli] ( identifier[ctx] , identifier[settings] , identifier[app] ):
literal[string]
keyword[if] identifier[app] keyword[is] keyword[None] keyword[and] identifier[settings] keyword[is] keyword[None] :
identifier[print] ( literal[string] )
identifier[ctx] . identifier[ensure_object] ( identifier[dict] )
identifier[ctx] . identifier[obj] [ literal[string] ]= identifier[app]
identifier[ctx] . identifier[obj] [ literal[string] ]= identifier[settings] | def cli(ctx, settings, app):
"""Manage Morp application services"""
if app is None and settings is None:
print('Either --app or --settings must be supplied') # depends on [control=['if'], data=[]]
ctx.ensure_object(dict)
ctx.obj['app'] = app
ctx.obj['settings'] = settings |
def which_users_can(self, name):
"""Which role can SendMail? """
_roles = self.which_roles_can(name)
result = [self.get_role_members(i.get('role')) for i in _roles]
return result | def function[which_users_can, parameter[self, name]]:
constant[Return the members of every role that can perform ``name`` (e.g. SendMail).]
variable[_roles] assign[=] call[name[self].which_roles_can, parameter[name[name]]]
variable[result] assign[=] <ast.ListComp object at 0x7da1b0fdef20>
return[name[result]] | keyword[def] identifier[which_users_can] ( identifier[self] , identifier[name] ):
literal[string]
identifier[_roles] = identifier[self] . identifier[which_roles_can] ( identifier[name] )
identifier[result] =[ identifier[self] . identifier[get_role_members] ( identifier[i] . identifier[get] ( literal[string] )) keyword[for] identifier[i] keyword[in] identifier[_roles] ]
keyword[return] identifier[result] | def which_users_can(self, name):
"""Which role can SendMail? """
_roles = self.which_roles_can(name)
result = [self.get_role_members(i.get('role')) for i in _roles]
return result |
def from_csv(input_csv_pattern, headers=None, schema_file=None):
"""Create a Metrics instance from csv file pattern.
Args:
input_csv_pattern: Path to Csv file pattern (with no header). Can be local or GCS path.
headers: Csv headers.
schema_file: Path to a JSON file containing BigQuery schema. Used if "headers" is None.
Returns:
a Metrics instance.
Raises:
ValueError if both headers and schema_file are None.
"""
if headers is not None:
names = headers
elif schema_file is not None:
with _util.open_local_or_gcs(schema_file, mode='r') as f:
schema = json.load(f)
names = [x['name'] for x in schema]
else:
raise ValueError('Either headers or schema_file is needed')
metrics = Metrics(input_csv_pattern=input_csv_pattern, headers=names)
return metrics | def function[from_csv, parameter[input_csv_pattern, headers, schema_file]]:
constant[Create a Metrics instance from csv file pattern.
Args:
input_csv_pattern: Path to Csv file pattern (with no header). Can be local or GCS path.
headers: Csv headers.
schema_file: Path to a JSON file containing BigQuery schema. Used if "headers" is None.
Returns:
a Metrics instance.
Raises:
ValueError if both headers and schema_file are None.
]
if compare[name[headers] is_not constant[None]] begin[:]
variable[names] assign[=] name[headers]
variable[metrics] assign[=] call[name[Metrics], parameter[]]
return[name[metrics]] | keyword[def] identifier[from_csv] ( identifier[input_csv_pattern] , identifier[headers] = keyword[None] , identifier[schema_file] = keyword[None] ):
literal[string]
keyword[if] identifier[headers] keyword[is] keyword[not] keyword[None] :
identifier[names] = identifier[headers]
keyword[elif] identifier[schema_file] keyword[is] keyword[not] keyword[None] :
keyword[with] identifier[_util] . identifier[open_local_or_gcs] ( identifier[schema_file] , identifier[mode] = literal[string] ) keyword[as] identifier[f] :
identifier[schema] = identifier[json] . identifier[load] ( identifier[f] )
identifier[names] =[ identifier[x] [ literal[string] ] keyword[for] identifier[x] keyword[in] identifier[schema] ]
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[metrics] = identifier[Metrics] ( identifier[input_csv_pattern] = identifier[input_csv_pattern] , identifier[headers] = identifier[names] )
keyword[return] identifier[metrics] | def from_csv(input_csv_pattern, headers=None, schema_file=None):
"""Create a Metrics instance from csv file pattern.
Args:
input_csv_pattern: Path to Csv file pattern (with no header). Can be local or GCS path.
headers: Csv headers.
schema_file: Path to a JSON file containing BigQuery schema. Used if "headers" is None.
Returns:
a Metrics instance.
Raises:
ValueError if both headers and schema_file are None.
"""
if headers is not None:
names = headers # depends on [control=['if'], data=['headers']]
elif schema_file is not None:
with _util.open_local_or_gcs(schema_file, mode='r') as f:
schema = json.load(f) # depends on [control=['with'], data=['f']]
names = [x['name'] for x in schema] # depends on [control=['if'], data=['schema_file']]
else:
raise ValueError('Either headers or schema_file is needed')
metrics = Metrics(input_csv_pattern=input_csv_pattern, headers=names)
return metrics |
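Both construction paths the docstring describes, as a short sketch; the file patterns and header names are placeholders.

# explicit CSV headers
metrics = from_csv('gs://bucket/eval/*.csv', headers=['target', 'predicted'])

# or derive the column names from a BigQuery JSON schema file
metrics = from_csv('./eval/*.csv', schema_file='./schema.json')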
def set_legend(self, legend):
"""legend needs to be a list, tuple or None"""
assert(isinstance(legend, list) or isinstance(legend, tuple) or
legend is None)
if legend:
self.legend = [quote(a) for a in legend]
else:
self.legend = None | def function[set_legend, parameter[self, legend]]:
constant[legend needs to be a list, tuple or None]
assert[<ast.BoolOp object at 0x7da1b10e6e60>]
if name[legend] begin[:]
name[self].legend assign[=] <ast.ListComp object at 0x7da1b10e5480> | keyword[def] identifier[set_legend] ( identifier[self] , identifier[legend] ):
literal[string]
keyword[assert] ( identifier[isinstance] ( identifier[legend] , identifier[list] ) keyword[or] identifier[isinstance] ( identifier[legend] , identifier[tuple] ) keyword[or]
identifier[legend] keyword[is] keyword[None] )
keyword[if] identifier[legend] :
identifier[self] . identifier[legend] =[ identifier[quote] ( identifier[a] ) keyword[for] identifier[a] keyword[in] identifier[legend] ]
keyword[else] :
identifier[self] . identifier[legend] = keyword[None] | def set_legend(self, legend):
"""legend needs to be a list, tuple or None"""
assert isinstance(legend, list) or isinstance(legend, tuple) or legend is None
if legend:
self.legend = [quote(a) for a in legend] # depends on [control=['if'], data=[]]
else:
self.legend = None |
def title_translations(self, key, value):
"""Populate the ``title_translations`` key."""
return {
'language': langdetect.detect(value.get('a')),
'source': value.get('9'),
'subtitle': value.get('b'),
'title': value.get('a'),
} | def function[title_translations, parameter[self, key, value]]:
constant[Populate the ``title_translations`` key.]
return[dictionary[[<ast.Constant object at 0x7da20c6c4e20>, <ast.Constant object at 0x7da20c6c6fe0>, <ast.Constant object at 0x7da20c6c7310>, <ast.Constant object at 0x7da20c6c5240>], [<ast.Call object at 0x7da20c6c6b90>, <ast.Call object at 0x7da20c6c72b0>, <ast.Call object at 0x7da20c6c5db0>, <ast.Call object at 0x7da20c6c53c0>]]] | keyword[def] identifier[title_translations] ( identifier[self] , identifier[key] , identifier[value] ):
literal[string]
keyword[return] {
literal[string] : identifier[langdetect] . identifier[detect] ( identifier[value] . identifier[get] ( literal[string] )),
literal[string] : identifier[value] . identifier[get] ( literal[string] ),
literal[string] : identifier[value] . identifier[get] ( literal[string] ),
literal[string] : identifier[value] . identifier[get] ( literal[string] ),
} | def title_translations(self, key, value):
"""Populate the ``title_translations`` key."""
return {'language': langdetect.detect(value.get('a')), 'source': value.get('9'), 'subtitle': value.get('b'), 'title': value.get('a')} |
def unsubscribe(self, transform="", downlink=False):
"""Unsubscribes from a previously subscribed stream. Note that the same values of transform
and downlink must be passed in order to do the correct unsubscribe::
s.subscribe(callback,transform="if last")
s.unsubscribe(transform="if last")
"""
streampath = self.path
if downlink:
streampath += "/downlink"
return self.db.unsubscribe(streampath, transform) | def function[unsubscribe, parameter[self, transform, downlink]]:
constant[Unsubscribes from a previously subscribed stream. Note that the same values of transform
and downlink must be passed in order to do the correct unsubscribe::
s.subscribe(callback,transform="if last")
s.unsubscribe(transform="if last")
]
variable[streampath] assign[=] name[self].path
if name[downlink] begin[:]
<ast.AugAssign object at 0x7da204620340>
return[call[name[self].db.unsubscribe, parameter[name[streampath], name[transform]]]] | keyword[def] identifier[unsubscribe] ( identifier[self] , identifier[transform] = literal[string] , identifier[downlink] = keyword[False] ):
literal[string]
identifier[streampath] = identifier[self] . identifier[path]
keyword[if] identifier[downlink] :
identifier[streampath] += literal[string]
keyword[return] identifier[self] . identifier[db] . identifier[unsubscribe] ( identifier[streampath] , identifier[transform] ) | def unsubscribe(self, transform='', downlink=False):
"""Unsubscribes from a previously subscribed stream. Note that the same values of transform
and downlink must be passed in order to do the correct unsubscribe::
s.subscribe(callback,transform="if last")
s.unsubscribe(transform="if last")
"""
streampath = self.path
if downlink:
streampath += '/downlink' # depends on [control=['if'], data=[]]
return self.db.unsubscribe(streampath, transform) |
def _get_auth_from_netrc(self, hostname):
"""Try to find login auth in ``~/.netrc``."""
try:
hostauth = netrc(self.NETRC_FILE)
except IOError as cause:
if cause.errno != errno.ENOENT:
raise
return None
except NetrcParseError as cause:
raise # TODO: Map to common base class, so caller has to handle fewer error types?
# Try to find specific `user@host` credentials first, then just `host`
auth = hostauth.hosts.get('{}@{}'.format(self.user or getpass.getuser(), hostname), None)
if not auth:
auth = hostauth.hosts.get(hostname, None)
if auth:
username, account, password = auth # pylint: disable=unpacking-non-sequence
if username:
self.user = username
if password == 'base64':
# support for password obfuscation, prevent "over the shoulder lookup"
self.password = account.decode('base64')
elif password:
self.password = password
return 'netrc' | def function[_get_auth_from_netrc, parameter[self, hostname]]:
constant[Try to find login auth in ``~/.netrc``.]
<ast.Try object at 0x7da2043476d0>
variable[auth] assign[=] call[name[hostauth].hosts.get, parameter[call[constant[{}@{}].format, parameter[<ast.BoolOp object at 0x7da204345510>, name[hostname]]], constant[None]]]
if <ast.UnaryOp object at 0x7da204346ce0> begin[:]
variable[auth] assign[=] call[name[hostauth].hosts.get, parameter[name[hostname], constant[None]]]
if name[auth] begin[:]
<ast.Tuple object at 0x7da2043447c0> assign[=] name[auth]
if name[username] begin[:]
name[self].user assign[=] name[username]
if compare[name[password] equal[==] constant[base64]] begin[:]
name[self].password assign[=] call[name[account].decode, parameter[constant[base64]]]
return[constant[netrc]] | keyword[def] identifier[_get_auth_from_netrc] ( identifier[self] , identifier[hostname] ):
literal[string]
keyword[try] :
identifier[hostauth] = identifier[netrc] ( identifier[self] . identifier[NETRC_FILE] )
keyword[except] identifier[IOError] keyword[as] identifier[cause] :
keyword[if] identifier[cause] . identifier[errno] != identifier[errno] . identifier[ENOENT] :
keyword[raise]
keyword[return] keyword[None]
keyword[except] identifier[NetrcParseError] keyword[as] identifier[cause] :
keyword[raise]
identifier[auth] = identifier[hostauth] . identifier[hosts] . identifier[get] ( literal[string] . identifier[format] ( identifier[self] . identifier[user] keyword[or] identifier[getpass] . identifier[getuser] (), identifier[hostname] ), keyword[None] )
keyword[if] keyword[not] identifier[auth] :
identifier[auth] = identifier[hostauth] . identifier[hosts] . identifier[get] ( identifier[hostname] , keyword[None] )
keyword[if] identifier[auth] :
identifier[username] , identifier[account] , identifier[password] = identifier[auth]
keyword[if] identifier[username] :
identifier[self] . identifier[user] = identifier[username]
keyword[if] identifier[password] == literal[string] :
identifier[self] . identifier[password] = identifier[account] . identifier[decode] ( literal[string] )
keyword[elif] identifier[password] :
identifier[self] . identifier[password] = identifier[password]
keyword[return] literal[string] | def _get_auth_from_netrc(self, hostname):
"""Try to find login auth in ``~/.netrc``."""
try:
hostauth = netrc(self.NETRC_FILE) # depends on [control=['try'], data=[]]
except IOError as cause:
if cause.errno != errno.ENOENT:
raise # depends on [control=['if'], data=[]]
return None # depends on [control=['except'], data=['cause']]
except NetrcParseError as cause:
raise # TODO: Map to common base class, so caller has to handle fewer error types? # depends on [control=['except'], data=[]]
# Try to find specific `user@host` credentials first, then just `host`
auth = hostauth.hosts.get('{}@{}'.format(self.user or getpass.getuser(), hostname), None)
if not auth:
auth = hostauth.hosts.get(hostname, None) # depends on [control=['if'], data=[]]
if auth:
(username, account, password) = auth # pylint: disable=unpacking-non-sequence
if username:
self.user = username # depends on [control=['if'], data=[]]
if password == 'base64':
# support for password obfuscation, prevent "over the shoulder lookup"
self.password = account.decode('base64') # depends on [control=['if'], data=[]]
elif password:
self.password = password # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return 'netrc' |
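Illustrative ~/.netrc entries for the lookup order above: the user@host machine entry is consulted before the bare hostname, and a literal password of 'base64' makes the account field (here, base64 for 'hunter2') the real, obfuscated password. All names and secrets are made up.

machine alice@repo.example.org login alice password s3cret
machine repo.example.org login bob account aHVudGVyMg== password base64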
def bulk_write(self, requests, ordered=True,
bypass_document_validation=False):
"""Send a batch of write operations to the server.
Requests are passed as a list of write operation instances (
:class:`~pymongo.operations.InsertOne`,
:class:`~pymongo.operations.UpdateOne`,
:class:`~pymongo.operations.UpdateMany`,
:class:`~pymongo.operations.ReplaceOne`,
:class:`~pymongo.operations.DeleteOne`, or
:class:`~pymongo.operations.DeleteMany`).
>>> for doc in db.test.find({}):
... print(doc)
...
{u'x': 1, u'_id': ObjectId('54f62e60fba5226811f634ef')}
{u'x': 1, u'_id': ObjectId('54f62e60fba5226811f634f0')}
>>> # DeleteMany, UpdateOne, and UpdateMany are also available.
...
>>> from pymongo import InsertOne, DeleteOne, ReplaceOne
>>> requests = [InsertOne({'y': 1}), DeleteOne({'x': 1}),
... ReplaceOne({'w': 1}, {'z': 1}, upsert=True)]
>>> result = db.test.bulk_write(requests)
>>> result.inserted_count
1
>>> result.deleted_count
1
>>> result.modified_count
0
>>> result.upserted_ids
{2: ObjectId('54f62ee28891e756a6e1abd5')}
>>> for doc in db.test.find({}):
... print(doc)
...
{u'x': 1, u'_id': ObjectId('54f62e60fba5226811f634f0')}
{u'y': 1, u'_id': ObjectId('54f62ee2fba5226811f634f1')}
{u'z': 1, u'_id': ObjectId('54f62ee28891e756a6e1abd5')}
:Parameters:
- `requests`: A list of write operations (see examples above).
- `ordered` (optional): If ``True`` (the default) requests will be
performed on the server serially, in the order provided. If an error
occurs all remaining operations are aborted. If ``False`` requests
will be performed on the server in arbitrary order, possibly in
parallel, and all operations will be attempted.
- `bypass_document_validation`: (optional) If ``True``, allows the
write to opt-out of document level validation. Default is
``False``.
:Returns:
An instance of :class:`~pymongo.results.BulkWriteResult`.
.. seealso:: :ref:`writes-and-ids`
.. note:: `bypass_document_validation` requires server version
**>= 3.2**
.. versionchanged:: 3.2
Added bypass_document_validation support
.. versionadded:: 3.0
"""
if not isinstance(requests, list):
raise TypeError("requests must be a list")
blk = _Bulk(self, ordered, bypass_document_validation)
for request in requests:
try:
request._add_to_bulk(blk)
except AttributeError:
raise TypeError("%r is not a valid request" % (request,))
bulk_api_result = blk.execute(self.write_concern.document)
if bulk_api_result is not None:
return BulkWriteResult(bulk_api_result, True)
return BulkWriteResult({}, False) | def function[bulk_write, parameter[self, requests, ordered, bypass_document_validation]]:
constant[Send a batch of write operations to the server.
Requests are passed as a list of write operation instances (
:class:`~pymongo.operations.InsertOne`,
:class:`~pymongo.operations.UpdateOne`,
:class:`~pymongo.operations.UpdateMany`,
:class:`~pymongo.operations.ReplaceOne`,
:class:`~pymongo.operations.DeleteOne`, or
:class:`~pymongo.operations.DeleteMany`).
>>> for doc in db.test.find({}):
... print(doc)
...
{u'x': 1, u'_id': ObjectId('54f62e60fba5226811f634ef')}
{u'x': 1, u'_id': ObjectId('54f62e60fba5226811f634f0')}
>>> # DeleteMany, UpdateOne, and UpdateMany are also available.
...
>>> from pymongo import InsertOne, DeleteOne, ReplaceOne
>>> requests = [InsertOne({'y': 1}), DeleteOne({'x': 1}),
... ReplaceOne({'w': 1}, {'z': 1}, upsert=True)]
>>> result = db.test.bulk_write(requests)
>>> result.inserted_count
1
>>> result.deleted_count
1
>>> result.modified_count
0
>>> result.upserted_ids
{2: ObjectId('54f62ee28891e756a6e1abd5')}
>>> for doc in db.test.find({}):
... print(doc)
...
{u'x': 1, u'_id': ObjectId('54f62e60fba5226811f634f0')}
{u'y': 1, u'_id': ObjectId('54f62ee2fba5226811f634f1')}
{u'z': 1, u'_id': ObjectId('54f62ee28891e756a6e1abd5')}
:Parameters:
- `requests`: A list of write operations (see examples above).
- `ordered` (optional): If ``True`` (the default) requests will be
performed on the server serially, in the order provided. If an error
occurs all remaining operations are aborted. If ``False`` requests
will be performed on the server in arbitrary order, possibly in
parallel, and all operations will be attempted.
- `bypass_document_validation`: (optional) If ``True``, allows the
write to opt-out of document level validation. Default is
``False``.
:Returns:
An instance of :class:`~pymongo.results.BulkWriteResult`.
.. seealso:: :ref:`writes-and-ids`
.. note:: `bypass_document_validation` requires server version
**>= 3.2**
.. versionchanged:: 3.2
Added bypass_document_validation support
.. versionadded:: 3.0
]
if <ast.UnaryOp object at 0x7da20c6a88e0> begin[:]
<ast.Raise object at 0x7da20c6abf40>
variable[blk] assign[=] call[name[_Bulk], parameter[name[self], name[ordered], name[bypass_document_validation]]]
for taget[name[request]] in starred[name[requests]] begin[:]
<ast.Try object at 0x7da20c7c85e0>
variable[bulk_api_result] assign[=] call[name[blk].execute, parameter[name[self].write_concern.document]]
if compare[name[bulk_api_result] is_not constant[None]] begin[:]
return[call[name[BulkWriteResult], parameter[name[bulk_api_result], constant[True]]]]
return[call[name[BulkWriteResult], parameter[dictionary[[], []], constant[False]]]] | keyword[def] identifier[bulk_write] ( identifier[self] , identifier[requests] , identifier[ordered] = keyword[True] ,
identifier[bypass_document_validation] = keyword[False] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[requests] , identifier[list] ):
keyword[raise] identifier[TypeError] ( literal[string] )
identifier[blk] = identifier[_Bulk] ( identifier[self] , identifier[ordered] , identifier[bypass_document_validation] )
keyword[for] identifier[request] keyword[in] identifier[requests] :
keyword[try] :
identifier[request] . identifier[_add_to_bulk] ( identifier[blk] )
keyword[except] identifier[AttributeError] :
keyword[raise] identifier[TypeError] ( literal[string] %( identifier[request] ,))
identifier[bulk_api_result] = identifier[blk] . identifier[execute] ( identifier[self] . identifier[write_concern] . identifier[document] )
keyword[if] identifier[bulk_api_result] keyword[is] keyword[not] keyword[None] :
keyword[return] identifier[BulkWriteResult] ( identifier[bulk_api_result] , keyword[True] )
keyword[return] identifier[BulkWriteResult] ({}, keyword[False] ) | def bulk_write(self, requests, ordered=True, bypass_document_validation=False):
"""Send a batch of write operations to the server.
Requests are passed as a list of write operation instances (
:class:`~pymongo.operations.InsertOne`,
:class:`~pymongo.operations.UpdateOne`,
:class:`~pymongo.operations.UpdateMany`,
:class:`~pymongo.operations.ReplaceOne`,
:class:`~pymongo.operations.DeleteOne`, or
:class:`~pymongo.operations.DeleteMany`).
>>> for doc in db.test.find({}):
... print(doc)
...
{u'x': 1, u'_id': ObjectId('54f62e60fba5226811f634ef')}
{u'x': 1, u'_id': ObjectId('54f62e60fba5226811f634f0')}
>>> # DeleteMany, UpdateOne, and UpdateMany are also available.
...
>>> from pymongo import InsertOne, DeleteOne, ReplaceOne
>>> requests = [InsertOne({'y': 1}), DeleteOne({'x': 1}),
... ReplaceOne({'w': 1}, {'z': 1}, upsert=True)]
>>> result = db.test.bulk_write(requests)
>>> result.inserted_count
1
>>> result.deleted_count
1
>>> result.modified_count
0
>>> result.upserted_ids
{2: ObjectId('54f62ee28891e756a6e1abd5')}
>>> for doc in db.test.find({}):
... print(doc)
...
{u'x': 1, u'_id': ObjectId('54f62e60fba5226811f634f0')}
{u'y': 1, u'_id': ObjectId('54f62ee2fba5226811f634f1')}
{u'z': 1, u'_id': ObjectId('54f62ee28891e756a6e1abd5')}
:Parameters:
- `requests`: A list of write operations (see examples above).
- `ordered` (optional): If ``True`` (the default) requests will be
performed on the server serially, in the order provided. If an error
occurs all remaining operations are aborted. If ``False`` requests
will be performed on the server in arbitrary order, possibly in
parallel, and all operations will be attempted.
- `bypass_document_validation`: (optional) If ``True``, allows the
write to opt-out of document level validation. Default is
``False``.
:Returns:
An instance of :class:`~pymongo.results.BulkWriteResult`.
.. seealso:: :ref:`writes-and-ids`
.. note:: `bypass_document_validation` requires server version
**>= 3.2**
.. versionchanged:: 3.2
Added bypass_document_validation support
.. versionadded:: 3.0
"""
if not isinstance(requests, list):
raise TypeError('requests must be a list') # depends on [control=['if'], data=[]]
blk = _Bulk(self, ordered, bypass_document_validation)
for request in requests:
try:
request._add_to_bulk(blk) # depends on [control=['try'], data=[]]
except AttributeError:
raise TypeError('%r is not a valid request' % (request,)) # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['request']]
bulk_api_result = blk.execute(self.write_concern.document)
if bulk_api_result is not None:
return BulkWriteResult(bulk_api_result, True) # depends on [control=['if'], data=['bulk_api_result']]
return BulkWriteResult({}, False) |
def do_show(self, repo):
'''
List repo attributes
'''
self.abort_on_nonexisting_effective_repo(repo, 'show')
repo = self.network.get_repo(repo)
repo.print_attributes() | def function[do_show, parameter[self, repo]]:
constant[
List repo attributes
]
call[name[self].abort_on_nonexisting_effective_repo, parameter[name[repo], constant[show]]]
variable[repo] assign[=] call[name[self].network.get_repo, parameter[name[repo]]]
call[name[repo].print_attributes, parameter[]] | keyword[def] identifier[do_show] ( identifier[self] , identifier[repo] ):
literal[string]
identifier[self] . identifier[abort_on_nonexisting_effective_repo] ( identifier[repo] , literal[string] )
identifier[repo] = identifier[self] . identifier[network] . identifier[get_repo] ( identifier[repo] )
identifier[repo] . identifier[print_attributes] () | def do_show(self, repo):
"""
List repo attributes
"""
self.abort_on_nonexisting_effective_repo(repo, 'show')
repo = self.network.get_repo(repo)
repo.print_attributes() |
def update(self, data, default=False):
"""Update this :attr:`Config` with ``data``.
:param data: must be a ``Mapping`` like object exposing the ``item``
method for iterating through key-value pairs.
:param default: if ``True`` the updated :attr:`settings` will also
set their :attr:`~Setting.default` attribute with the
updating value (provided it is a valid one).
"""
for name, value in data.items():
if value is not None:
self.set(name, value, default) | def function[update, parameter[self, data, default]]:
constant[Update this :attr:`Config` with ``data``.
:param data: must be a ``Mapping`` like object exposing the ``item``
method for iterating through key-value pairs.
:param default: if ``True`` the updated :attr:`settings` will also
set their :attr:`~Setting.default` attribute with the
updating value (provided it is a valid one).
]
for taget[tuple[[<ast.Name object at 0x7da204566950>, <ast.Name object at 0x7da204567880>]]] in starred[call[name[data].items, parameter[]]] begin[:]
if compare[name[value] is_not constant[None]] begin[:]
call[name[self].set, parameter[name[name], name[value], name[default]]] | keyword[def] identifier[update] ( identifier[self] , identifier[data] , identifier[default] = keyword[False] ):
literal[string]
keyword[for] identifier[name] , identifier[value] keyword[in] identifier[data] . identifier[items] ():
keyword[if] identifier[value] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[set] ( identifier[name] , identifier[value] , identifier[default] ) | def update(self, data, default=False):
"""Update this :attr:`Config` with ``data``.
:param data: must be a ``Mapping`` like object exposing the ``item``
method for iterating through key-value pairs.
:param default: if ``True`` the updated :attr:`settings` will also
set their :attr:`~Setting.default` attribute with the
updating value (provided it is a valid one).
"""
for (name, value) in data.items():
if value is not None:
self.set(name, value, default) # depends on [control=['if'], data=['value']] # depends on [control=['for'], data=[]] |
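A short sketch of the update semantics, assuming a Config instance cfg with settings named 'workers' and 'debug'. Per the loop above, None values are skipped rather than unset.

cfg.update({'workers': 4, 'debug': None})   # only 'workers' is applied
cfg.update({'workers': 8}, default=True)    # value and Setting.default both move to 8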
def balance(self, as_of=None, raw=False, leg_query=None, **kwargs):
"""Get the balance for this account, including child accounts
Args:
as_of (Date): Only include transactions on or before this date
raw (bool): If true the returned balance should not have its sign
adjusted for display purposes.
kwargs (dict): Will be used to filter the transaction legs
Returns:
Balance
See Also:
:meth:`simple_balance()`
"""
balances = [
account.simple_balance(as_of=as_of, raw=raw, leg_query=leg_query, **kwargs)
for account in self.get_descendants(include_self=True)
]
return sum(balances, Balance()) | def function[balance, parameter[self, as_of, raw, leg_query]]:
constant[Get the balance for this account, including child accounts
Args:
as_of (Date): Only include transactions on or before this date
raw (bool): If true the returned balance should not have its sign
adjusted for display purposes.
kwargs (dict): Will be used to filter the transaction legs
Returns:
Balance
See Also:
:meth:`simple_balance()`
]
variable[balances] assign[=] <ast.ListComp object at 0x7da20c76e650>
return[call[name[sum], parameter[name[balances], call[name[Balance], parameter[]]]]] | keyword[def] identifier[balance] ( identifier[self] , identifier[as_of] = keyword[None] , identifier[raw] = keyword[False] , identifier[leg_query] = keyword[None] ,** identifier[kwargs] ):
literal[string]
identifier[balances] =[
identifier[account] . identifier[simple_balance] ( identifier[as_of] = identifier[as_of] , identifier[raw] = identifier[raw] , identifier[leg_query] = identifier[leg_query] ,** identifier[kwargs] )
keyword[for] identifier[account] keyword[in] identifier[self] . identifier[get_descendants] ( identifier[include_self] = keyword[True] )
]
keyword[return] identifier[sum] ( identifier[balances] , identifier[Balance] ()) | def balance(self, as_of=None, raw=False, leg_query=None, **kwargs):
"""Get the balance for this account, including child accounts
Args:
as_of (Date): Only include transactions on or before this date
raw (bool): If true the returned balance should not have its sign
adjusted for display purposes.
kwargs (dict): Will be used to filter the transaction legs
Returns:
Balance
See Also:
:meth:`simple_balance()`
"""
balances = [account.simple_balance(as_of=as_of, raw=raw, leg_query=leg_query, **kwargs) for account in self.get_descendants(include_self=True)]
return sum(balances, Balance()) |
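Hypothetical use of the balance aggregation above; account is a placeholder node in an account tree, and any keyword filters are forwarded to the transaction legs.

from datetime import date

total = account.balance(as_of=date(2024, 1, 31))   # subtree balance to date
print(total)                                       # a Balance instance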
def match(self, category, pattern):
"""Match the category."""
return fnmatch.fnmatch(category, pattern, flags=self.FNMATCH_FLAGS) | def function[match, parameter[self, category, pattern]]:
constant[Match the category.]
return[call[name[fnmatch].fnmatch, parameter[name[category], name[pattern]]]] | keyword[def] identifier[match] ( identifier[self] , identifier[category] , identifier[pattern] ):
literal[string]
keyword[return] identifier[fnmatch] . identifier[fnmatch] ( identifier[category] , identifier[pattern] , identifier[flags] = identifier[self] . identifier[FNMATCH_FLAGS] ) | def match(self, category, pattern):
"""Match the category."""
return fnmatch.fnmatch(category, pattern, flags=self.FNMATCH_FLAGS) |
def uriunsplit(uri):
"""
Reverse of urisplit()
>>> uriunsplit(('scheme','authority','path','query','fragment'))
"scheme://authority/path?query#fragment"
"""
(scheme, authority, path, query, fragment) = uri
result = ''
if scheme:
result += scheme + ':'
if authority:
result += '//' + authority
if path:
result += path
if query:
result += '?' + query
if fragment:
result += '#' + fragment
return result | def function[uriunsplit, parameter[uri]]:
constant[
Reverse of urisplit()
>>> uriunsplit(('scheme','authority','path','query','fragment'))
"scheme://authority/path?query#fragment"
]
<ast.Tuple object at 0x7da18f00e200> assign[=] name[uri]
variable[result] assign[=] constant[]
if name[scheme] begin[:]
<ast.AugAssign object at 0x7da18f00ea70>
if name[authority] begin[:]
<ast.AugAssign object at 0x7da18f00fc40>
if name[path] begin[:]
<ast.AugAssign object at 0x7da18f00d1b0>
if name[query] begin[:]
<ast.AugAssign object at 0x7da18f00dc30>
if name[fragment] begin[:]
<ast.AugAssign object at 0x7da18f00e290>
return[name[result]] | keyword[def] identifier[uriunsplit] ( identifier[uri] ):
literal[string]
( identifier[scheme] , identifier[authority] , identifier[path] , identifier[query] , identifier[fragment] )= identifier[uri]
identifier[result] = literal[string]
keyword[if] identifier[scheme] :
identifier[result] += identifier[scheme] + literal[string]
keyword[if] identifier[authority] :
identifier[result] += literal[string] + identifier[authority]
keyword[if] identifier[path] :
identifier[result] += identifier[path]
keyword[if] identifier[query] :
identifier[result] += literal[string] + identifier[query]
keyword[if] identifier[fragment] :
identifier[result] += literal[string] + identifier[fragment]
keyword[return] identifier[result] | def uriunsplit(uri):
"""
Reverse of urisplit()
>>> uriunsplit(('scheme','authority','path','query','fragment'))
"scheme://authority/path?query#fragment"
"""
(scheme, authority, path, query, fragment) = uri
result = ''
if scheme:
result += scheme + ':' # depends on [control=['if'], data=[]]
if authority:
result += '//' + authority # depends on [control=['if'], data=[]]
if path:
result += path # depends on [control=['if'], data=[]]
if query:
result += '?' + query # depends on [control=['if'], data=[]]
if fragment:
result += '#' + fragment # depends on [control=['if'], data=[]]
return result |
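A quick round-trip check of the record above, assuming its uriunsplit() is in scope; the five-part tuple mirrors the docstring example:

# empty components are skipped, so partial URIs also recombine cleanly
parts = ('https', 'example.com', '/index.html', 'q=1', 'top')
assert uriunsplit(parts) == 'https://example.com/index.html?q=1#top'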
def red_ext(request, message=None):
'''
The external landing page.
Also a convenience function for redirecting users who don't have site access to the external page.
Parameters:
request - the request in the calling function
message - a message from the caller function
'''
if message:
messages.add_message(request, messages.ERROR, message)
return HttpResponseRedirect(reverse('external')) | def function[red_ext, parameter[request, message]]:
constant[
The external landing page.
Also a convenience function for redirecting users who don't have site access to the external page.
Parameters:
request - the request in the calling function
message - a message from the caller function
]
if name[message] begin[:]
call[name[messages].add_message, parameter[name[request], name[messages].ERROR, name[message]]]
return[call[name[HttpResponseRedirect], parameter[call[name[reverse], parameter[constant[external]]]]]] | keyword[def] identifier[red_ext] ( identifier[request] , identifier[message] = keyword[None] ):
literal[string]
keyword[if] identifier[message] :
identifier[messages] . identifier[add_message] ( identifier[request] , identifier[messages] . identifier[ERROR] , identifier[message] )
keyword[return] identifier[HttpResponseRedirect] ( identifier[reverse] ( literal[string] )) | def red_ext(request, message=None):
"""
The external landing page.
Also a convenience function for redirecting users who don't have site access to the external page.
Parameters:
request - the request in the calling function
message - a message from the caller function
"""
if message:
messages.add_message(request, messages.ERROR, message) # depends on [control=['if'], data=[]]
return HttpResponseRedirect(reverse('external')) |
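A sketch of how a Django view might call the red_ext() helper above; the view and message are hypothetical, and the messages framework plus an 'external' URL name are assumed to be configured:

def my_view(request):
    # hypothetical guard: bounce users without site access to the external page
    if not request.user.is_authenticated:
        return red_ext(request, 'You need site access to view that page.')
    ...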
def _reqs(self, tag):
""" Grab all the pull requests """
return [
(tag, i) for i in
self.client.get_pulls(*tag.split('/'))
] | def function[_reqs, parameter[self, tag]]:
constant[ Grab all the pull requests ]
return[<ast.ListComp object at 0x7da1b025aa40>] | keyword[def] identifier[_reqs] ( identifier[self] , identifier[tag] ):
literal[string]
keyword[return] [
( identifier[tag] , identifier[i] ) keyword[for] identifier[i] keyword[in]
identifier[self] . identifier[client] . identifier[get_pulls] (* identifier[tag] . identifier[split] ( literal[string] ))
] | def _reqs(self, tag):
""" Grab all the pull requests """
return [(tag, i) for i in self.client.get_pulls(*tag.split('/'))] |
def _find_first_transactions(
transactions,
customer_id_col,
datetime_col,
monetary_value_col=None,
datetime_format=None,
observation_period_end=None,
freq="D",
):
"""
Return dataframe with first transactions.
This takes a DataFrame of transaction data of the form:
customer_id, datetime [, monetary_value]
and appends a column named 'first' to the transaction log which indicates which rows
are the first transactions for that customer_id.
Parameters
----------
transactions: :obj: DataFrame
a Pandas DataFrame that contains the customer_id col and the datetime col.
customer_id_col: string
the column in transactions DataFrame that denotes the customer_id
datetime_col: string
the column in transactions that denotes the datetime the purchase was made.
monetary_value_col: string, optional
the column in transactions that denotes the monetary value of the transaction.
Optional, only needed for customer lifetime value estimation models.
observation_period_end: :obj: datetime
a string or datetime to denote the final date of the study.
Events after this date are truncated. If not given, defaults to the max 'datetime_col'.
datetime_format: string, optional
a string that represents the timestamp format. Useful if Pandas can't understand
the provided format.
freq: string, optional
Default 'D' for days, 'W' for weeks, 'M' for months... etc. Full list here:
http://pandas.pydata.org/pandas-docs/stable/timeseries.html#dateoffset-objects
"""
if observation_period_end is None:
observation_period_end = transactions[datetime_col].max()
if type(observation_period_end) == pd.Period:
observation_period_end = observation_period_end.to_timestamp()
select_columns = [customer_id_col, datetime_col]
if monetary_value_col:
select_columns.append(monetary_value_col)
transactions = transactions[select_columns].sort_values(select_columns).copy()
# make sure the date column uses datetime objects, and use Pandas' DateTimeIndex.to_period()
# to convert the column to a PeriodIndex which is useful for time-wise grouping and truncating
transactions[datetime_col] = pd.to_datetime(transactions[datetime_col], format=datetime_format)
transactions = transactions.set_index(datetime_col).to_period(freq).to_timestamp()
transactions = transactions.loc[(transactions.index <= observation_period_end)].reset_index()
period_groupby = transactions.groupby([datetime_col, customer_id_col], sort=False, as_index=False)
if monetary_value_col:
# when we have a monetary column, make sure to sum together any values in the same period
period_transactions = period_groupby.sum()
else:
# by calling head() on the groupby object, the datetime_col and customer_id_col columns
# will be reduced
period_transactions = period_groupby.head(1)
# initialize a new column where we will indicate which are the first transactions
period_transactions["first"] = False
# find all of the initial transactions and store as an index
first_transactions = period_transactions.groupby(customer_id_col, sort=True, as_index=False).head(1).index
# mark the initial transactions as True
period_transactions.loc[first_transactions, "first"] = True
select_columns.append("first")
# reset datetime_col to period
period_transactions[datetime_col] = pd.Index(period_transactions[datetime_col]).to_period(freq)
return period_transactions[select_columns] | def function[_find_first_transactions, parameter[transactions, customer_id_col, datetime_col, monetary_value_col, datetime_format, observation_period_end, freq]]:
constant[
Return dataframe with first transactions.
This takes a DataFrame of transaction data of the form:
customer_id, datetime [, monetary_value]
and appends a column named 'first' to the transaction log which indicates which rows
are the first transactions for that customer_id.
Parameters
----------
transactions: :obj: DataFrame
a Pandas DataFrame that contains the customer_id col and the datetime col.
customer_id_col: string
the column in transactions DataFrame that denotes the customer_id
datetime_col: string
the column in transactions that denotes the datetime the purchase was made.
monetary_value_col: string, optional
the column in transactions that denotes the monetary value of the transaction.
Optional, only needed for customer lifetime value estimation models.
observation_period_end: :obj: datetime
a string or datetime to denote the final date of the study.
Events after this date are truncated. If not given, defaults to the max 'datetime_col'.
datetime_format: string, optional
a string that represents the timestamp format. Useful if Pandas can't understand
the provided format.
freq: string, optional
Default 'D' for days, 'W' for weeks, 'M' for months... etc. Full list here:
http://pandas.pydata.org/pandas-docs/stable/timeseries.html#dateoffset-objects
]
if compare[name[observation_period_end] is constant[None]] begin[:]
variable[observation_period_end] assign[=] call[call[name[transactions]][name[datetime_col]].max, parameter[]]
if compare[call[name[type], parameter[name[observation_period_end]]] equal[==] name[pd].Period] begin[:]
variable[observation_period_end] assign[=] call[name[observation_period_end].to_timestamp, parameter[]]
variable[select_columns] assign[=] list[[<ast.Name object at 0x7da1b1d8b7c0>, <ast.Name object at 0x7da1b1d8b790>]]
if name[monetary_value_col] begin[:]
call[name[select_columns].append, parameter[name[monetary_value_col]]]
variable[transactions] assign[=] call[call[call[name[transactions]][name[select_columns]].sort_values, parameter[name[select_columns]]].copy, parameter[]]
call[name[transactions]][name[datetime_col]] assign[=] call[name[pd].to_datetime, parameter[call[name[transactions]][name[datetime_col]]]]
variable[transactions] assign[=] call[call[call[name[transactions].set_index, parameter[name[datetime_col]]].to_period, parameter[name[freq]]].to_timestamp, parameter[]]
variable[transactions] assign[=] call[call[name[transactions].loc][compare[name[transactions].index less_or_equal[<=] name[observation_period_end]]].reset_index, parameter[]]
variable[period_groupby] assign[=] call[name[transactions].groupby, parameter[list[[<ast.Name object at 0x7da1b23885e0>, <ast.Name object at 0x7da1b2388670>]]]]
if name[monetary_value_col] begin[:]
variable[period_transactions] assign[=] call[name[period_groupby].sum, parameter[]]
call[name[period_transactions]][constant[first]] assign[=] constant[False]
variable[first_transactions] assign[=] call[call[name[period_transactions].groupby, parameter[name[customer_id_col]]].head, parameter[constant[1]]].index
call[name[period_transactions].loc][tuple[[<ast.Name object at 0x7da1b22b8e50>, <ast.Constant object at 0x7da1b22bb0a0>]]] assign[=] constant[True]
call[name[select_columns].append, parameter[constant[first]]]
call[name[period_transactions]][name[datetime_col]] assign[=] call[call[name[pd].Index, parameter[call[name[period_transactions]][name[datetime_col]]]].to_period, parameter[name[freq]]]
return[call[name[period_transactions]][name[select_columns]]] | keyword[def] identifier[_find_first_transactions] (
identifier[transactions] ,
identifier[customer_id_col] ,
identifier[datetime_col] ,
identifier[monetary_value_col] = keyword[None] ,
identifier[datetime_format] = keyword[None] ,
identifier[observation_period_end] = keyword[None] ,
identifier[freq] = literal[string] ,
):
literal[string]
keyword[if] identifier[observation_period_end] keyword[is] keyword[None] :
identifier[observation_period_end] = identifier[transactions] [ identifier[datetime_col] ]. identifier[max] ()
keyword[if] identifier[type] ( identifier[observation_period_end] )== identifier[pd] . identifier[Period] :
identifier[observation_period_end] = identifier[observation_period_end] . identifier[to_timestamp] ()
identifier[select_columns] =[ identifier[customer_id_col] , identifier[datetime_col] ]
keyword[if] identifier[monetary_value_col] :
identifier[select_columns] . identifier[append] ( identifier[monetary_value_col] )
identifier[transactions] = identifier[transactions] [ identifier[select_columns] ]. identifier[sort_values] ( identifier[select_columns] ). identifier[copy] ()
identifier[transactions] [ identifier[datetime_col] ]= identifier[pd] . identifier[to_datetime] ( identifier[transactions] [ identifier[datetime_col] ], identifier[format] = identifier[datetime_format] )
identifier[transactions] = identifier[transactions] . identifier[set_index] ( identifier[datetime_col] ). identifier[to_period] ( identifier[freq] ). identifier[to_timestamp] ()
identifier[transactions] = identifier[transactions] . identifier[loc] [( identifier[transactions] . identifier[index] <= identifier[observation_period_end] )]. identifier[reset_index] ()
identifier[period_groupby] = identifier[transactions] . identifier[groupby] ([ identifier[datetime_col] , identifier[customer_id_col] ], identifier[sort] = keyword[False] , identifier[as_index] = keyword[False] )
keyword[if] identifier[monetary_value_col] :
identifier[period_transactions] = identifier[period_groupby] . identifier[sum] ()
keyword[else] :
identifier[period_transactions] = identifier[period_groupby] . identifier[head] ( literal[int] )
identifier[period_transactions] [ literal[string] ]= keyword[False]
identifier[first_transactions] = identifier[period_transactions] . identifier[groupby] ( identifier[customer_id_col] , identifier[sort] = keyword[True] , identifier[as_index] = keyword[False] ). identifier[head] ( literal[int] ). identifier[index]
identifier[period_transactions] . identifier[loc] [ identifier[first_transactions] , literal[string] ]= keyword[True]
identifier[select_columns] . identifier[append] ( literal[string] )
identifier[period_transactions] [ identifier[datetime_col] ]= identifier[pd] . identifier[Index] ( identifier[period_transactions] [ identifier[datetime_col] ]). identifier[to_period] ( identifier[freq] )
keyword[return] identifier[period_transactions] [ identifier[select_columns] ] | def _find_first_transactions(transactions, customer_id_col, datetime_col, monetary_value_col=None, datetime_format=None, observation_period_end=None, freq='D'):
"""
Return dataframe with first transactions.
This takes a DataFrame of transaction data of the form:
customer_id, datetime [, monetary_value]
and appends a column named 'first' to the transaction log which indicates which rows
are the first transactions for that customer_id.
Parameters
----------
transactions: :obj: DataFrame
a Pandas DataFrame that contains the customer_id col and the datetime col.
customer_id_col: string
the column in transactions DataFrame that denotes the customer_id
datetime_col: string
the column in transactions that denotes the datetime the purchase was made.
monetary_value_col: string, optional
the column in transactions that denotes the monetary value of the transaction.
Optional, only needed for customer lifetime value estimation models.
observation_period_end: :obj: datetime
a string or datetime to denote the final date of the study.
Events after this date are truncated. If not given, defaults to the max 'datetime_col'.
datetime_format: string, optional
a string that represents the timestamp format. Useful if Pandas can't understand
the provided format.
freq: string, optional
Default 'D' for days, 'W' for weeks, 'M' for months... etc. Full list here:
http://pandas.pydata.org/pandas-docs/stable/timeseries.html#dateoffset-objects
"""
if observation_period_end is None:
observation_period_end = transactions[datetime_col].max() # depends on [control=['if'], data=['observation_period_end']]
if type(observation_period_end) == pd.Period:
observation_period_end = observation_period_end.to_timestamp() # depends on [control=['if'], data=[]]
select_columns = [customer_id_col, datetime_col]
if monetary_value_col:
select_columns.append(monetary_value_col) # depends on [control=['if'], data=[]]
transactions = transactions[select_columns].sort_values(select_columns).copy()
# make sure the date column uses datetime objects, and use Pandas' DateTimeIndex.to_period()
# to convert the column to a PeriodIndex which is useful for time-wise grouping and truncating
transactions[datetime_col] = pd.to_datetime(transactions[datetime_col], format=datetime_format)
transactions = transactions.set_index(datetime_col).to_period(freq).to_timestamp()
transactions = transactions.loc[transactions.index <= observation_period_end].reset_index()
period_groupby = transactions.groupby([datetime_col, customer_id_col], sort=False, as_index=False)
if monetary_value_col:
# when we have a monetary column, make sure to sum together any values in the same period
period_transactions = period_groupby.sum() # depends on [control=['if'], data=[]]
else:
# by calling head() on the groupby object, the datetime_col and customer_id_col columns
# will be reduced
period_transactions = period_groupby.head(1)
# initialize a new column where we will indicate which are the first transactions
period_transactions['first'] = False
# find all of the initial transactions and store as an index
first_transactions = period_transactions.groupby(customer_id_col, sort=True, as_index=False).head(1).index
# mark the initial transactions as True
period_transactions.loc[first_transactions, 'first'] = True
select_columns.append('first')
# reset datetime_col to period
period_transactions[datetime_col] = pd.Index(period_transactions[datetime_col]).to_period(freq)
return period_transactions[select_columns] |
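A small, self-contained sketch of the record above, assuming _find_first_transactions() is importable and pandas is installed; the column names are made up:

import pandas as pd

# tiny transaction log: customer 1 buys twice, customer 2 once
log = pd.DataFrame({
    'id': [1, 1, 2],
    'date': ['2015-01-01', '2015-02-06', '2015-01-15'],
    'spend': [10.0, 5.0, 7.0],
})
first = _find_first_transactions(log, 'id', 'date', monetary_value_col='spend', freq='D')
print(first)  # the 'first' column is True only on each customer's earliest period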
def print_diff(self, summary1=None, summary2=None):
"""Compute diff between to summaries and print it.
If no summary is provided, the diff from the last to the current
summary is used. If summary1 is provided the diff from summary1
to the current summary is used. If summary1 and summary2 are
provided, the diff between these two is used.
"""
summary.print_(self.diff(summary1=summary1, summary2=summary2)) | def function[print_diff, parameter[self, summary1, summary2]]:
constant[Compute the diff between two summaries and print it.
If no summary is provided, the diff from the last to the current
summary is used. If summary1 is provided the diff from summary1
to the current summary is used. If summary1 and summary2 are
provided, the diff between these two is used.
]
call[name[summary].print_, parameter[call[name[self].diff, parameter[]]]] | keyword[def] identifier[print_diff] ( identifier[self] , identifier[summary1] = keyword[None] , identifier[summary2] = keyword[None] ):
literal[string]
identifier[summary] . identifier[print_] ( identifier[self] . identifier[diff] ( identifier[summary1] = identifier[summary1] , identifier[summary2] = identifier[summary2] )) | def print_diff(self, summary1=None, summary2=None):
"""Compute diff between to summaries and print it.
If no summary is provided, the diff from the last to the current
summary is used. If summary1 is provided the diff from summary1
to the current summary is used. If summary1 and summary2 are
provided, the diff between these two is used.
"""
summary.print_(self.diff(summary1=summary1, summary2=summary2)) |
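This record reads like Pympler's SummaryTracker.print_diff(); if that guess is right, typical use looks like the sketch below:

from pympler import tracker

tr = tracker.SummaryTracker()
data = [dict.fromkeys('abc') for _ in range(1000)]  # allocate some objects
tr.print_diff()  # prints a summary of objects created since the tracker was built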
def diff_dumps(ih1, ih2, tofile=None, name1="a", name2="b", n_context=3):
"""Diff 2 IntelHex objects and produce unified diff output for their
hex dumps.
@param ih1 first IntelHex object to compare
@param ih2 second IntelHex object to compare
@param tofile file-like object to write output
@param name1 name of the first hex file to show in the diff header
@param name2 name of the second hex file to show in the diff header
@param n_context number of context lines in the unidiff output
"""
def prepare_lines(ih):
sio = StringIO()
ih.dump(sio)
dump = sio.getvalue()
lines = dump.splitlines()
return lines
a = prepare_lines(ih1)
b = prepare_lines(ih2)
import difflib
result = list(difflib.unified_diff(a, b, fromfile=name1, tofile=name2, n=n_context, lineterm=''))
if tofile is None:
tofile = sys.stdout
output = '\n'.join(result)+'\n'
tofile.write(output) | def function[diff_dumps, parameter[ih1, ih2, tofile, name1, name2, n_context]]:
constant[Diff 2 IntelHex objects and produce unified diff output for their
hex dumps.
@param ih1 first IntelHex object to compare
@param ih2 second IntelHex object to compare
@param tofile file-like object to write output
@param name1 name of the first hex file to show in the diff header
@param name2 name of the second hex file to show in the diff header
@param n_context number of context lines in the unidiff output
]
def function[prepare_lines, parameter[ih]]:
variable[sio] assign[=] call[name[StringIO], parameter[]]
call[name[ih].dump, parameter[name[sio]]]
variable[dump] assign[=] call[name[sio].getvalue, parameter[]]
variable[lines] assign[=] call[name[dump].splitlines, parameter[]]
return[name[lines]]
variable[a] assign[=] call[name[prepare_lines], parameter[name[ih1]]]
variable[b] assign[=] call[name[prepare_lines], parameter[name[ih2]]]
import module[difflib]
variable[result] assign[=] call[name[list], parameter[call[name[difflib].unified_diff, parameter[name[a], name[b]]]]]
if compare[name[tofile] is constant[None]] begin[:]
variable[tofile] assign[=] name[sys].stdout
variable[output] assign[=] binary_operation[call[constant[
].join, parameter[name[result]]] + constant[
]]
call[name[tofile].write, parameter[name[output]]] | keyword[def] identifier[diff_dumps] ( identifier[ih1] , identifier[ih2] , identifier[tofile] = keyword[None] , identifier[name1] = literal[string] , identifier[name2] = literal[string] , identifier[n_context] = literal[int] ):
literal[string]
keyword[def] identifier[prepare_lines] ( identifier[ih] ):
identifier[sio] = identifier[StringIO] ()
identifier[ih] . identifier[dump] ( identifier[sio] )
identifier[dump] = identifier[sio] . identifier[getvalue] ()
identifier[lines] = identifier[dump] . identifier[splitlines] ()
keyword[return] identifier[lines]
identifier[a] = identifier[prepare_lines] ( identifier[ih1] )
identifier[b] = identifier[prepare_lines] ( identifier[ih2] )
keyword[import] identifier[difflib]
identifier[result] = identifier[list] ( identifier[difflib] . identifier[unified_diff] ( identifier[a] , identifier[b] , identifier[fromfile] = identifier[name1] , identifier[tofile] = identifier[name2] , identifier[n] = identifier[n_context] , identifier[lineterm] = literal[string] ))
keyword[if] identifier[tofile] keyword[is] keyword[None] :
identifier[tofile] = identifier[sys] . identifier[stdout]
identifier[output] = literal[string] . identifier[join] ( identifier[result] )+ literal[string]
identifier[tofile] . identifier[write] ( identifier[output] ) | def diff_dumps(ih1, ih2, tofile=None, name1='a', name2='b', n_context=3):
"""Diff 2 IntelHex objects and produce unified diff output for their
hex dumps.
@param ih1 first IntelHex object to compare
@param ih2 second IntelHex object to compare
@param tofile file-like object to write output
@param name1 name of the first hex file to show in the diff header
@param name2 name of the second hex file to show in the diff header
@param n_context number of context lines in the unidiff output
"""
def prepare_lines(ih):
sio = StringIO()
ih.dump(sio)
dump = sio.getvalue()
lines = dump.splitlines()
return lines
a = prepare_lines(ih1)
b = prepare_lines(ih2)
import difflib
result = list(difflib.unified_diff(a, b, fromfile=name1, tofile=name2, n=n_context, lineterm=''))
if tofile is None:
tofile = sys.stdout # depends on [control=['if'], data=['tofile']]
output = '\n'.join(result) + '\n'
tofile.write(output) |
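A hedged usage sketch for diff_dumps(), assuming the intelhex package provides the IntelHex objects being compared:

from intelhex import IntelHex

ih1, ih2 = IntelHex(), IntelHex()
ih1.puts(0x0000, b'\x01\x02\x03\x04')
ih2.puts(0x0000, b'\x01\x02\xff\x04')  # one byte differs
diff_dumps(ih1, ih2, name1='old.hex', name2='new.hex')  # unified diff to stdout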
def delete_file(self, project_name, remote_path):
"""
Delete a file or folder from a project
:param project_name: str: name of the project containing a file we will delete
:param remote_path: str: remote path specifying file to delete
"""
project = self._get_or_create_project(project_name)
remote_file = project.get_child_for_path(remote_path)
remote_file.delete() | def function[delete_file, parameter[self, project_name, remote_path]]:
constant[
Delete a file or folder from a project
:param project_name: str: name of the project containing a file we will delete
:param remote_path: str: remote path specifying file to delete
]
variable[project] assign[=] call[name[self]._get_or_create_project, parameter[name[project_name]]]
variable[remote_file] assign[=] call[name[project].get_child_for_path, parameter[name[remote_path]]]
call[name[remote_file].delete, parameter[]] | keyword[def] identifier[delete_file] ( identifier[self] , identifier[project_name] , identifier[remote_path] ):
literal[string]
identifier[project] = identifier[self] . identifier[_get_or_create_project] ( identifier[project_name] )
identifier[remote_file] = identifier[project] . identifier[get_child_for_path] ( identifier[remote_path] )
identifier[remote_file] . identifier[delete] () | def delete_file(self, project_name, remote_path):
"""
Delete a file or folder from a project
:param project_name: str: name of the project containing a file we will delete
:param remote_path: str: remote path specifying file to delete
"""
project = self._get_or_create_project(project_name)
remote_file = project.get_child_for_path(remote_path)
remote_file.delete() |
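A two-line usage sketch; `uploader` stands in for an instance of whatever class owns delete_file(), and both names below are hypothetical:

# removes /results/old_counts.tsv from the 'mouse-rna' project
uploader.delete_file('mouse-rna', '/results/old_counts.tsv')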
def can_infect(self, event):
"""
Whether the spreading stop can infect using this event.
"""
if event.from_stop_I != self.stop_I:
return False
if not self.has_been_visited():
return False
else:
time_sep = event.dep_time_ut-self.get_min_visit_time()
# if the gap between the earliest visit_time and current time is
# smaller than the min. transfer time, the stop can pass the spreading
# forward
if (time_sep >= self.min_transfer_time) or (event.trip_I == -1 and time_sep >= 0):
return True
else:
for visit in self.visit_events:
# if no transfer, please hop-on
if (event.trip_I == visit.trip_I) and (time_sep >= 0):
return True
return False | def function[can_infect, parameter[self, event]]:
constant[
Whether the spreading stop can infect using this event.
]
if compare[name[event].from_stop_I not_equal[!=] name[self].stop_I] begin[:]
return[constant[False]]
if <ast.UnaryOp object at 0x7da1b0140070> begin[:]
return[constant[False]] | keyword[def] identifier[can_infect] ( identifier[self] , identifier[event] ):
literal[string]
keyword[if] identifier[event] . identifier[from_stop_I] != identifier[self] . identifier[stop_I] :
keyword[return] keyword[False]
keyword[if] keyword[not] identifier[self] . identifier[has_been_visited] ():
keyword[return] keyword[False]
keyword[else] :
identifier[time_sep] = identifier[event] . identifier[dep_time_ut] - identifier[self] . identifier[get_min_visit_time] ()
keyword[if] ( identifier[time_sep] >= identifier[self] . identifier[min_transfer_time] ) keyword[or] ( identifier[event] . identifier[trip_I] ==- literal[int] keyword[and] identifier[time_sep] >= literal[int] ):
keyword[return] keyword[True]
keyword[else] :
keyword[for] identifier[visit] keyword[in] identifier[self] . identifier[visit_events] :
keyword[if] ( identifier[event] . identifier[trip_I] == identifier[visit] . identifier[trip_I] ) keyword[and] ( identifier[time_sep] >= literal[int] ):
keyword[return] keyword[True]
keyword[return] keyword[False] | def can_infect(self, event):
"""
Whether the spreading stop can infect using this event.
"""
if event.from_stop_I != self.stop_I:
return False # depends on [control=['if'], data=[]]
if not self.has_been_visited():
return False # depends on [control=['if'], data=[]]
else:
time_sep = event.dep_time_ut - self.get_min_visit_time()
# if the gap between the earliest visit_time and current time is
# smaller than the min. transfer time, the stop can pass the spreading
# forward
if time_sep >= self.min_transfer_time or (event.trip_I == -1 and time_sep >= 0):
return True # depends on [control=['if'], data=[]]
else:
for visit in self.visit_events:
# if no transfer, please hop-on
if event.trip_I == visit.trip_I and time_sep >= 0:
return True # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['visit']]
return False |
def create(name):
'''
Create a basic chroot environment.
Note that this environment is not functional. The caller needs to
install the minimal required binaries, including Python if
chroot.call is called.
name
Path to the chroot environment
CLI Example:
.. code-block:: bash
salt myminion chroot.create /chroot
'''
if not exist(name):
dev = os.path.join(name, 'dev')
proc = os.path.join(name, 'proc')
try:
os.makedirs(dev, mode=0o755)
os.makedirs(proc, mode=0o555)
except OSError as e:
log.error('Error when trying to create chroot directories: %s', e)
return False
return True | def function[create, parameter[name]]:
constant[
Create a basic chroot environment.
Note that this environment is not functional. The caller needs to
install the minimal required binaries, including Python if
chroot.call is called.
name
Path to the chroot environment
CLI Example:
.. code-block:: bash
salt myminion chroot.create /chroot
]
if <ast.UnaryOp object at 0x7da1b1c15e40> begin[:]
variable[dev] assign[=] call[name[os].path.join, parameter[name[name], constant[dev]]]
variable[proc] assign[=] call[name[os].path.join, parameter[name[name], constant[proc]]]
<ast.Try object at 0x7da1b1c16710>
return[constant[True]] | keyword[def] identifier[create] ( identifier[name] ):
literal[string]
keyword[if] keyword[not] identifier[exist] ( identifier[name] ):
identifier[dev] = identifier[os] . identifier[path] . identifier[join] ( identifier[name] , literal[string] )
identifier[proc] = identifier[os] . identifier[path] . identifier[join] ( identifier[name] , literal[string] )
keyword[try] :
identifier[os] . identifier[makedirs] ( identifier[dev] , identifier[mode] = literal[int] )
identifier[os] . identifier[makedirs] ( identifier[proc] , identifier[mode] = literal[int] )
keyword[except] identifier[OSError] keyword[as] identifier[e] :
identifier[log] . identifier[error] ( literal[string] , identifier[e] )
keyword[return] keyword[False]
keyword[return] keyword[True] | def create(name):
"""
Create a basic chroot environment.
Note that this environment is not functional. The caller needs to
install the minimal required binaries, including Python if
chroot.call is called.
name
Path to the chroot environment
CLI Example:
.. code-block:: bash
salt myminion chroot.create /chroot
"""
if not exist(name):
dev = os.path.join(name, 'dev')
proc = os.path.join(name, 'proc')
try:
os.makedirs(dev, mode=493)
os.makedirs(proc, mode=365) # depends on [control=['try'], data=[]]
except OSError as e:
log.error('Error when trying to create chroot directories: %s', e)
return False # depends on [control=['except'], data=['e']] # depends on [control=['if'], data=[]]
return True |
def _storestr(ins):
""" Stores a string value into a memory address.
It copies the content of the 2nd operand (string) into the 1st, reallocating
dynamic memory for the 1st string. This instruction DOES ALLOW
immediate strings for the 2nd parameter, starting with '#'.
Must prepend '#' (immediate sigil) to 1st operand, as we need
the & address of the destination.
"""
op1 = ins.quad[1]
indirect = op1[0] == '*'
if indirect:
op1 = op1[1:]
immediate = op1[0] == '#'
if immediate and not indirect:
raise InvalidIC('storestr does not allow immediate destination', ins.quad)
if not indirect:
op1 = '#' + op1
tmp1, tmp2, output = _str_oper(op1, ins.quad[2], no_exaf=True)
if not tmp2:
output.append('call __STORE_STR')
REQUIRES.add('storestr.asm')
else:
output.append('call __STORE_STR2')
REQUIRES.add('storestr2.asm')
return output | def function[_storestr, parameter[ins]]:
constant[ Stores a string value into a memory address.
It copies the content of the 2nd operand (string) into the 1st, reallocating
dynamic memory for the 1st string. This instruction DOES ALLOW
immediate strings for the 2nd parameter, starting with '#'.
Must prepend '#' (immediate sigil) to 1st operand, as we need
the & address of the destination.
]
variable[op1] assign[=] call[name[ins].quad][constant[1]]
variable[indirect] assign[=] compare[call[name[op1]][constant[0]] equal[==] constant[*]]
if name[indirect] begin[:]
variable[op1] assign[=] call[name[op1]][<ast.Slice object at 0x7da20eb2ada0>]
variable[immediate] assign[=] compare[call[name[op1]][constant[0]] equal[==] constant[#]]
if <ast.BoolOp object at 0x7da20eb2ba60> begin[:]
<ast.Raise object at 0x7da20eb29cc0>
if <ast.UnaryOp object at 0x7da20c6aa320> begin[:]
variable[op1] assign[=] binary_operation[constant[#] + name[op1]]
<ast.Tuple object at 0x7da18bcc9600> assign[=] call[name[_str_oper], parameter[name[op1], call[name[ins].quad][constant[2]]]]
if <ast.UnaryOp object at 0x7da18bcc9c60> begin[:]
call[name[output].append, parameter[constant[call __STORE_STR]]]
call[name[REQUIRES].add, parameter[constant[storestr.asm]]]
return[name[output]] | keyword[def] identifier[_storestr] ( identifier[ins] ):
literal[string]
identifier[op1] = identifier[ins] . identifier[quad] [ literal[int] ]
identifier[indirect] = identifier[op1] [ literal[int] ]== literal[string]
keyword[if] identifier[indirect] :
identifier[op1] = identifier[op1] [ literal[int] :]
identifier[immediate] = identifier[op1] [ literal[int] ]== literal[string]
keyword[if] identifier[immediate] keyword[and] keyword[not] identifier[indirect] :
keyword[raise] identifier[InvalidIC] ( literal[string] , identifier[ins] . identifier[quad] )
keyword[if] keyword[not] identifier[indirect] :
identifier[op1] = literal[string] + identifier[op1]
identifier[tmp1] , identifier[tmp2] , identifier[output] = identifier[_str_oper] ( identifier[op1] , identifier[ins] . identifier[quad] [ literal[int] ], identifier[no_exaf] = keyword[True] )
keyword[if] keyword[not] identifier[tmp2] :
identifier[output] . identifier[append] ( literal[string] )
identifier[REQUIRES] . identifier[add] ( literal[string] )
keyword[else] :
identifier[output] . identifier[append] ( literal[string] )
identifier[REQUIRES] . identifier[add] ( literal[string] )
keyword[return] identifier[output] | def _storestr(ins):
""" Stores a string value into a memory address.
It copies the content of the 2nd operand (string) into the 1st, reallocating
dynamic memory for the 1st string. This instruction DOES ALLOW
immediate strings for the 2nd parameter, starting with '#'.
Must prepend '#' (immediate sigil) to 1st operand, as we need
the & address of the destination.
"""
op1 = ins.quad[1]
indirect = op1[0] == '*'
if indirect:
op1 = op1[1:] # depends on [control=['if'], data=[]]
immediate = op1[0] == '#'
if immediate and (not indirect):
raise InvalidIC('storestr does not allow immediate destination', ins.quad) # depends on [control=['if'], data=[]]
if not indirect:
op1 = '#' + op1 # depends on [control=['if'], data=[]]
(tmp1, tmp2, output) = _str_oper(op1, ins.quad[2], no_exaf=True)
if not tmp2:
output.append('call __STORE_STR')
REQUIRES.add('storestr.asm') # depends on [control=['if'], data=[]]
else:
output.append('call __STORE_STR2')
REQUIRES.add('storestr2.asm')
return output |
def view_show(self, id, **kwargs):
"https://developer.zendesk.com/rest_api/docs/core/views#show-view"
api_path = "/api/v2/views/{id}.json"
api_path = api_path.format(id=id)
return self.call(api_path, **kwargs) | def function[view_show, parameter[self, id]]:
constant[https://developer.zendesk.com/rest_api/docs/core/views#show-view]
variable[api_path] assign[=] constant[/api/v2/views/{id}.json]
variable[api_path] assign[=] call[name[api_path].format, parameter[]]
return[call[name[self].call, parameter[name[api_path]]]] | keyword[def] identifier[view_show] ( identifier[self] , identifier[id] ,** identifier[kwargs] ):
literal[string]
identifier[api_path] = literal[string]
identifier[api_path] = identifier[api_path] . identifier[format] ( identifier[id] = identifier[id] )
keyword[return] identifier[self] . identifier[call] ( identifier[api_path] ,** identifier[kwargs] ) | def view_show(self, id, **kwargs):
"""https://developer.zendesk.com/rest_api/docs/core/views#show-view"""
api_path = '/api/v2/views/{id}.json'
api_path = api_path.format(id=id)
return self.call(api_path, **kwargs) |
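A usage sketch for the Zendesk wrapper above; `client` is a hypothetical instance of the API class owning view_show():

resp = client.view_show(id=25)  # GET /api/v2/views/25.json
print(resp['view']['title'])    # the show-view response nests data under 'view'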
def plot_graph_route(G, route, bbox=None, fig_height=6, fig_width=None,
margin=0.02, bgcolor='w', axis_off=True, show=True,
save=False, close=True, file_format='png', filename='temp',
dpi=300, annotate=False, node_color='#999999',
node_size=15, node_alpha=1, node_edgecolor='none',
node_zorder=1, edge_color='#999999', edge_linewidth=1,
edge_alpha=1, use_geom=True, origin_point=None,
destination_point=None, route_color='r', route_linewidth=4,
route_alpha=0.5, orig_dest_node_alpha=0.5,
orig_dest_node_size=100, orig_dest_node_color='r',
orig_dest_point_color='b'):
"""
Plot a route along a networkx spatial graph.
Parameters
----------
G : networkx multidigraph
route : list
the route as a list of nodes
bbox : tuple
bounding box as north,south,east,west - if None will calculate from
spatial extents of data. if passing a bbox, you probably also want to
pass margin=0 to constrain it.
fig_height : int
matplotlib figure height in inches
fig_width : int
matplotlib figure width in inches
margin : float
relative margin around the figure
axis_off : bool
if True turn off the matplotlib axis
bgcolor : string
the background color of the figure and axis
show : bool
if True, show the figure
save : bool
if True, save the figure as an image file to disk
close : bool
close the figure (only if show equals False) to prevent display
file_format : string
the format of the file to save (e.g., 'jpg', 'png', 'svg')
filename : string
the name of the file if saving
dpi : int
the resolution of the image file if saving
annotate : bool
if True, annotate the nodes in the figure
node_color : string
the color of the nodes
node_size : int
the size of the nodes
node_alpha : float
the opacity of the nodes
node_edgecolor : string
the color of the node's marker's border
node_zorder : int
zorder to plot nodes, edges are always 2, so make node_zorder 1 to plot
nodes beneath them or 3 to plot nodes atop them
edge_color : string
the color of the edges' lines
edge_linewidth : float
the width of the edges' lines
edge_alpha : float
the opacity of the edges' lines
use_geom : bool
if True, use the spatial geometry attribute of the edges to draw
geographically accurate edges, rather than just lines straight from node
to node
origin_point : tuple
optional, an origin (lat, lon) point to plot instead of the origin node
destination_point : tuple
optional, a destination (lat, lon) point to plot instead of the
destination node
route_color : string
the color of the route
route_linewidth : int
the width of the route line
route_alpha : float
the opacity of the route line
orig_dest_node_alpha : float
the opacity of the origin and destination nodes
orig_dest_node_size : int
the size of the origin and destination nodes
orig_dest_node_color : string
the color of the origin and destination nodes
orig_dest_point_color : string
the color of the origin and destination points if being plotted instead
of nodes
Returns
-------
fig, ax : tuple
"""
# plot the graph but not the route
fig, ax = plot_graph(G, bbox=bbox, fig_height=fig_height, fig_width=fig_width,
margin=margin, axis_off=axis_off, bgcolor=bgcolor,
show=False, save=False, close=False, filename=filename,
dpi=dpi, annotate=annotate, node_color=node_color,
node_size=node_size, node_alpha=node_alpha,
node_edgecolor=node_edgecolor, node_zorder=node_zorder,
edge_color=edge_color, edge_linewidth=edge_linewidth,
edge_alpha=edge_alpha, use_geom=use_geom)
# the origin and destination nodes are the first and last nodes in the route
origin_node = route[0]
destination_node = route[-1]
if origin_point is None or destination_point is None:
# if caller didn't pass points, use the first and last node in route as
# origin/destination
origin_destination_lats = (G.nodes[origin_node]['y'], G.nodes[destination_node]['y'])
origin_destination_lons = (G.nodes[origin_node]['x'], G.nodes[destination_node]['x'])
else:
# otherwise, use the passed points as origin/destination
origin_destination_lats = (origin_point[0], destination_point[0])
origin_destination_lons = (origin_point[1], destination_point[1])
orig_dest_node_color = orig_dest_point_color
# scatter the origin and destination points
ax.scatter(origin_destination_lons, origin_destination_lats, s=orig_dest_node_size,
c=orig_dest_node_color, alpha=orig_dest_node_alpha, edgecolor=node_edgecolor, zorder=4)
# plot the route lines
lines = node_list_to_coordinate_lines(G, route, use_geom)
# add the lines to the axis as a linecollection
lc = LineCollection(lines, colors=route_color, linewidths=route_linewidth, alpha=route_alpha, zorder=3)
ax.add_collection(lc)
# save and show the figure as specified
fig, ax = save_and_show(fig, ax, save, show, close, filename, file_format, dpi, axis_off)
return fig, ax | def function[plot_graph_route, parameter[G, route, bbox, fig_height, fig_width, margin, bgcolor, axis_off, show, save, close, file_format, filename, dpi, annotate, node_color, node_size, node_alpha, node_edgecolor, node_zorder, edge_color, edge_linewidth, edge_alpha, use_geom, origin_point, destination_point, route_color, route_linewidth, route_alpha, orig_dest_node_alpha, orig_dest_node_size, orig_dest_node_color, orig_dest_point_color]]:
constant[
Plot a route along a networkx spatial graph.
Parameters
----------
G : networkx multidigraph
route : list
the route as a list of nodes
bbox : tuple
bounding box as north,south,east,west - if None will calculate from
spatial extents of data. if passing a bbox, you probably also want to
pass margin=0 to constrain it.
fig_height : int
matplotlib figure height in inches
fig_width : int
matplotlib figure width in inches
margin : float
relative margin around the figure
axis_off : bool
if True turn off the matplotlib axis
bgcolor : string
the background color of the figure and axis
show : bool
if True, show the figure
save : bool
if True, save the figure as an image file to disk
close : bool
close the figure (only if show equals False) to prevent display
file_format : string
the format of the file to save (e.g., 'jpg', 'png', 'svg')
filename : string
the name of the file if saving
dpi : int
the resolution of the image file if saving
annotate : bool
if True, annotate the nodes in the figure
node_color : string
the color of the nodes
node_size : int
the size of the nodes
node_alpha : float
the opacity of the nodes
node_edgecolor : string
the color of the node's marker's border
node_zorder : int
zorder to plot nodes, edges are always 2, so make node_zorder 1 to plot
nodes beneath them or 3 to plot nodes atop them
edge_color : string
the color of the edges' lines
edge_linewidth : float
the width of the edges' lines
edge_alpha : float
the opacity of the edges' lines
use_geom : bool
if True, use the spatial geometry attribute of the edges to draw
geographically accurate edges, rather than just lines straight from node
to node
origin_point : tuple
optional, an origin (lat, lon) point to plot instead of the origin node
destination_point : tuple
optional, a destination (lat, lon) point to plot instead of the
destination node
route_color : string
the color of the route
route_linewidth : int
the width of the route line
route_alpha : float
the opacity of the route line
orig_dest_node_alpha : float
the opacity of the origin and destination nodes
orig_dest_node_size : int
the size of the origin and destination nodes
orig_dest_node_color : string
the color of the origin and destination nodes
orig_dest_point_color : string
the color of the origin and destination points if being plotted instead
of nodes
Returns
-------
fig, ax : tuple
]
<ast.Tuple object at 0x7da1b1cee4d0> assign[=] call[name[plot_graph], parameter[name[G]]]
variable[origin_node] assign[=] call[name[route]][constant[0]]
variable[destination_node] assign[=] call[name[route]][<ast.UnaryOp object at 0x7da1b1cee740>]
if <ast.BoolOp object at 0x7da1b1cef3a0> begin[:]
variable[origin_destination_lats] assign[=] tuple[[<ast.Subscript object at 0x7da1b1cef6a0>, <ast.Subscript object at 0x7da1b1ceef20>]]
variable[origin_destination_lons] assign[=] tuple[[<ast.Subscript object at 0x7da1b1cee800>, <ast.Subscript object at 0x7da1b1b5eec0>]]
call[name[ax].scatter, parameter[name[origin_destination_lons], name[origin_destination_lats]]]
variable[lines] assign[=] call[name[node_list_to_coordinate_lines], parameter[name[G], name[route], name[use_geom]]]
variable[lc] assign[=] call[name[LineCollection], parameter[name[lines]]]
call[name[ax].add_collection, parameter[name[lc]]]
<ast.Tuple object at 0x7da1b1b5ef20> assign[=] call[name[save_and_show], parameter[name[fig], name[ax], name[save], name[show], name[close], name[filename], name[file_format], name[dpi], name[axis_off]]]
return[tuple[[<ast.Name object at 0x7da1b1b69cc0>, <ast.Name object at 0x7da1b1b6b370>]]] | keyword[def] identifier[plot_graph_route] ( identifier[G] , identifier[route] , identifier[bbox] = keyword[None] , identifier[fig_height] = literal[int] , identifier[fig_width] = keyword[None] ,
identifier[margin] = literal[int] , identifier[bgcolor] = literal[string] , identifier[axis_off] = keyword[True] , identifier[show] = keyword[True] ,
identifier[save] = keyword[False] , identifier[close] = keyword[True] , identifier[file_format] = literal[string] , identifier[filename] = literal[string] ,
identifier[dpi] = literal[int] , identifier[annotate] = keyword[False] , identifier[node_color] = literal[string] ,
identifier[node_size] = literal[int] , identifier[node_alpha] = literal[int] , identifier[node_edgecolor] = literal[string] ,
identifier[node_zorder] = literal[int] , identifier[edge_color] = literal[string] , identifier[edge_linewidth] = literal[int] ,
identifier[edge_alpha] = literal[int] , identifier[use_geom] = keyword[True] , identifier[origin_point] = keyword[None] ,
identifier[destination_point] = keyword[None] , identifier[route_color] = literal[string] , identifier[route_linewidth] = literal[int] ,
identifier[route_alpha] = literal[int] , identifier[orig_dest_node_alpha] = literal[int] ,
identifier[orig_dest_node_size] = literal[int] , identifier[orig_dest_node_color] = literal[string] ,
identifier[orig_dest_point_color] = literal[string] ):
literal[string]
identifier[fig] , identifier[ax] = identifier[plot_graph] ( identifier[G] , identifier[bbox] = identifier[bbox] , identifier[fig_height] = identifier[fig_height] , identifier[fig_width] = identifier[fig_width] ,
identifier[margin] = identifier[margin] , identifier[axis_off] = identifier[axis_off] , identifier[bgcolor] = identifier[bgcolor] ,
identifier[show] = keyword[False] , identifier[save] = keyword[False] , identifier[close] = keyword[False] , identifier[filename] = identifier[filename] ,
identifier[dpi] = identifier[dpi] , identifier[annotate] = identifier[annotate] , identifier[node_color] = identifier[node_color] ,
identifier[node_size] = identifier[node_size] , identifier[node_alpha] = identifier[node_alpha] ,
identifier[node_edgecolor] = identifier[node_edgecolor] , identifier[node_zorder] = identifier[node_zorder] ,
identifier[edge_color] = identifier[edge_color] , identifier[edge_linewidth] = identifier[edge_linewidth] ,
identifier[edge_alpha] = identifier[edge_alpha] , identifier[use_geom] = identifier[use_geom] )
identifier[origin_node] = identifier[route] [ literal[int] ]
identifier[destination_node] = identifier[route] [- literal[int] ]
keyword[if] identifier[origin_point] keyword[is] keyword[None] keyword[or] identifier[destination_point] keyword[is] keyword[None] :
identifier[origin_destination_lats] =( identifier[G] . identifier[nodes] [ identifier[origin_node] ][ literal[string] ], identifier[G] . identifier[nodes] [ identifier[destination_node] ][ literal[string] ])
identifier[origin_destination_lons] =( identifier[G] . identifier[nodes] [ identifier[origin_node] ][ literal[string] ], identifier[G] . identifier[nodes] [ identifier[destination_node] ][ literal[string] ])
keyword[else] :
identifier[origin_destination_lats] =( identifier[origin_point] [ literal[int] ], identifier[destination_point] [ literal[int] ])
identifier[origin_destination_lons] =( identifier[origin_point] [ literal[int] ], identifier[destination_point] [ literal[int] ])
identifier[orig_dest_node_color] = identifier[orig_dest_point_color]
identifier[ax] . identifier[scatter] ( identifier[origin_destination_lons] , identifier[origin_destination_lats] , identifier[s] = identifier[orig_dest_node_size] ,
identifier[c] = identifier[orig_dest_node_color] , identifier[alpha] = identifier[orig_dest_node_alpha] , identifier[edgecolor] = identifier[node_edgecolor] , identifier[zorder] = literal[int] )
identifier[lines] = identifier[node_list_to_coordinate_lines] ( identifier[G] , identifier[route] , identifier[use_geom] )
identifier[lc] = identifier[LineCollection] ( identifier[lines] , identifier[colors] = identifier[route_color] , identifier[linewidths] = identifier[route_linewidth] , identifier[alpha] = identifier[route_alpha] , identifier[zorder] = literal[int] )
identifier[ax] . identifier[add_collection] ( identifier[lc] )
identifier[fig] , identifier[ax] = identifier[save_and_show] ( identifier[fig] , identifier[ax] , identifier[save] , identifier[show] , identifier[close] , identifier[filename] , identifier[file_format] , identifier[dpi] , identifier[axis_off] )
keyword[return] identifier[fig] , identifier[ax] | def plot_graph_route(G, route, bbox=None, fig_height=6, fig_width=None, margin=0.02, bgcolor='w', axis_off=True, show=True, save=False, close=True, file_format='png', filename='temp', dpi=300, annotate=False, node_color='#999999', node_size=15, node_alpha=1, node_edgecolor='none', node_zorder=1, edge_color='#999999', edge_linewidth=1, edge_alpha=1, use_geom=True, origin_point=None, destination_point=None, route_color='r', route_linewidth=4, route_alpha=0.5, orig_dest_node_alpha=0.5, orig_dest_node_size=100, orig_dest_node_color='r', orig_dest_point_color='b'):
"""
Plot a route along a networkx spatial graph.
Parameters
----------
G : networkx multidigraph
route : list
the route as a list of nodes
bbox : tuple
bounding box as north,south,east,west - if None will calculate from
spatial extents of data. if passing a bbox, you probably also want to
pass margin=0 to constrain it.
fig_height : int
matplotlib figure height in inches
fig_width : int
matplotlib figure width in inches
margin : float
relative margin around the figure
axis_off : bool
if True turn off the matplotlib axis
bgcolor : string
the background color of the figure and axis
show : bool
if True, show the figure
save : bool
if True, save the figure as an image file to disk
close : bool
close the figure (only if show equals False) to prevent display
file_format : string
the format of the file to save (e.g., 'jpg', 'png', 'svg')
filename : string
the name of the file if saving
dpi : int
the resolution of the image file if saving
annotate : bool
if True, annotate the nodes in the figure
node_color : string
the color of the nodes
node_size : int
the size of the nodes
node_alpha : float
the opacity of the nodes
node_edgecolor : string
the color of the node's marker's border
node_zorder : int
zorder to plot nodes, edges are always 2, so make node_zorder 1 to plot
nodes beneath them or 3 to plot nodes atop them
edge_color : string
the color of the edges' lines
edge_linewidth : float
the width of the edges' lines
edge_alpha : float
the opacity of the edges' lines
use_geom : bool
if True, use the spatial geometry attribute of the edges to draw
geographically accurate edges, rather than just lines straight from node
to node
origin_point : tuple
optional, an origin (lat, lon) point to plot instead of the origin node
destination_point : tuple
optional, a destination (lat, lon) point to plot instead of the
destination node
route_color : string
the color of the route
route_linewidth : int
the width of the route line
route_alpha : float
the opacity of the route line
orig_dest_node_alpha : float
the opacity of the origin and destination nodes
orig_dest_node_size : int
the size of the origin and destination nodes
orig_dest_node_color : string
the color of the origin and destination nodes
orig_dest_point_color : string
the color of the origin and destination points if being plotted instead
of nodes
Returns
-------
fig, ax : tuple
"""
# plot the graph but not the route
(fig, ax) = plot_graph(G, bbox=bbox, fig_height=fig_height, fig_width=fig_width, margin=margin, axis_off=axis_off, bgcolor=bgcolor, show=False, save=False, close=False, filename=filename, dpi=dpi, annotate=annotate, node_color=node_color, node_size=node_size, node_alpha=node_alpha, node_edgecolor=node_edgecolor, node_zorder=node_zorder, edge_color=edge_color, edge_linewidth=edge_linewidth, edge_alpha=edge_alpha, use_geom=use_geom)
# the origin and destination nodes are the first and last nodes in the route
origin_node = route[0]
destination_node = route[-1]
if origin_point is None or destination_point is None:
# if caller didn't pass points, use the first and last node in route as
# origin/destination
origin_destination_lats = (G.nodes[origin_node]['y'], G.nodes[destination_node]['y'])
origin_destination_lons = (G.nodes[origin_node]['x'], G.nodes[destination_node]['x']) # depends on [control=['if'], data=[]]
else:
# otherwise, use the passed points as origin/destination
origin_destination_lats = (origin_point[0], destination_point[0])
origin_destination_lons = (origin_point[1], destination_point[1])
orig_dest_node_color = orig_dest_point_color
# scatter the origin and destination points
ax.scatter(origin_destination_lons, origin_destination_lats, s=orig_dest_node_size, c=orig_dest_node_color, alpha=orig_dest_node_alpha, edgecolor=node_edgecolor, zorder=4)
# plot the route lines
lines = node_list_to_coordinate_lines(G, route, use_geom)
# add the lines to the axis as a linecollection
lc = LineCollection(lines, colors=route_color, linewidths=route_linewidth, alpha=route_alpha, zorder=3)
ax.add_collection(lc)
# save and show the figure as specified
(fig, ax) = save_and_show(fig, ax, save, show, close, filename, file_format, dpi, axis_off)
return (fig, ax) |
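A hedged end-to-end sketch for plot_graph_route(), assuming osmnx-style graph helpers are available alongside the record's function; the place name is arbitrary:

import networkx as nx
import osmnx as ox

G = ox.graph_from_place('Piedmont, California, USA', network_type='drive')
nodes = list(G.nodes())
route = nx.shortest_path(G, nodes[0], nodes[-1], weight='length')
fig, ax = plot_graph_route(G, route, node_size=0)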
def put(self, template_name):
"""Update a template"""
self.reqparse.add_argument('template', type=str, required=True)
args = self.reqparse.parse_args()
template = db.Template.find_one(template_name=template_name)
if not template:
return self.make_response('No such template found', HTTP.NOT_FOUND)
changes = diff(template.template, args['template'])
template.template = args['template']
template.is_modified = True
db.session.add(template)
db.session.commit()
auditlog(
event='template.update',
actor=session['user'].username,
data={
'template_name': template_name,
'template_changes': changes
}
)
return self.make_response('Template {} has been updated'.format(template_name)) | def function[put, parameter[self, template_name]]:
constant[Update a template]
call[name[self].reqparse.add_argument, parameter[constant[template]]]
variable[args] assign[=] call[name[self].reqparse.parse_args, parameter[]]
variable[template] assign[=] call[name[db].Template.find_one, parameter[]]
if <ast.UnaryOp object at 0x7da1b20171c0> begin[:]
return[call[name[self].make_response, parameter[constant[No such template found], name[HTTP].NOT_FOUND]]]
variable[changes] assign[=] call[name[diff], parameter[name[template].template, call[name[args]][constant[template]]]]
name[template].template assign[=] call[name[args]][constant[template]]
name[template].is_modified assign[=] constant[True]
call[name[db].session.add, parameter[name[template]]]
call[name[db].session.commit, parameter[]]
call[name[auditlog], parameter[]]
return[call[name[self].make_response, parameter[call[constant[Template {} has been updated].format, parameter[name[template_name]]]]]] | keyword[def] identifier[put] ( identifier[self] , identifier[template_name] ):
literal[string]
identifier[self] . identifier[reqparse] . identifier[add_argument] ( literal[string] , identifier[type] = identifier[str] , identifier[required] = keyword[True] )
identifier[args] = identifier[self] . identifier[reqparse] . identifier[parse_args] ()
identifier[template] = identifier[db] . identifier[Template] . identifier[find_one] ( identifier[template_name] = identifier[template_name] )
keyword[if] keyword[not] identifier[template] :
keyword[return] identifier[self] . identifier[make_response] ( literal[string] , identifier[HTTP] . identifier[NOT_FOUND] )
identifier[changes] = identifier[diff] ( identifier[template] . identifier[template] , identifier[args] [ literal[string] ])
identifier[template] . identifier[template] = identifier[args] [ literal[string] ]
identifier[template] . identifier[is_modified] = keyword[True]
identifier[db] . identifier[session] . identifier[add] ( identifier[template] )
identifier[db] . identifier[session] . identifier[commit] ()
identifier[auditlog] (
identifier[event] = literal[string] ,
identifier[actor] = identifier[session] [ literal[string] ]. identifier[username] ,
identifier[data] ={
literal[string] : identifier[template_name] ,
literal[string] : identifier[changes]
}
)
keyword[return] identifier[self] . identifier[make_response] ( literal[string] . identifier[format] ( identifier[template_name] )) | def put(self, template_name):
"""Update a template"""
self.reqparse.add_argument('template', type=str, required=True)
args = self.reqparse.parse_args()
template = db.Template.find_one(template_name=template_name)
if not template:
return self.make_response('No such template found', HTTP.NOT_FOUND) # depends on [control=['if'], data=[]]
changes = diff(template.template, args['template'])
template.template = args['template']
template.is_modified = True
db.session.add(template)
db.session.commit()
auditlog(event='template.update', actor=session['user'].username, data={'template_name': template_name, 'template_changes': changes})
return self.make_response('Template {} has been updated'.format(template_name)) |
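A client-side sketch exercising the resource above with requests; the base URL and template name are hypothetical, and authentication headers are omitted:

import requests

resp = requests.put(
    'https://cinq.example.com/api/v1/template/instance_age',
    data={'template': '{{ resource.id }} is too old'},
)
print(resp.status_code, resp.json())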
def calc_remotefailure_v1(self):
"""Estimate the shortfall of actual discharge under the required discharge
of a cross section far downstream.
Required control parameters:
|NmbLogEntries|
|RemoteDischargeMinimum|
Required derived parameters:
|dam_derived.TOY|
Required log sequence:
|LoggedTotalRemoteDischarge|
Calculated flux sequence:
|RemoteFailure|
Basic equation:
:math:`RemoteFailure = RemoteDischargeMinimum -
\\frac{\\Sigma(LoggedTotalRemoteDischarge)}{NmbLogEntries}`
Examples:
As explained in the documentation on method |calc_remotedemand_v1|,
we have to define a simulation period first:
>>> from hydpy import pub
>>> pub.timegrids = '2001.03.30', '2001.04.03', '1d'
Now we prepare a dam model with log sequences memorizing three values:
>>> from hydpy.models.dam import *
>>> parameterstep()
>>> nmblogentries(3)
Again, the required discharge is 2 m³/s in summer and 0 m³/s in winter:
>>> remotedischargeminimum(_11_1_12=0.0, _03_31_12=0.0,
... _04_1_12=2.0, _10_31_12=2.0)
>>> derived.toy.update()
Let it be supposed that the actual discharge at the remote
cross section dropped from 2 m³/s to 0 m³/s over the last three days:
>>> logs.loggedtotalremotedischarge(0.0, 1.0, 2.0)
This means that for April 1 there would have been an average
shortfall of 1 m³/s:
>>> model.idx_sim = pub.timegrids.init['2001.04.01']
>>> model.calc_remotefailure_v1()
>>> fluxes.remotefailure
remotefailure(1.0)
For March 31, in contrast, there would have been an excess of 1 m³/s, which
is interpreted to be a "negative failure":
>>> model.idx_sim = pub.timegrids.init['2001.03.31']
>>> model.calc_remotefailure_v1()
>>> fluxes.remotefailure
remotefailure(-1.0)
"""
con = self.parameters.control.fastaccess
der = self.parameters.derived.fastaccess
flu = self.sequences.fluxes.fastaccess
log = self.sequences.logs.fastaccess
flu.remotefailure = 0
for idx in range(con.nmblogentries):
flu.remotefailure -= log.loggedtotalremotedischarge[idx]
flu.remotefailure /= con.nmblogentries
flu.remotefailure += con.remotedischargeminimum[der.toy[self.idx_sim]] | def function[calc_remotefailure_v1, parameter[self]]:
constant[Estimate the shortfall of actual discharge under the required discharge
of a cross section far downstream.
Required control parameters:
|NmbLogEntries|
|RemoteDischargeMinimum|
Required derived parameters:
|dam_derived.TOY|
Required log sequence:
|LoggedTotalRemoteDischarge|
Calculated flux sequence:
|RemoteFailure|
Basic equation:
:math:`RemoteFailure = RemoteDischargeMinimum -
\frac{\Sigma(LoggedTotalRemoteDischarge)}{NmbLogEntries}`
Examples:
As explained in the documentation on method |calc_remotedemand_v1|,
we have to define a simulation period first:
>>> from hydpy import pub
>>> pub.timegrids = '2001.03.30', '2001.04.03', '1d'
Now we prepare a dam model with log sequences memorizing three values:
>>> from hydpy.models.dam import *
>>> parameterstep()
>>> nmblogentries(3)
Again, the required discharge is 2 m³/s in summer and 0 m³/s in winter:
>>> remotedischargeminimum(_11_1_12=0.0, _03_31_12=0.0,
... _04_1_12=2.0, _10_31_12=2.0)
>>> derived.toy.update()
Let it be supposed that the actual discharge at the remote
cross section dropped from 2 m³/s to 0 m³/s over the last three days:
>>> logs.loggedtotalremotedischarge(0.0, 1.0, 2.0)
This means that for April 1 there would have been an average
shortfall of 1 m³/s:
>>> model.idx_sim = pub.timegrids.init['2001.04.01']
>>> model.calc_remotefailure_v1()
>>> fluxes.remotefailure
remotefailure(1.0)
For March 31, in contrast, there would have been an excess of 1 m³/s, which
is interpreted to be a "negative failure":
>>> model.idx_sim = pub.timegrids.init['2001.03.31']
>>> model.calc_remotefailure_v1()
>>> fluxes.remotefailure
remotefailure(-1.0)
]
variable[con] assign[=] name[self].parameters.control.fastaccess
variable[der] assign[=] name[self].parameters.derived.fastaccess
variable[flu] assign[=] name[self].sequences.fluxes.fastaccess
variable[log] assign[=] name[self].sequences.logs.fastaccess
name[flu].remotefailure assign[=] constant[0]
for taget[name[idx]] in starred[call[name[range], parameter[name[con].nmblogentries]]] begin[:]
<ast.AugAssign object at 0x7da18bcc8910>
<ast.AugAssign object at 0x7da18bccbc70>
<ast.AugAssign object at 0x7da18bcc9360> | keyword[def] identifier[calc_remotefailure_v1] ( identifier[self] ):
literal[string]
identifier[con] = identifier[self] . identifier[parameters] . identifier[control] . identifier[fastaccess]
identifier[der] = identifier[self] . identifier[parameters] . identifier[derived] . identifier[fastaccess]
identifier[flu] = identifier[self] . identifier[sequences] . identifier[fluxes] . identifier[fastaccess]
identifier[log] = identifier[self] . identifier[sequences] . identifier[logs] . identifier[fastaccess]
identifier[flu] . identifier[remotefailure] = literal[int]
keyword[for] identifier[idx] keyword[in] identifier[range] ( identifier[con] . identifier[nmblogentries] ):
identifier[flu] . identifier[remotefailure] -= identifier[log] . identifier[loggedtotalremotedischarge] [ identifier[idx] ]
identifier[flu] . identifier[remotefailure] /= identifier[con] . identifier[nmblogentries]
identifier[flu] . identifier[remotefailure] += identifier[con] . identifier[remotedischargeminimum] [ identifier[der] . identifier[toy] [ identifier[self] . identifier[idx_sim] ]] | def calc_remotefailure_v1(self):
"""Estimate the shortfall of actual discharge under the required discharge
of a cross section far downstream.
Required control parameters:
|NmbLogEntries|
|RemoteDischargeMinimum|
Required derived parameters:
|dam_derived.TOY|
Required log sequence:
|LoggedTotalRemoteDischarge|
Calculated flux sequence:
|RemoteFailure|
Basic equation:
:math:`RemoteFailure = RemoteDischargeMinimum -
\\frac{\\Sigma(LoggedTotalRemoteDischarge)}{NmbLogEntries}`
Examples:
As explained in the documentation on method |calc_remotedemand_v1|,
we have to define a simulation period first:
>>> from hydpy import pub
>>> pub.timegrids = '2001.03.30', '2001.04.03', '1d'
Now we prepare a dam model with log sequences memorizing three values:
>>> from hydpy.models.dam import *
>>> parameterstep()
>>> nmblogentries(3)
Again, the required discharge is 2 m³/s in summer and 0 m³/s in winter:
>>> remotedischargeminimum(_11_1_12=0.0, _03_31_12=0.0,
... _04_1_12=2.0, _10_31_12=2.0)
>>> derived.toy.update()
Let it be supposed that the actual discharge at the remote
cross section dropped from 2 m³/s to 0 m³/s over the last three days:
>>> logs.loggedtotalremotedischarge(0.0, 1.0, 2.0)
This means that for April 1 there would have been an average
shortfall of 1 m³/s:
>>> model.idx_sim = pub.timegrids.init['2001.04.01']
>>> model.calc_remotefailure_v1()
>>> fluxes.remotefailure
remotefailure(1.0)
For March 31, in contrast, there would have been an excess of 1 m³/s, which
is interpreted to be a "negative failure":
>>> model.idx_sim = pub.timegrids.init['2001.03.31']
>>> model.calc_remotefailure_v1()
>>> fluxes.remotefailure
remotefailure(-1.0)
"""
con = self.parameters.control.fastaccess
der = self.parameters.derived.fastaccess
flu = self.sequences.fluxes.fastaccess
log = self.sequences.logs.fastaccess
flu.remotefailure = 0
for idx in range(con.nmblogentries):
flu.remotefailure -= log.loggedtotalremotedischarge[idx] # depends on [control=['for'], data=['idx']]
flu.remotefailure /= con.nmblogentries
flu.remotefailure += con.remotedischargeminimum[der.toy[self.idx_sim]] |
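As a quick arithmetic check of the basic equation above — a standalone sketch in plain Python, independent of HydPy — the two doctest values follow directly from RemoteDischargeMinimum minus the mean of the logged series:

# remotefailure = minimum - mean(logged), mirroring the subtract/divide/add steps above.
logged = [0.0, 1.0, 2.0]
mean_logged = sum(logged) / len(logged)   # 1.0 m³/s

print(2.0 - mean_logged)   # April 1 (summer minimum 2.0): 1.0 -> shortfall
print(0.0 - mean_logged)   # March 31 (winter minimum 0.0): -1.0 -> "negative failure"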