code | code_sememe | token_type | code_dependency |
---|---|---|---|
def _wrap_client_error(e):
"""
Wrap botocore ClientError exception into ServerlessRepoClientError.
:param e: botocore exception
:type e: ClientError
:return: S3PermissionsRequired or InvalidS3UriError or general ServerlessRepoClientError
"""
error_code = e.response['Error']['Code']
message = e.response['Error']['Message']
if error_code == 'BadRequestException':
if "Failed to copy S3 object. Access denied:" in message:
match = re.search('bucket=(.+?), key=(.+?)$', message)
if match:
return S3PermissionsRequired(bucket=match.group(1), key=match.group(2))
if "Invalid S3 URI" in message:
return InvalidS3UriError(message=message)
return ServerlessRepoClientError(message=message) | def function[_wrap_client_error, parameter[e]]:
constant[
Wrap botocore ClientError exception into ServerlessRepoClientError.
:param e: botocore exception
:type e: ClientError
:return: S3PermissionsRequired or InvalidS3UriError or general ServerlessRepoClientError
]
variable[error_code] assign[=] call[call[name[e].response][constant[Error]]][constant[Code]]
variable[message] assign[=] call[call[name[e].response][constant[Error]]][constant[Message]]
if compare[name[error_code] equal[==] constant[BadRequestException]] begin[:]
if compare[constant[Failed to copy S3 object. Access denied:] in name[message]] begin[:]
variable[match] assign[=] call[name[re].search, parameter[constant[bucket=(.+?), key=(.+?)$], name[message]]]
if name[match] begin[:]
return[call[name[S3PermissionsRequired], parameter[]]]
if compare[constant[Invalid S3 URI] in name[message]] begin[:]
return[call[name[InvalidS3UriError], parameter[]]]
return[call[name[ServerlessRepoClientError], parameter[]]] | keyword[def] identifier[_wrap_client_error] ( identifier[e] ):
literal[string]
identifier[error_code] = identifier[e] . identifier[response] [ literal[string] ][ literal[string] ]
identifier[message] = identifier[e] . identifier[response] [ literal[string] ][ literal[string] ]
keyword[if] identifier[error_code] == literal[string] :
keyword[if] literal[string] keyword[in] identifier[message] :
identifier[match] = identifier[re] . identifier[search] ( literal[string] , identifier[message] )
keyword[if] identifier[match] :
keyword[return] identifier[S3PermissionsRequired] ( identifier[bucket] = identifier[match] . identifier[group] ( literal[int] ), identifier[key] = identifier[match] . identifier[group] ( literal[int] ))
keyword[if] literal[string] keyword[in] identifier[message] :
keyword[return] identifier[InvalidS3UriError] ( identifier[message] = identifier[message] )
keyword[return] identifier[ServerlessRepoClientError] ( identifier[message] = identifier[message] ) | def _wrap_client_error(e):
"""
Wrap botocore ClientError exception into ServerlessRepoClientError.
:param e: botocore exception
:type e: ClientError
:return: S3PermissionsRequired or InvalidS3UriError or general ServerlessRepoClientError
"""
error_code = e.response['Error']['Code']
message = e.response['Error']['Message']
if error_code == 'BadRequestException':
if 'Failed to copy S3 object. Access denied:' in message:
match = re.search('bucket=(.+?), key=(.+?)$', message)
if match:
return S3PermissionsRequired(bucket=match.group(1), key=match.group(2)) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['message']]
if 'Invalid S3 URI' in message:
return InvalidS3UriError(message=message) # depends on [control=['if'], data=['message']] # depends on [control=['if'], data=[]]
return ServerlessRepoClientError(message=message) |
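A hedged usage sketch for the record above: constructing a botocore `ClientError` by hand and passing it through the wrapper. The serverlessrepo exception classes are assumed to be importable as in the original module.

```python
# Sketch only: exercises _wrap_client_error with a hand-built ClientError.
# Assumes `re` and the serverlessrepo exception classes from the original module.
from botocore.exceptions import ClientError

error_response = {'Error': {
    'Code': 'BadRequestException',
    'Message': 'Failed to copy S3 object. Access denied: bucket=my-bucket, key=app.zip',
}}
err = ClientError(error_response, 'CreateApplication')
wrapped = _wrap_client_error(err)
# wrapped is S3PermissionsRequired(bucket='my-bucket', key='app.zip')
```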
def get_airports(self, country):
"""Returns a list of all the airports
    For a given country, this returns a list of dicts, one per airport, with information such as the airport's IATA code
Args:
country (str): The country for which the airports will be fetched
Example::
from pyflightdata import FlightData
f=FlightData()
f.get_airports('India')
"""
url = AIRPORT_BASE.format(country.replace(" ", "-"))
return self._fr24.get_airports_data(url) | def function[get_airports, parameter[self, country]]:
constant[Returns a list of all the airports
    For a given country, this returns a list of dicts, one per airport, with information such as the airport's IATA code
Args:
country (str): The country for which the airports will be fetched
Example::
from pyflightdata import FlightData
f=FlightData()
f.get_airports('India')
]
variable[url] assign[=] call[name[AIRPORT_BASE].format, parameter[call[name[country].replace, parameter[constant[ ], constant[-]]]]]
return[call[name[self]._fr24.get_airports_data, parameter[name[url]]]] | keyword[def] identifier[get_airports] ( identifier[self] , identifier[country] ):
literal[string]
identifier[url] = identifier[AIRPORT_BASE] . identifier[format] ( identifier[country] . identifier[replace] ( literal[string] , literal[string] ))
keyword[return] identifier[self] . identifier[_fr24] . identifier[get_airports_data] ( identifier[url] ) | def get_airports(self, country):
"""Returns a list of all the airports
    For a given country, this returns a list of dicts, one per airport, with information such as the airport's IATA code
Args:
country (str): The country for which the airports will be fetched
Example::
from pyflightdata import FlightData
f=FlightData()
f.get_airports('India')
"""
url = AIRPORT_BASE.format(country.replace(' ', '-'))
return self._fr24.get_airports_data(url) |
def directory_complete(self, text, line, begidx, endidx):
"""Figure out what directories match the completion."""
return [filename for filename in self.filename_complete(text, line, begidx, endidx) if filename[-1] == '/'] | def function[directory_complete, parameter[self, text, line, begidx, endidx]]:
constant[Figure out what directories match the completion.]
return[<ast.ListComp object at 0x7da207f035e0>] | keyword[def] identifier[directory_complete] ( identifier[self] , identifier[text] , identifier[line] , identifier[begidx] , identifier[endidx] ):
literal[string]
keyword[return] [ identifier[filename] keyword[for] identifier[filename] keyword[in] identifier[self] . identifier[filename_complete] ( identifier[text] , identifier[line] , identifier[begidx] , identifier[endidx] ) keyword[if] identifier[filename] [- literal[int] ]== literal[string] ] | def directory_complete(self, text, line, begidx, endidx):
"""Figure out what directories match the completion."""
return [filename for filename in self.filename_complete(text, line, begidx, endidx) if filename[-1] == '/'] |
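The filtering idea in isolation, assuming a completer that marks directories with a trailing slash (as `filename_complete` above implies); the candidate list is made up for the example.

```python
# Standalone illustration of the trailing-slash filter.
candidates = ['src/', 'setup.py', 'tests/', 'README.md']
directories = [name for name in candidates if name[-1] == '/']
print(directories)  # ['src/', 'tests/']
```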
def status_icon(self):
'glyphicon for task status; requires bootstrap'
icon = self.status_icon_map.get(self.status.lower(),
self.unknown_icon)
style = self.status_style.get(self.status.lower(), '')
return mark_safe(
'<span class="glyphicon %s %s" aria-hidden="true"></span>' %
(icon, style)) | def function[status_icon, parameter[self]]:
constant[glyphicon for task status; requires bootstrap]
variable[icon] assign[=] call[name[self].status_icon_map.get, parameter[call[name[self].status.lower, parameter[]], name[self].unknown_icon]]
variable[style] assign[=] call[name[self].status_style.get, parameter[call[name[self].status.lower, parameter[]], constant[]]]
return[call[name[mark_safe], parameter[binary_operation[constant[<span class="glyphicon %s %s" aria-hidden="true"></span>] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da18f722a70>, <ast.Name object at 0x7da18f723400>]]]]]] | keyword[def] identifier[status_icon] ( identifier[self] ):
literal[string]
identifier[icon] = identifier[self] . identifier[status_icon_map] . identifier[get] ( identifier[self] . identifier[status] . identifier[lower] (),
identifier[self] . identifier[unknown_icon] )
identifier[style] = identifier[self] . identifier[status_style] . identifier[get] ( identifier[self] . identifier[status] . identifier[lower] (), literal[string] )
keyword[return] identifier[mark_safe] (
literal[string] %
( identifier[icon] , identifier[style] )) | def status_icon(self):
"""glyphicon for task status; requires bootstrap"""
icon = self.status_icon_map.get(self.status.lower(), self.unknown_icon)
style = self.status_style.get(self.status.lower(), '')
return mark_safe('<span class="glyphicon %s %s" aria-hidden="true"></span>' % (icon, style)) |
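A minimal sketch of the lookup tables the property relies on; the real maps live on the model class and may differ, and Django must be installed for `mark_safe`.

```python
from django.utils.safestring import mark_safe  # the method relies on Django's mark_safe

# Hypothetical lookup tables for illustration only.
status_icon_map = {'success': 'glyphicon-ok', 'failure': 'glyphicon-remove'}
status_style = {'failure': 'text-danger'}
unknown_icon = 'glyphicon-question-sign'

status = 'failure'
icon = status_icon_map.get(status.lower(), unknown_icon)
style = status_style.get(status.lower(), '')
html = mark_safe('<span class="glyphicon %s %s" aria-hidden="true"></span>' % (icon, style))
```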
def K_branch_diverging_Crane(D_run, D_branch, Q_run, Q_branch, angle=90):
r'''Returns the loss coefficient for the branch of a diverging tee or wye
according to the Crane method [1]_.
.. math::
K_{branch} = G\left[1 + H\left(\frac{Q_{branch}}{Q_{comb}
\beta_{branch}^2}\right)^2 - J\left(\frac{Q_{branch}}{Q_{comb}
\beta_{branch}^2}\right)\cos\theta\right]
.. math::
\beta_{branch} = \frac{D_{branch}}{D_{comb}}
See the notes for definitions of H, J, and G.
Parameters
----------
D_run : float
Diameter of the straight-through inlet portion of the tee or wye [m]
D_branch : float
Diameter of the pipe attached at an angle to the straight-through, [m]
Q_run : float
Volumetric flow rate in the straight-through outlet of the tee or wye,
[m^3/s]
Q_branch : float
Volumetric flow rate in the pipe attached at an angle to the straight-
through, [m^3/s]
angle : float, optional
Angle the branch makes with the straight-through (tee=90, wye<90)
[degrees]
Returns
-------
K : float
Loss coefficient of branch with respect to the velocity and inside
diameter of the combined flow inlet [-]
Notes
-----
If :math:`\beta_{branch} = 1, \theta = 90^\circ`, H = 0.3 and J = 0.
Otherwise H = 1 and J = 2.
G is determined according to the following pseudocode:
.. code-block:: python
if angle < 75:
if beta2 <= 0.35:
if Q_ratio <= 0.4:
G = 1.1 - 0.7*Q_ratio
else:
G = 0.85
else:
if Q_ratio <= 0.6:
G = 1.0 - 0.6*Q_ratio
else:
G = 0.6
else:
if beta2 <= 2/3.:
G = 1
else:
G = 1 + 0.3*Q_ratio*Q_ratio
Note that there are several errors in the text of [1]_; the errata can be
obtained here: http://www.flowoffluids.com/publications/tp-410-errata.aspx
Examples
--------
Example 7-36 of [1]_. A DN150 schedule 80 wye has 1515 liters/minute of
water exiting the straight leg, and 950 liters/minute of water
exiting it through a 45° branch. Calculate the loss coefficient in
the branch. The calculated value there is 0.4640.
>>> K_branch_diverging_Crane(0.146, 0.146, 0.02525, 0.01583, angle=45)
0.4639895627496694
References
----------
.. [1] Crane Co. Flow of Fluids Through Valves, Fittings, and Pipe. Crane,
2009.
'''
beta = (D_branch/D_run)
beta2 = beta*beta
Q_comb = Q_run + Q_branch
Q_ratio = Q_branch/Q_comb
if angle < 60 or beta <= 2/3.:
H, J = 1., 2.
else:
H, J = 0.3, 0
if angle < 75:
if beta2 <= 0.35:
if Q_ratio <= 0.4:
G = 1.1 - 0.7*Q_ratio
else:
G = 0.85
else:
if Q_ratio <= 0.6:
G = 1.0 - 0.6*Q_ratio
else:
G = 0.6
else:
if beta2 <= 2/3.:
G = 1
else:
G = 1 + 0.3*Q_ratio*Q_ratio
angle_rad = radians(angle)
K_branch = G*(1 + H*(Q_ratio/beta2)**2 - J*(Q_ratio/beta2)*cos(angle_rad))
return K_branch | def function[K_branch_diverging_Crane, parameter[D_run, D_branch, Q_run, Q_branch, angle]]:
constant[Returns the loss coefficient for the branch of a diverging tee or wye
according to the Crane method [1]_.
.. math::
K_{branch} = G\left[1 + H\left(\frac{Q_{branch}}{Q_{comb}
\beta_{branch}^2}\right)^2 - J\left(\frac{Q_{branch}}{Q_{comb}
\beta_{branch}^2}\right)\cos\theta\right]
.. math::
\beta_{branch} = \frac{D_{branch}}{D_{comb}}
See the notes for definitions of H, J, and G.
Parameters
----------
D_run : float
Diameter of the straight-through inlet portion of the tee or wye [m]
D_branch : float
Diameter of the pipe attached at an angle to the straight-through, [m]
Q_run : float
Volumetric flow rate in the straight-through outlet of the tee or wye,
[m^3/s]
Q_branch : float
Volumetric flow rate in the pipe attached at an angle to the straight-
through, [m^3/s]
angle : float, optional
Angle the branch makes with the straight-through (tee=90, wye<90)
[degrees]
Returns
-------
K : float
Loss coefficient of branch with respect to the velocity and inside
diameter of the combined flow inlet [-]
Notes
-----
If :math:`\beta_{branch} = 1, \theta = 90^\circ`, H = 0.3 and J = 0.
Otherwise H = 1 and J = 2.
G is determined according to the following pseudocode:
.. code-block:: python
if angle < 75:
if beta2 <= 0.35:
if Q_ratio <= 0.4:
G = 1.1 - 0.7*Q_ratio
else:
G = 0.85
else:
if Q_ratio <= 0.6:
G = 1.0 - 0.6*Q_ratio
else:
G = 0.6
else:
if beta2 <= 2/3.:
G = 1
else:
G = 1 + 0.3*Q_ratio*Q_ratio
Note that there are several errors in the text of [1]_; the errata can be
obtained here: http://www.flowoffluids.com/publications/tp-410-errata.aspx
Examples
--------
Example 7-36 of [1]_. A DN150 schedule 80 wye has 1515 liters/minute of
water exiting the straight leg, and 950 liters/minute of water
exiting it through a 45° branch. Calculate the loss coefficient in
the branch. The calculated value there is 0.4640.
>>> K_branch_diverging_Crane(0.146, 0.146, 0.02525, 0.01583, angle=45)
0.4639895627496694
References
----------
.. [1] Crane Co. Flow of Fluids Through Valves, Fittings, and Pipe. Crane,
2009.
]
variable[beta] assign[=] binary_operation[name[D_branch] / name[D_run]]
variable[beta2] assign[=] binary_operation[name[beta] * name[beta]]
variable[Q_comb] assign[=] binary_operation[name[Q_run] + name[Q_branch]]
variable[Q_ratio] assign[=] binary_operation[name[Q_branch] / name[Q_comb]]
if <ast.BoolOp object at 0x7da18f09ead0> begin[:]
<ast.Tuple object at 0x7da18f09d4b0> assign[=] tuple[[<ast.Constant object at 0x7da18f09e380>, <ast.Constant object at 0x7da18f09dde0>]]
if compare[name[angle] less[<] constant[75]] begin[:]
if compare[name[beta2] less_or_equal[<=] constant[0.35]] begin[:]
if compare[name[Q_ratio] less_or_equal[<=] constant[0.4]] begin[:]
variable[G] assign[=] binary_operation[constant[1.1] - binary_operation[constant[0.7] * name[Q_ratio]]]
variable[angle_rad] assign[=] call[name[radians], parameter[name[angle]]]
variable[K_branch] assign[=] binary_operation[name[G] * binary_operation[binary_operation[constant[1] + binary_operation[name[H] * binary_operation[binary_operation[name[Q_ratio] / name[beta2]] ** constant[2]]]] - binary_operation[binary_operation[name[J] * binary_operation[name[Q_ratio] / name[beta2]]] * call[name[cos], parameter[name[angle_rad]]]]]]
return[name[K_branch]] | keyword[def] identifier[K_branch_diverging_Crane] ( identifier[D_run] , identifier[D_branch] , identifier[Q_run] , identifier[Q_branch] , identifier[angle] = literal[int] ):
literal[string]
identifier[beta] =( identifier[D_branch] / identifier[D_run] )
identifier[beta2] = identifier[beta] * identifier[beta]
identifier[Q_comb] = identifier[Q_run] + identifier[Q_branch]
identifier[Q_ratio] = identifier[Q_branch] / identifier[Q_comb]
keyword[if] identifier[angle] < literal[int] keyword[or] identifier[beta] <= literal[int] / literal[int] :
identifier[H] , identifier[J] = literal[int] , literal[int]
keyword[else] :
identifier[H] , identifier[J] = literal[int] , literal[int]
keyword[if] identifier[angle] < literal[int] :
keyword[if] identifier[beta2] <= literal[int] :
keyword[if] identifier[Q_ratio] <= literal[int] :
identifier[G] = literal[int] - literal[int] * identifier[Q_ratio]
keyword[else] :
identifier[G] = literal[int]
keyword[else] :
keyword[if] identifier[Q_ratio] <= literal[int] :
identifier[G] = literal[int] - literal[int] * identifier[Q_ratio]
keyword[else] :
identifier[G] = literal[int]
keyword[else] :
keyword[if] identifier[beta2] <= literal[int] / literal[int] :
identifier[G] = literal[int]
keyword[else] :
identifier[G] = literal[int] + literal[int] * identifier[Q_ratio] * identifier[Q_ratio]
identifier[angle_rad] = identifier[radians] ( identifier[angle] )
identifier[K_branch] = identifier[G] *( literal[int] + identifier[H] *( identifier[Q_ratio] / identifier[beta2] )** literal[int] - identifier[J] *( identifier[Q_ratio] / identifier[beta2] )* identifier[cos] ( identifier[angle_rad] ))
keyword[return] identifier[K_branch] | def K_branch_diverging_Crane(D_run, D_branch, Q_run, Q_branch, angle=90):
"""Returns the loss coefficient for the branch of a diverging tee or wye
according to the Crane method [1]_.
.. math::
K_{branch} = G\\left[1 + H\\left(\\frac{Q_{branch}}{Q_{comb}
\\beta_{branch}^2}\\right)^2 - J\\left(\\frac{Q_{branch}}{Q_{comb}
\\beta_{branch}^2}\\right)\\cos\\theta\\right]
.. math::
\\beta_{branch} = \\frac{D_{branch}}{D_{comb}}
See the notes for definitions of H, J, and G.
Parameters
----------
D_run : float
Diameter of the straight-through inlet portion of the tee or wye [m]
D_branch : float
Diameter of the pipe attached at an angle to the straight-through, [m]
Q_run : float
Volumetric flow rate in the straight-through outlet of the tee or wye,
[m^3/s]
Q_branch : float
Volumetric flow rate in the pipe attached at an angle to the straight-
through, [m^3/s]
angle : float, optional
Angle the branch makes with the straight-through (tee=90, wye<90)
[degrees]
Returns
-------
K : float
Loss coefficient of branch with respect to the velocity and inside
diameter of the combined flow inlet [-]
Notes
-----
If :math:`\\beta_{branch} = 1, \\theta = 90^\\circ`, H = 0.3 and J = 0.
Otherwise H = 1 and J = 2.
G is determined according to the following pseudocode:
.. code-block:: python
if angle < 75:
if beta2 <= 0.35:
if Q_ratio <= 0.4:
G = 1.1 - 0.7*Q_ratio
else:
G = 0.85
else:
if Q_ratio <= 0.6:
G = 1.0 - 0.6*Q_ratio
else:
G = 0.6
else:
if beta2 <= 2/3.:
G = 1
else:
G = 1 + 0.3*Q_ratio*Q_ratio
Note that there are several errors in the text of [1]_; the errata can be
obtained here: http://www.flowoffluids.com/publications/tp-410-errata.aspx
Examples
--------
Example 7-36 of [1]_. A DN150 schedule 80 wye has 1515 liters/minute of
water exiting the straight leg, and 950 liters/minute of water
exiting it through a 45° branch. Calculate the loss coefficient in
the branch. The calculated value there is 0.4640.
>>> K_branch_diverging_Crane(0.146, 0.146, 0.02525, 0.01583, angle=45)
0.4639895627496694
References
----------
.. [1] Crane Co. Flow of Fluids Through Valves, Fittings, and Pipe. Crane,
2009.
"""
beta = D_branch / D_run
beta2 = beta * beta
Q_comb = Q_run + Q_branch
Q_ratio = Q_branch / Q_comb
if angle < 60 or beta <= 2 / 3.0:
(H, J) = (1.0, 2.0) # depends on [control=['if'], data=[]]
else:
(H, J) = (0.3, 0)
if angle < 75:
if beta2 <= 0.35:
if Q_ratio <= 0.4:
G = 1.1 - 0.7 * Q_ratio # depends on [control=['if'], data=['Q_ratio']]
else:
G = 0.85 # depends on [control=['if'], data=[]]
elif Q_ratio <= 0.6:
G = 1.0 - 0.6 * Q_ratio # depends on [control=['if'], data=['Q_ratio']]
else:
G = 0.6 # depends on [control=['if'], data=[]]
elif beta2 <= 2 / 3.0:
G = 1 # depends on [control=['if'], data=[]]
else:
G = 1 + 0.3 * Q_ratio * Q_ratio
angle_rad = radians(angle)
K_branch = G * (1 + H * (Q_ratio / beta2) ** 2 - J * (Q_ratio / beta2) * cos(angle_rad))
return K_branch |
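The docstring example can be checked by hand; this sketch retraces the branch taken for the 45° wye (angle < 60 gives H = 1, J = 2; beta² = 1 > 0.35 and Q_ratio ≤ 0.6 gives G = 1 - 0.6·Q_ratio).

```python
from math import radians, cos

# Step-by-step check of Example 7-36 from the docstring above.
beta2 = (0.146 / 0.146) ** 2                 # 1.0
Q_ratio = 0.01583 / (0.02525 + 0.01583)      # ~0.3853
H, J = 1.0, 2.0                              # angle = 45 < 60
G = 1.0 - 0.6 * Q_ratio                      # beta2 > 0.35 and Q_ratio <= 0.6
K = G * (1 + H * (Q_ratio / beta2) ** 2
         - J * (Q_ratio / beta2) * cos(radians(45)))
print(round(K, 4))                           # 0.464
```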
def _teardown(self):
"Handles the restoration of any potential global state set."
self.example.after(self.context)
if self.is_root_runner:
run.after_all.execute(self.context)
#self.context = self.context._parent
self.has_ran = True | def function[_teardown, parameter[self]]:
constant[Handles the restoration of any potential global state set.]
call[name[self].example.after, parameter[name[self].context]]
if name[self].is_root_runner begin[:]
call[name[run].after_all.execute, parameter[name[self].context]]
name[self].has_ran assign[=] constant[True] | keyword[def] identifier[_teardown] ( identifier[self] ):
literal[string]
identifier[self] . identifier[example] . identifier[after] ( identifier[self] . identifier[context] )
keyword[if] identifier[self] . identifier[is_root_runner] :
identifier[run] . identifier[after_all] . identifier[execute] ( identifier[self] . identifier[context] )
identifier[self] . identifier[has_ran] = keyword[True] | def _teardown(self):
"""Handles the restoration of any potential global state set."""
self.example.after(self.context)
if self.is_root_runner:
run.after_all.execute(self.context) # depends on [control=['if'], data=[]]
#self.context = self.context._parent
self.has_ran = True |
def printWelcomeMessage(msg, place=10):
''' Print any welcome message '''
logging.debug('*' * 30)
welcome = ' ' * place
    welcome += msg
logging.debug(welcome)
logging.debug('*' * 30 + '\n') | def function[printWelcomeMessage, parameter[msg, place]]:
constant[ Print any welcome message ]
call[name[logging].debug, parameter[binary_operation[constant[*] * constant[30]]]]
variable[welcome] assign[=] binary_operation[constant[ ] * name[place]]
<ast.AugAssign object at 0x7da20c7c91e0>
call[name[logging].debug, parameter[name[welcome]]]
call[name[logging].debug, parameter[binary_operation[binary_operation[constant[*] * constant[30]] + constant[
]]]] | keyword[def] identifier[printWelcomeMessage] ( identifier[msg] , identifier[place] = literal[int] ):
literal[string]
identifier[logging] . identifier[debug] ( literal[string] * literal[int] )
identifier[welcome] = literal[string] * identifier[place]
identifier[welcome] += identifier[msg]
identifier[logging] . identifier[debug] ( identifier[welcome] )
identifier[logging] . identifier[debug] ( literal[string] * literal[int] + literal[string] ) | def printWelcomeMessage(msg, place=10):
""" Print any welcome message """
logging.debug('*' * 30)
welcome = ' ' * place
welcome += msg
logging.debug(welcome)
logging.debug('*' * 30 + '\n') |
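Quick usage sketch for the record above (assumes the root logger is configured to show DEBUG messages).

```python
import logging
logging.basicConfig(level=logging.DEBUG, format='%(message)s')

printWelcomeMessage('Welcome', place=10)
# ******************************
#           Welcome
# ******************************
```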
def emboss_pepstats_on_fasta(infile, outfile='', outdir='', outext='.pepstats', force_rerun=False):
"""Run EMBOSS pepstats on a FASTA file.
Args:
infile: Path to FASTA file
outfile: Name of output file without extension
outdir: Path to output directory
outext: Extension of results file, default is ".pepstats"
force_rerun: Flag to rerun pepstats
Returns:
str: Path to output file.
"""
# Create the output file name
outfile = ssbio.utils.outfile_maker(inname=infile, outname=outfile, outdir=outdir, outext=outext)
# Run pepstats
program = 'pepstats'
pepstats_args = '-sequence="{}" -outfile="{}"'.format(infile, outfile)
cmd_string = '{} {}'.format(program, pepstats_args)
ssbio.utils.command_runner(cmd_string, force_rerun_flag=force_rerun, outfile_checker=outfile, silent=True)
return outfile | def function[emboss_pepstats_on_fasta, parameter[infile, outfile, outdir, outext, force_rerun]]:
constant[Run EMBOSS pepstats on a FASTA file.
Args:
infile: Path to FASTA file
outfile: Name of output file without extension
outdir: Path to output directory
outext: Extension of results file, default is ".pepstats"
force_rerun: Flag to rerun pepstats
Returns:
str: Path to output file.
]
variable[outfile] assign[=] call[name[ssbio].utils.outfile_maker, parameter[]]
variable[program] assign[=] constant[pepstats]
variable[pepstats_args] assign[=] call[constant[-sequence="{}" -outfile="{}"].format, parameter[name[infile], name[outfile]]]
variable[cmd_string] assign[=] call[constant[{} {}].format, parameter[name[program], name[pepstats_args]]]
call[name[ssbio].utils.command_runner, parameter[name[cmd_string]]]
return[name[outfile]] | keyword[def] identifier[emboss_pepstats_on_fasta] ( identifier[infile] , identifier[outfile] = literal[string] , identifier[outdir] = literal[string] , identifier[outext] = literal[string] , identifier[force_rerun] = keyword[False] ):
literal[string]
identifier[outfile] = identifier[ssbio] . identifier[utils] . identifier[outfile_maker] ( identifier[inname] = identifier[infile] , identifier[outname] = identifier[outfile] , identifier[outdir] = identifier[outdir] , identifier[outext] = identifier[outext] )
identifier[program] = literal[string]
identifier[pepstats_args] = literal[string] . identifier[format] ( identifier[infile] , identifier[outfile] )
identifier[cmd_string] = literal[string] . identifier[format] ( identifier[program] , identifier[pepstats_args] )
identifier[ssbio] . identifier[utils] . identifier[command_runner] ( identifier[cmd_string] , identifier[force_rerun_flag] = identifier[force_rerun] , identifier[outfile_checker] = identifier[outfile] , identifier[silent] = keyword[True] )
keyword[return] identifier[outfile] | def emboss_pepstats_on_fasta(infile, outfile='', outdir='', outext='.pepstats', force_rerun=False):
"""Run EMBOSS pepstats on a FASTA file.
Args:
infile: Path to FASTA file
outfile: Name of output file without extension
outdir: Path to output directory
outext: Extension of results file, default is ".pepstats"
force_rerun: Flag to rerun pepstats
Returns:
str: Path to output file.
"""
# Create the output file name
outfile = ssbio.utils.outfile_maker(inname=infile, outname=outfile, outdir=outdir, outext=outext)
# Run pepstats
program = 'pepstats'
pepstats_args = '-sequence="{}" -outfile="{}"'.format(infile, outfile)
cmd_string = '{} {}'.format(program, pepstats_args)
ssbio.utils.command_runner(cmd_string, force_rerun_flag=force_rerun, outfile_checker=outfile, silent=True)
return outfile |
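Hedged usage sketch: it assumes the EMBOSS `pepstats` binary is on PATH, that `protein.fasta` exists, and that ssbio's `outfile_maker` keeps the input basename.

```python
# The exact output name follows ssbio.utils.outfile_maker's convention.
results_path = emboss_pepstats_on_fasta('protein.fasta', outdir='/tmp')
print(results_path)  # e.g. /tmp/protein.pepstats
```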
def _load(self, load_dict):
"""Reconstructs the data and exploration array.
Checks if it can find the array identifier in the `load_dict`, i.e. '__rr__'.
If not calls :class:`~pypet.parameter.Parameter._load` of the parent class.
If the parameter is explored, the exploration range of arrays is reconstructed
as it was stored in :func:`~pypet.parameter.ArrayParameter._store`.
"""
if self.v_locked:
raise pex.ParameterLockedException('Parameter `%s` is locked!' % self.v_full_name)
try:
self._data = load_dict['data' + ArrayParameter.IDENTIFIER]
if 'explored_data' + ArrayParameter.IDENTIFIER in load_dict:
explore_table = load_dict['explored_data' + ArrayParameter.IDENTIFIER]
idx = explore_table['idx']
explore_list = []
# Recall the arrays in the order stored in the ObjectTable 'explored_data__rr__'
for name_idx in idx:
arrayname = self._build_name(name_idx)
explore_list.append(load_dict[arrayname])
self._explored_range = [x for x in explore_list]
self._explored = True
except KeyError:
super(ArrayParameter, self)._load(load_dict)
self._default = self._data
self._locked = True | def function[_load, parameter[self, load_dict]]:
constant[Reconstructs the data and exploration array.
Checks if it can find the array identifier in the `load_dict`, i.e. '__rr__'.
If not calls :class:`~pypet.parameter.Parameter._load` of the parent class.
If the parameter is explored, the exploration range of arrays is reconstructed
as it was stored in :func:`~pypet.parameter.ArrayParameter._store`.
]
if name[self].v_locked begin[:]
<ast.Raise object at 0x7da18f723a00>
<ast.Try object at 0x7da18f723370>
name[self]._default assign[=] name[self]._data
name[self]._locked assign[=] constant[True] | keyword[def] identifier[_load] ( identifier[self] , identifier[load_dict] ):
literal[string]
keyword[if] identifier[self] . identifier[v_locked] :
keyword[raise] identifier[pex] . identifier[ParameterLockedException] ( literal[string] % identifier[self] . identifier[v_full_name] )
keyword[try] :
identifier[self] . identifier[_data] = identifier[load_dict] [ literal[string] + identifier[ArrayParameter] . identifier[IDENTIFIER] ]
keyword[if] literal[string] + identifier[ArrayParameter] . identifier[IDENTIFIER] keyword[in] identifier[load_dict] :
identifier[explore_table] = identifier[load_dict] [ literal[string] + identifier[ArrayParameter] . identifier[IDENTIFIER] ]
identifier[idx] = identifier[explore_table] [ literal[string] ]
identifier[explore_list] =[]
keyword[for] identifier[name_idx] keyword[in] identifier[idx] :
identifier[arrayname] = identifier[self] . identifier[_build_name] ( identifier[name_idx] )
identifier[explore_list] . identifier[append] ( identifier[load_dict] [ identifier[arrayname] ])
identifier[self] . identifier[_explored_range] =[ identifier[x] keyword[for] identifier[x] keyword[in] identifier[explore_list] ]
identifier[self] . identifier[_explored] = keyword[True]
keyword[except] identifier[KeyError] :
identifier[super] ( identifier[ArrayParameter] , identifier[self] ). identifier[_load] ( identifier[load_dict] )
identifier[self] . identifier[_default] = identifier[self] . identifier[_data]
identifier[self] . identifier[_locked] = keyword[True] | def _load(self, load_dict):
"""Reconstructs the data and exploration array.
Checks if it can find the array identifier in the `load_dict`, i.e. '__rr__'.
If not calls :class:`~pypet.parameter.Parameter._load` of the parent class.
If the parameter is explored, the exploration range of arrays is reconstructed
as it was stored in :func:`~pypet.parameter.ArrayParameter._store`.
"""
if self.v_locked:
raise pex.ParameterLockedException('Parameter `%s` is locked!' % self.v_full_name) # depends on [control=['if'], data=[]]
try:
self._data = load_dict['data' + ArrayParameter.IDENTIFIER]
if 'explored_data' + ArrayParameter.IDENTIFIER in load_dict:
explore_table = load_dict['explored_data' + ArrayParameter.IDENTIFIER]
idx = explore_table['idx']
explore_list = []
# Recall the arrays in the order stored in the ObjectTable 'explored_data__rr__'
for name_idx in idx:
arrayname = self._build_name(name_idx)
explore_list.append(load_dict[arrayname]) # depends on [control=['for'], data=['name_idx']]
self._explored_range = [x for x in explore_list]
self._explored = True # depends on [control=['if'], data=['load_dict']] # depends on [control=['try'], data=[]]
except KeyError:
super(ArrayParameter, self)._load(load_dict) # depends on [control=['except'], data=[]]
self._default = self._data
self._locked = True |
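A sketch of the `load_dict` shape the method expects, assuming `ArrayParameter.IDENTIFIER == '__rr__'` as the docstring states; the per-array key names produced by `_build_name` are hypothetical here.

```python
import numpy as np

# Hypothetical layout; only the '__rr__' suffix is documented above.
load_dict = {
    'data__rr__': np.array([1, 2, 3]),
    'explored_data__rr__': {'idx': [0, 1]},  # ObjectTable-like mapping
    'xa__rr__00000': np.array([1, 2, 3]),    # hypothetical name from _build_name(0)
    'xa__rr__00001': np.array([4, 5, 6]),    # hypothetical name from _build_name(1)
}
```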
def _list_element_starts_with(items, needle):
"""True of any of the list elements starts with needle"""
for item in items:
if item.startswith(needle):
return True
return False | def function[_list_element_starts_with, parameter[items, needle]]:
    constant[True if any of the list elements starts with needle]
for taget[name[item]] in starred[name[items]] begin[:]
if call[name[item].startswith, parameter[name[needle]]] begin[:]
return[constant[True]]
return[constant[False]] | keyword[def] identifier[_list_element_starts_with] ( identifier[items] , identifier[needle] ):
literal[string]
keyword[for] identifier[item] keyword[in] identifier[items] :
keyword[if] identifier[item] . identifier[startswith] ( identifier[needle] ):
keyword[return] keyword[True]
keyword[return] keyword[False] | def _list_element_starts_with(items, needle):
"""True of any of the list elements starts with needle"""
for item in items:
if item.startswith(needle):
return True # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['item']]
return False |
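An equivalent, more idiomatic formulation using `any()`, behaviorally identical to the loop above.

```python
def _list_element_starts_with(items, needle):
    """True if any of the list elements starts with needle."""
    return any(item.startswith(needle) for item in items)
```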
def formatted_str_to_val(data, format, enum_set=None):
""" Return an unsigned integer representation of the data given format specified.
:param data: a string holding the value to convert
:param format: a string holding a format which will be used to convert the data string
    :param enum_set: an iterable of enums which are used as part of the conversion process
    Given a string (not a wirevector!) convert that to an unsigned integer ready for input
    to the simulation environment. This helps deal with signed/unsigned numbers (simulation
assumes the values have been converted via two's complement already), but it also takes
hex, binary, and enum types as inputs. It is easiest to see how it works with some
examples. ::
formatted_str_to_val('2', 's3') == 2 # 0b010
formatted_str_to_val('-1', 's3') == 7 # 0b111
formatted_str_to_val('101', 'b3') == 5
formatted_str_to_val('5', 'u3') == 5
formatted_str_to_val('-3', 's3') == 5
formatted_str_to_val('a', 'x3') == 10
class Ctl(Enum):
ADD = 5
SUB = 12
formatted_str_to_val('ADD', 'e3/Ctl', [Ctl]) == 5
formatted_str_to_val('SUB', 'e3/Ctl', [Ctl]) == 12
"""
type = format[0]
bitwidth = int(format[1:].split('/')[0])
bitmask = (1 << bitwidth)-1
if type == 's':
rval = int(data) & bitmask
elif type == 'x':
rval = int(data, 16)
elif type == 'b':
rval = int(data, 2)
elif type == 'u':
rval = int(data)
if rval < 0:
raise PyrtlError('unsigned format requested, but negative value provided')
elif type == 'e':
enumname = format.split('/')[1]
enum_inst_list = [e for e in enum_set if e.__name__ == enumname]
if len(enum_inst_list) == 0:
raise PyrtlError('enum "{}" not found in passed enum_set "{}"'
.format(enumname, enum_set))
rval = getattr(enum_inst_list[0], data).value
else:
raise PyrtlError('unknown format type {}'.format(format))
return rval | def function[formatted_str_to_val, parameter[data, format, enum_set]]:
constant[ Return an unsigned integer representation of the data given format specified.
:param data: a string holding the value to convert
:param format: a string holding a format which will be used to convert the data string
    :param enum_set: an iterable of enums which are used as part of the conversion process
    Given a string (not a wirevector!) convert that to an unsigned integer ready for input
    to the simulation environment. This helps deal with signed/unsigned numbers (simulation
assumes the values have been converted via two's complement already), but it also takes
hex, binary, and enum types as inputs. It is easiest to see how it works with some
examples. ::
formatted_str_to_val('2', 's3') == 2 # 0b010
formatted_str_to_val('-1', 's3') == 7 # 0b111
formatted_str_to_val('101', 'b3') == 5
formatted_str_to_val('5', 'u3') == 5
formatted_str_to_val('-3', 's3') == 5
formatted_str_to_val('a', 'x3') == 10
class Ctl(Enum):
ADD = 5
SUB = 12
formatted_str_to_val('ADD', 'e3/Ctl', [Ctl]) == 5
formatted_str_to_val('SUB', 'e3/Ctl', [Ctl]) == 12
]
variable[type] assign[=] call[name[format]][constant[0]]
variable[bitwidth] assign[=] call[name[int], parameter[call[call[call[name[format]][<ast.Slice object at 0x7da1b0578700>].split, parameter[constant[/]]]][constant[0]]]]
variable[bitmask] assign[=] binary_operation[binary_operation[constant[1] <ast.LShift object at 0x7da2590d69e0> name[bitwidth]] - constant[1]]
if compare[name[type] equal[==] constant[s]] begin[:]
variable[rval] assign[=] binary_operation[call[name[int], parameter[name[data]]] <ast.BitAnd object at 0x7da2590d6b60> name[bitmask]]
return[name[rval]] | keyword[def] identifier[formatted_str_to_val] ( identifier[data] , identifier[format] , identifier[enum_set] = keyword[None] ):
literal[string]
identifier[type] = identifier[format] [ literal[int] ]
identifier[bitwidth] = identifier[int] ( identifier[format] [ literal[int] :]. identifier[split] ( literal[string] )[ literal[int] ])
identifier[bitmask] =( literal[int] << identifier[bitwidth] )- literal[int]
keyword[if] identifier[type] == literal[string] :
identifier[rval] = identifier[int] ( identifier[data] )& identifier[bitmask]
keyword[elif] identifier[type] == literal[string] :
identifier[rval] = identifier[int] ( identifier[data] , literal[int] )
keyword[elif] identifier[type] == literal[string] :
identifier[rval] = identifier[int] ( identifier[data] , literal[int] )
keyword[elif] identifier[type] == literal[string] :
identifier[rval] = identifier[int] ( identifier[data] )
keyword[if] identifier[rval] < literal[int] :
keyword[raise] identifier[PyrtlError] ( literal[string] )
keyword[elif] identifier[type] == literal[string] :
identifier[enumname] = identifier[format] . identifier[split] ( literal[string] )[ literal[int] ]
identifier[enum_inst_list] =[ identifier[e] keyword[for] identifier[e] keyword[in] identifier[enum_set] keyword[if] identifier[e] . identifier[__name__] == identifier[enumname] ]
keyword[if] identifier[len] ( identifier[enum_inst_list] )== literal[int] :
keyword[raise] identifier[PyrtlError] ( literal[string]
. identifier[format] ( identifier[enumname] , identifier[enum_set] ))
identifier[rval] = identifier[getattr] ( identifier[enum_inst_list] [ literal[int] ], identifier[data] ). identifier[value]
keyword[else] :
keyword[raise] identifier[PyrtlError] ( literal[string] . identifier[format] ( identifier[format] ))
keyword[return] identifier[rval] | def formatted_str_to_val(data, format, enum_set=None):
""" Return an unsigned integer representation of the data given format specified.
:param data: a string holding the value to convert
:param format: a string holding a format which will be used to convert the data string
    :param enum_set: an iterable of enums which are used as part of the conversion process
    Given a string (not a wirevector!) convert that to an unsigned integer ready for input
    to the simulation environment. This helps deal with signed/unsigned numbers (simulation
assumes the values have been converted via two's complement already), but it also takes
hex, binary, and enum types as inputs. It is easiest to see how it works with some
examples. ::
formatted_str_to_val('2', 's3') == 2 # 0b010
formatted_str_to_val('-1', 's3') == 7 # 0b111
formatted_str_to_val('101', 'b3') == 5
formatted_str_to_val('5', 'u3') == 5
formatted_str_to_val('-3', 's3') == 5
formatted_str_to_val('a', 'x3') == 10
class Ctl(Enum):
ADD = 5
SUB = 12
formatted_str_to_val('ADD', 'e3/Ctl', [Ctl]) == 5
formatted_str_to_val('SUB', 'e3/Ctl', [Ctl]) == 12
"""
type = format[0]
bitwidth = int(format[1:].split('/')[0])
bitmask = (1 << bitwidth) - 1
if type == 's':
rval = int(data) & bitmask # depends on [control=['if'], data=[]]
elif type == 'x':
rval = int(data, 16) # depends on [control=['if'], data=[]]
elif type == 'b':
rval = int(data, 2) # depends on [control=['if'], data=[]]
elif type == 'u':
rval = int(data)
if rval < 0:
raise PyrtlError('unsigned format requested, but negative value provided') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif type == 'e':
enumname = format.split('/')[1]
enum_inst_list = [e for e in enum_set if e.__name__ == enumname]
if len(enum_inst_list) == 0:
raise PyrtlError('enum "{}" not found in passed enum_set "{}"'.format(enumname, enum_set)) # depends on [control=['if'], data=[]]
rval = getattr(enum_inst_list[0], data).value # depends on [control=['if'], data=[]]
else:
raise PyrtlError('unknown format type {}'.format(format))
return rval |
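Why the signed branch works: masking a negative Python int with (1 << bitwidth) - 1 yields its two's-complement encoding at that width, matching the docstring examples.

```python
bitmask = (1 << 3) - 1   # 0b111 for a 3-bit field
print(-1 & bitmask)      # 7, i.e. formatted_str_to_val('-1', 's3')
print(-3 & bitmask)      # 5, i.e. formatted_str_to_val('-3', 's3')
```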
def add_walls(self):
"Put walls around the entire perimeter of the grid."
for x in range(self.width):
self.add_thing(Wall(), (x, 0))
self.add_thing(Wall(), (x, self.height-1))
for y in range(self.height):
self.add_thing(Wall(), (0, y))
self.add_thing(Wall(), (self.width-1, y)) | def function[add_walls, parameter[self]]:
constant[Put walls around the entire perimeter of the grid.]
for taget[name[x]] in starred[call[name[range], parameter[name[self].width]]] begin[:]
call[name[self].add_thing, parameter[call[name[Wall], parameter[]], tuple[[<ast.Name object at 0x7da1afe72c50>, <ast.Constant object at 0x7da204564f70>]]]]
call[name[self].add_thing, parameter[call[name[Wall], parameter[]], tuple[[<ast.Name object at 0x7da18bcca6e0>, <ast.BinOp object at 0x7da18bcc9a80>]]]]
for taget[name[y]] in starred[call[name[range], parameter[name[self].height]]] begin[:]
call[name[self].add_thing, parameter[call[name[Wall], parameter[]], tuple[[<ast.Constant object at 0x7da2054a5600>, <ast.Name object at 0x7da2054a4e80>]]]]
call[name[self].add_thing, parameter[call[name[Wall], parameter[]], tuple[[<ast.BinOp object at 0x7da2054a7f10>, <ast.Name object at 0x7da2054a72b0>]]]] | keyword[def] identifier[add_walls] ( identifier[self] ):
literal[string]
keyword[for] identifier[x] keyword[in] identifier[range] ( identifier[self] . identifier[width] ):
identifier[self] . identifier[add_thing] ( identifier[Wall] (),( identifier[x] , literal[int] ))
identifier[self] . identifier[add_thing] ( identifier[Wall] (),( identifier[x] , identifier[self] . identifier[height] - literal[int] ))
keyword[for] identifier[y] keyword[in] identifier[range] ( identifier[self] . identifier[height] ):
identifier[self] . identifier[add_thing] ( identifier[Wall] (),( literal[int] , identifier[y] ))
identifier[self] . identifier[add_thing] ( identifier[Wall] (),( identifier[self] . identifier[width] - literal[int] , identifier[y] )) | def add_walls(self):
"""Put walls around the entire perimeter of the grid."""
for x in range(self.width):
self.add_thing(Wall(), (x, 0))
self.add_thing(Wall(), (x, self.height - 1)) # depends on [control=['for'], data=['x']]
for y in range(self.height):
self.add_thing(Wall(), (0, y))
self.add_thing(Wall(), (self.width - 1, y)) # depends on [control=['for'], data=['y']] |
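A quick enumeration for a 4×3 grid shows the loop structure; note the corner cells fall in both loops, so each corner receives two Wall objects.

```python
width, height = 4, 3
top_bottom = {(x, y) for x in range(width) for y in (0, height - 1)}
sides = {(x, y) for x in (0, width - 1) for y in range(height)}
print(sorted(top_bottom & sides))  # [(0, 0), (0, 2), (3, 0), (3, 2)] — added twice
```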
def prepare_replicant_order_object(manager, snapshot_schedule, location,
tier, volume, volume_type):
"""Prepare the order object which is submitted to the placeOrder() method
:param manager: The File or Block manager calling this function
:param snapshot_schedule: The primary volume's snapshot
schedule to use for replication
:param location: The location for the ordered replicant volume
:param tier: The tier (IOPS per GB) of the primary volume
:param volume: The primary volume as a SoftLayer_Network_Storage object
:param volume_type: The type of the primary volume ('file' or 'block')
:return: Returns the order object for the
Product_Order service's placeOrder() method
"""
# Ensure the primary volume and snapshot space are not set for cancellation
if 'billingItem' not in volume\
or volume['billingItem']['cancellationDate'] != '':
raise exceptions.SoftLayerError(
'This volume is set for cancellation; '
'unable to order replicant volume')
for child in volume['billingItem']['activeChildren']:
if child['categoryCode'] == 'storage_snapshot_space'\
and child['cancellationDate'] != '':
raise exceptions.SoftLayerError(
'The snapshot space for this volume is set for '
'cancellation; unable to order replicant volume')
# Find the ID for the requested location
try:
location_id = get_location_id(manager, location)
except ValueError:
raise exceptions.SoftLayerError(
"Invalid datacenter name specified. "
"Please provide the lower case short name (e.g.: dal09)")
# Get sizes and properties needed for the order
volume_size = int(volume['capacityGb'])
billing_item_category_code = volume['billingItem']['categoryCode']
if billing_item_category_code == 'storage_as_a_service':
order_type_is_saas = True
elif billing_item_category_code == 'storage_service_enterprise':
order_type_is_saas = False
else:
raise exceptions.SoftLayerError(
"A replicant volume cannot be ordered for a primary volume with a "
"billing item category code of '%s'" % billing_item_category_code)
if 'snapshotCapacityGb' in volume:
snapshot_size = int(volume['snapshotCapacityGb'])
else:
raise exceptions.SoftLayerError(
"Snapshot capacity not found for the given primary volume")
snapshot_schedule_id = find_snapshot_schedule_id(
volume,
'SNAPSHOT_' + snapshot_schedule
)
# Use the volume's billing item category code to get the product package
package = get_package(manager, billing_item_category_code)
# Find prices based on the primary volume's type and billing item category
if order_type_is_saas: # 'storage_as_a_service' package
complex_type = 'SoftLayer_Container_Product_Order_'\
'Network_Storage_AsAService'
volume_storage_type = volume['storageType']['keyName']
if 'ENDURANCE' in volume_storage_type:
volume_is_performance = False
if tier is None:
tier = find_endurance_tier_iops_per_gb(volume)
prices = [
find_price_by_category(package, billing_item_category_code),
find_price_by_category(package, 'storage_' + volume_type),
find_saas_endurance_space_price(package, volume_size, tier),
find_saas_endurance_tier_price(package, tier),
find_saas_snapshot_space_price(
package, snapshot_size, tier=tier),
find_saas_replication_price(package, tier=tier)
]
elif 'PERFORMANCE' in volume_storage_type:
if not _staas_version_is_v2_or_above(volume):
raise exceptions.SoftLayerError(
"A replica volume cannot be ordered for this performance "
"volume since it does not support Encryption at Rest.")
volume_is_performance = True
iops = int(volume['provisionedIops'])
prices = [
find_price_by_category(package, billing_item_category_code),
find_price_by_category(package, 'storage_' + volume_type),
find_saas_perform_space_price(package, volume_size),
find_saas_perform_iops_price(package, volume_size, iops),
find_saas_snapshot_space_price(
package, snapshot_size, iops=iops),
find_saas_replication_price(package, iops=iops)
]
else:
raise exceptions.SoftLayerError(
"Storage volume does not have a valid storage type "
"(with an appropriate keyName to indicate the "
"volume is a PERFORMANCE or an ENDURANCE volume)")
else: # 'storage_service_enterprise' package
complex_type = 'SoftLayer_Container_Product_Order_'\
'Network_Storage_Enterprise'
volume_is_performance = False
if tier is None:
tier = find_endurance_tier_iops_per_gb(volume)
prices = [
find_price_by_category(package, billing_item_category_code),
find_price_by_category(package, 'storage_' + volume_type),
find_ent_space_price(package, 'endurance', volume_size, tier),
find_ent_endurance_tier_price(package, tier),
find_ent_space_price(package, 'snapshot', snapshot_size, tier),
find_ent_space_price(package, 'replication', volume_size, tier)
]
# Determine if hourly billing should be used
hourly_billing_flag = utils.lookup(volume, 'billingItem', 'hourlyFlag')
if hourly_billing_flag is None:
hourly_billing_flag = False
# Build and return the order object
replicant_order = {
'complexType': complex_type,
'packageId': package['id'],
'prices': prices,
'quantity': 1,
'location': location_id,
'originVolumeId': volume['id'],
'originVolumeScheduleId': snapshot_schedule_id,
'useHourlyPricing': hourly_billing_flag
}
if order_type_is_saas:
replicant_order['volumeSize'] = volume_size
if volume_is_performance:
replicant_order['iops'] = iops
return replicant_order | def function[prepare_replicant_order_object, parameter[manager, snapshot_schedule, location, tier, volume, volume_type]]:
constant[Prepare the order object which is submitted to the placeOrder() method
:param manager: The File or Block manager calling this function
:param snapshot_schedule: The primary volume's snapshot
schedule to use for replication
:param location: The location for the ordered replicant volume
:param tier: The tier (IOPS per GB) of the primary volume
:param volume: The primary volume as a SoftLayer_Network_Storage object
:param volume_type: The type of the primary volume ('file' or 'block')
:return: Returns the order object for the
Product_Order service's placeOrder() method
]
if <ast.BoolOp object at 0x7da18fe912d0> begin[:]
<ast.Raise object at 0x7da18fe93a90>
for taget[name[child]] in starred[call[call[name[volume]][constant[billingItem]]][constant[activeChildren]]] begin[:]
if <ast.BoolOp object at 0x7da18fe93550> begin[:]
<ast.Raise object at 0x7da18fe91690>
<ast.Try object at 0x7da18fe91360>
variable[volume_size] assign[=] call[name[int], parameter[call[name[volume]][constant[capacityGb]]]]
variable[billing_item_category_code] assign[=] call[call[name[volume]][constant[billingItem]]][constant[categoryCode]]
if compare[name[billing_item_category_code] equal[==] constant[storage_as_a_service]] begin[:]
variable[order_type_is_saas] assign[=] constant[True]
if compare[constant[snapshotCapacityGb] in name[volume]] begin[:]
variable[snapshot_size] assign[=] call[name[int], parameter[call[name[volume]][constant[snapshotCapacityGb]]]]
variable[snapshot_schedule_id] assign[=] call[name[find_snapshot_schedule_id], parameter[name[volume], binary_operation[constant[SNAPSHOT_] + name[snapshot_schedule]]]]
variable[package] assign[=] call[name[get_package], parameter[name[manager], name[billing_item_category_code]]]
if name[order_type_is_saas] begin[:]
variable[complex_type] assign[=] constant[SoftLayer_Container_Product_Order_Network_Storage_AsAService]
variable[volume_storage_type] assign[=] call[call[name[volume]][constant[storageType]]][constant[keyName]]
if compare[constant[ENDURANCE] in name[volume_storage_type]] begin[:]
variable[volume_is_performance] assign[=] constant[False]
if compare[name[tier] is constant[None]] begin[:]
variable[tier] assign[=] call[name[find_endurance_tier_iops_per_gb], parameter[name[volume]]]
variable[prices] assign[=] list[[<ast.Call object at 0x7da18fe91240>, <ast.Call object at 0x7da18fe916f0>, <ast.Call object at 0x7da18fe933a0>, <ast.Call object at 0x7da18fe930a0>, <ast.Call object at 0x7da18fe91060>, <ast.Call object at 0x7da18fe902b0>]]
variable[hourly_billing_flag] assign[=] call[name[utils].lookup, parameter[name[volume], constant[billingItem], constant[hourlyFlag]]]
if compare[name[hourly_billing_flag] is constant[None]] begin[:]
variable[hourly_billing_flag] assign[=] constant[False]
variable[replicant_order] assign[=] dictionary[[<ast.Constant object at 0x7da18f723550>, <ast.Constant object at 0x7da18f723e20>, <ast.Constant object at 0x7da18f723280>, <ast.Constant object at 0x7da18f7219c0>, <ast.Constant object at 0x7da18f720d30>, <ast.Constant object at 0x7da18f723f40>, <ast.Constant object at 0x7da18f721c00>, <ast.Constant object at 0x7da18f722860>], [<ast.Name object at 0x7da18f721540>, <ast.Subscript object at 0x7da18f7205b0>, <ast.Name object at 0x7da18f721690>, <ast.Constant object at 0x7da18f722560>, <ast.Name object at 0x7da18f720fa0>, <ast.Subscript object at 0x7da18f722410>, <ast.Name object at 0x7da18f7237f0>, <ast.Name object at 0x7da18f720190>]]
if name[order_type_is_saas] begin[:]
call[name[replicant_order]][constant[volumeSize]] assign[=] name[volume_size]
if name[volume_is_performance] begin[:]
call[name[replicant_order]][constant[iops]] assign[=] name[iops]
return[name[replicant_order]] | keyword[def] identifier[prepare_replicant_order_object] ( identifier[manager] , identifier[snapshot_schedule] , identifier[location] ,
identifier[tier] , identifier[volume] , identifier[volume_type] ):
literal[string]
keyword[if] literal[string] keyword[not] keyword[in] identifier[volume] keyword[or] identifier[volume] [ literal[string] ][ literal[string] ]!= literal[string] :
keyword[raise] identifier[exceptions] . identifier[SoftLayerError] (
literal[string]
literal[string] )
keyword[for] identifier[child] keyword[in] identifier[volume] [ literal[string] ][ literal[string] ]:
keyword[if] identifier[child] [ literal[string] ]== literal[string] keyword[and] identifier[child] [ literal[string] ]!= literal[string] :
keyword[raise] identifier[exceptions] . identifier[SoftLayerError] (
literal[string]
literal[string] )
keyword[try] :
identifier[location_id] = identifier[get_location_id] ( identifier[manager] , identifier[location] )
keyword[except] identifier[ValueError] :
keyword[raise] identifier[exceptions] . identifier[SoftLayerError] (
literal[string]
literal[string] )
identifier[volume_size] = identifier[int] ( identifier[volume] [ literal[string] ])
identifier[billing_item_category_code] = identifier[volume] [ literal[string] ][ literal[string] ]
keyword[if] identifier[billing_item_category_code] == literal[string] :
identifier[order_type_is_saas] = keyword[True]
keyword[elif] identifier[billing_item_category_code] == literal[string] :
identifier[order_type_is_saas] = keyword[False]
keyword[else] :
keyword[raise] identifier[exceptions] . identifier[SoftLayerError] (
literal[string]
literal[string] % identifier[billing_item_category_code] )
keyword[if] literal[string] keyword[in] identifier[volume] :
identifier[snapshot_size] = identifier[int] ( identifier[volume] [ literal[string] ])
keyword[else] :
keyword[raise] identifier[exceptions] . identifier[SoftLayerError] (
literal[string] )
identifier[snapshot_schedule_id] = identifier[find_snapshot_schedule_id] (
identifier[volume] ,
literal[string] + identifier[snapshot_schedule]
)
identifier[package] = identifier[get_package] ( identifier[manager] , identifier[billing_item_category_code] )
keyword[if] identifier[order_type_is_saas] :
identifier[complex_type] = literal[string] literal[string]
identifier[volume_storage_type] = identifier[volume] [ literal[string] ][ literal[string] ]
keyword[if] literal[string] keyword[in] identifier[volume_storage_type] :
identifier[volume_is_performance] = keyword[False]
keyword[if] identifier[tier] keyword[is] keyword[None] :
identifier[tier] = identifier[find_endurance_tier_iops_per_gb] ( identifier[volume] )
identifier[prices] =[
identifier[find_price_by_category] ( identifier[package] , identifier[billing_item_category_code] ),
identifier[find_price_by_category] ( identifier[package] , literal[string] + identifier[volume_type] ),
identifier[find_saas_endurance_space_price] ( identifier[package] , identifier[volume_size] , identifier[tier] ),
identifier[find_saas_endurance_tier_price] ( identifier[package] , identifier[tier] ),
identifier[find_saas_snapshot_space_price] (
identifier[package] , identifier[snapshot_size] , identifier[tier] = identifier[tier] ),
identifier[find_saas_replication_price] ( identifier[package] , identifier[tier] = identifier[tier] )
]
keyword[elif] literal[string] keyword[in] identifier[volume_storage_type] :
keyword[if] keyword[not] identifier[_staas_version_is_v2_or_above] ( identifier[volume] ):
keyword[raise] identifier[exceptions] . identifier[SoftLayerError] (
literal[string]
literal[string] )
identifier[volume_is_performance] = keyword[True]
identifier[iops] = identifier[int] ( identifier[volume] [ literal[string] ])
identifier[prices] =[
identifier[find_price_by_category] ( identifier[package] , identifier[billing_item_category_code] ),
identifier[find_price_by_category] ( identifier[package] , literal[string] + identifier[volume_type] ),
identifier[find_saas_perform_space_price] ( identifier[package] , identifier[volume_size] ),
identifier[find_saas_perform_iops_price] ( identifier[package] , identifier[volume_size] , identifier[iops] ),
identifier[find_saas_snapshot_space_price] (
identifier[package] , identifier[snapshot_size] , identifier[iops] = identifier[iops] ),
identifier[find_saas_replication_price] ( identifier[package] , identifier[iops] = identifier[iops] )
]
keyword[else] :
keyword[raise] identifier[exceptions] . identifier[SoftLayerError] (
literal[string]
literal[string]
literal[string] )
keyword[else] :
identifier[complex_type] = literal[string] literal[string]
identifier[volume_is_performance] = keyword[False]
keyword[if] identifier[tier] keyword[is] keyword[None] :
identifier[tier] = identifier[find_endurance_tier_iops_per_gb] ( identifier[volume] )
identifier[prices] =[
identifier[find_price_by_category] ( identifier[package] , identifier[billing_item_category_code] ),
identifier[find_price_by_category] ( identifier[package] , literal[string] + identifier[volume_type] ),
identifier[find_ent_space_price] ( identifier[package] , literal[string] , identifier[volume_size] , identifier[tier] ),
identifier[find_ent_endurance_tier_price] ( identifier[package] , identifier[tier] ),
identifier[find_ent_space_price] ( identifier[package] , literal[string] , identifier[snapshot_size] , identifier[tier] ),
identifier[find_ent_space_price] ( identifier[package] , literal[string] , identifier[volume_size] , identifier[tier] )
]
identifier[hourly_billing_flag] = identifier[utils] . identifier[lookup] ( identifier[volume] , literal[string] , literal[string] )
keyword[if] identifier[hourly_billing_flag] keyword[is] keyword[None] :
identifier[hourly_billing_flag] = keyword[False]
identifier[replicant_order] ={
literal[string] : identifier[complex_type] ,
literal[string] : identifier[package] [ literal[string] ],
literal[string] : identifier[prices] ,
literal[string] : literal[int] ,
literal[string] : identifier[location_id] ,
literal[string] : identifier[volume] [ literal[string] ],
literal[string] : identifier[snapshot_schedule_id] ,
literal[string] : identifier[hourly_billing_flag]
}
keyword[if] identifier[order_type_is_saas] :
identifier[replicant_order] [ literal[string] ]= identifier[volume_size]
keyword[if] identifier[volume_is_performance] :
identifier[replicant_order] [ literal[string] ]= identifier[iops]
keyword[return] identifier[replicant_order] | def prepare_replicant_order_object(manager, snapshot_schedule, location, tier, volume, volume_type):
"""Prepare the order object which is submitted to the placeOrder() method
:param manager: The File or Block manager calling this function
:param snapshot_schedule: The primary volume's snapshot
schedule to use for replication
:param location: The location for the ordered replicant volume
:param tier: The tier (IOPS per GB) of the primary volume
:param volume: The primary volume as a SoftLayer_Network_Storage object
:param volume_type: The type of the primary volume ('file' or 'block')
:return: Returns the order object for the
Product_Order service's placeOrder() method
"""
# Ensure the primary volume and snapshot space are not set for cancellation
if 'billingItem' not in volume or volume['billingItem']['cancellationDate'] != '':
raise exceptions.SoftLayerError('This volume is set for cancellation; unable to order replicant volume') # depends on [control=['if'], data=[]]
for child in volume['billingItem']['activeChildren']:
if child['categoryCode'] == 'storage_snapshot_space' and child['cancellationDate'] != '':
raise exceptions.SoftLayerError('The snapshot space for this volume is set for cancellation; unable to order replicant volume') # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['child']]
# Find the ID for the requested location
try:
location_id = get_location_id(manager, location) # depends on [control=['try'], data=[]]
except ValueError:
raise exceptions.SoftLayerError('Invalid datacenter name specified. Please provide the lower case short name (e.g.: dal09)') # depends on [control=['except'], data=[]]
# Get sizes and properties needed for the order
volume_size = int(volume['capacityGb'])
billing_item_category_code = volume['billingItem']['categoryCode']
if billing_item_category_code == 'storage_as_a_service':
order_type_is_saas = True # depends on [control=['if'], data=[]]
elif billing_item_category_code == 'storage_service_enterprise':
order_type_is_saas = False # depends on [control=['if'], data=[]]
else:
raise exceptions.SoftLayerError("A replicant volume cannot be ordered for a primary volume with a billing item category code of '%s'" % billing_item_category_code)
if 'snapshotCapacityGb' in volume:
snapshot_size = int(volume['snapshotCapacityGb']) # depends on [control=['if'], data=['volume']]
else:
raise exceptions.SoftLayerError('Snapshot capacity not found for the given primary volume')
snapshot_schedule_id = find_snapshot_schedule_id(volume, 'SNAPSHOT_' + snapshot_schedule)
# Use the volume's billing item category code to get the product package
package = get_package(manager, billing_item_category_code)
# Find prices based on the primary volume's type and billing item category
if order_type_is_saas: # 'storage_as_a_service' package
complex_type = 'SoftLayer_Container_Product_Order_Network_Storage_AsAService'
volume_storage_type = volume['storageType']['keyName']
if 'ENDURANCE' in volume_storage_type:
volume_is_performance = False
if tier is None:
tier = find_endurance_tier_iops_per_gb(volume) # depends on [control=['if'], data=['tier']]
prices = [find_price_by_category(package, billing_item_category_code), find_price_by_category(package, 'storage_' + volume_type), find_saas_endurance_space_price(package, volume_size, tier), find_saas_endurance_tier_price(package, tier), find_saas_snapshot_space_price(package, snapshot_size, tier=tier), find_saas_replication_price(package, tier=tier)] # depends on [control=['if'], data=[]]
elif 'PERFORMANCE' in volume_storage_type:
if not _staas_version_is_v2_or_above(volume):
raise exceptions.SoftLayerError('A replica volume cannot be ordered for this performance volume since it does not support Encryption at Rest.') # depends on [control=['if'], data=[]]
volume_is_performance = True
iops = int(volume['provisionedIops'])
prices = [find_price_by_category(package, billing_item_category_code), find_price_by_category(package, 'storage_' + volume_type), find_saas_perform_space_price(package, volume_size), find_saas_perform_iops_price(package, volume_size, iops), find_saas_snapshot_space_price(package, snapshot_size, iops=iops), find_saas_replication_price(package, iops=iops)] # depends on [control=['if'], data=[]]
else:
raise exceptions.SoftLayerError('Storage volume does not have a valid storage type (with an appropriate keyName to indicate the volume is a PERFORMANCE or an ENDURANCE volume)') # depends on [control=['if'], data=[]]
else: # 'storage_service_enterprise' package
complex_type = 'SoftLayer_Container_Product_Order_Network_Storage_Enterprise'
volume_is_performance = False
if tier is None:
tier = find_endurance_tier_iops_per_gb(volume) # depends on [control=['if'], data=['tier']]
prices = [find_price_by_category(package, billing_item_category_code), find_price_by_category(package, 'storage_' + volume_type), find_ent_space_price(package, 'endurance', volume_size, tier), find_ent_endurance_tier_price(package, tier), find_ent_space_price(package, 'snapshot', snapshot_size, tier), find_ent_space_price(package, 'replication', volume_size, tier)]
# Determine if hourly billing should be used
hourly_billing_flag = utils.lookup(volume, 'billingItem', 'hourlyFlag')
if hourly_billing_flag is None:
hourly_billing_flag = False # depends on [control=['if'], data=['hourly_billing_flag']]
# Build and return the order object
replicant_order = {'complexType': complex_type, 'packageId': package['id'], 'prices': prices, 'quantity': 1, 'location': location_id, 'originVolumeId': volume['id'], 'originVolumeScheduleId': snapshot_schedule_id, 'useHourlyPricing': hourly_billing_flag}
if order_type_is_saas:
replicant_order['volumeSize'] = volume_size
if volume_is_performance:
replicant_order['iops'] = iops # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return replicant_order |
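# Hedged usage sketch: the returned dict is shaped for SoftLayer's ordering
# service. The client/manager/volume names below are illustrative, not from
# the source; verifyOrder() dry-runs the order before placeOrder() submits it.
import SoftLayer
client = SoftLayer.create_client_from_env()
order = prepare_replicant_order_object(manager, snapshot_schedule='WEEKLY', location='dal09', tier=2, volume=volume, volume_type='block')
client['Product_Order'].verifyOrder(order)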
def ahrs_encode(self, omegaIx, omegaIy, omegaIz, accel_weight, renorm_val, error_rp, error_yaw):
'''
Status of DCM attitude estimator
omegaIx : X gyro drift estimate rad/s (float)
omegaIy : Y gyro drift estimate rad/s (float)
omegaIz : Z gyro drift estimate rad/s (float)
accel_weight : average accel_weight (float)
renorm_val : average renormalisation value (float)
error_rp : average error_roll_pitch value (float)
error_yaw : average error_yaw value (float)
'''
return MAVLink_ahrs_message(omegaIx, omegaIy, omegaIz, accel_weight, renorm_val, error_rp, error_yaw) | def function[ahrs_encode, parameter[self, omegaIx, omegaIy, omegaIz, accel_weight, renorm_val, error_rp, error_yaw]]:
constant[
Status of DCM attitude estimator
omegaIx : X gyro drift estimate rad/s (float)
omegaIy : Y gyro drift estimate rad/s (float)
omegaIz : Z gyro drift estimate rad/s (float)
accel_weight : average accel_weight (float)
renorm_val : average renormalisation value (float)
error_rp : average error_roll_pitch value (float)
error_yaw : average error_yaw value (float)
]
return[call[name[MAVLink_ahrs_message], parameter[name[omegaIx], name[omegaIy], name[omegaIz], name[accel_weight], name[renorm_val], name[error_rp], name[error_yaw]]]] | keyword[def] identifier[ahrs_encode] ( identifier[self] , identifier[omegaIx] , identifier[omegaIy] , identifier[omegaIz] , identifier[accel_weight] , identifier[renorm_val] , identifier[error_rp] , identifier[error_yaw] ):
literal[string]
keyword[return] identifier[MAVLink_ahrs_message] ( identifier[omegaIx] , identifier[omegaIy] , identifier[omegaIz] , identifier[accel_weight] , identifier[renorm_val] , identifier[error_rp] , identifier[error_yaw] ) | def ahrs_encode(self, omegaIx, omegaIy, omegaIz, accel_weight, renorm_val, error_rp, error_yaw):
"""
Status of DCM attitude estimator
omegaIx : X gyro drift estimate rad/s (float)
omegaIy : Y gyro drift estimate rad/s (float)
omegaIz : Z gyro drift estimate rad/s (float)
accel_weight : average accel_weight (float)
renorm_val : average renormalisation value (float)
error_rp : average error_roll_pitch value (float)
error_yaw : average error_yaw value (float)
"""
return MAVLink_ahrs_message(omegaIx, omegaIy, omegaIz, accel_weight, renorm_val, error_rp, error_yaw) |
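# Hedged usage sketch (pymavlink-style; the connection target is illustrative):
# encode an AHRS status message and push it over a MAVLink link.
from pymavlink import mavutil
master = mavutil.mavlink_connection('udpout:127.0.0.1:14550')
msg = master.mav.ahrs_encode(0.001, 0.002, 0.0005, 0.9, 1.0, 0.01, 0.02)
master.mav.send(msg)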
def filename_match(fsearch, filename, width, height):
'''
<nickname>@({width}x{height}|auto).(png|jpg|jpeg)
'''
fsearch = clean_path(fsearch)
filename = clean_path(filename)
if fsearch == filename:
return True
if fsearch.find('@') == -1:
return False
basename, fileext = os.path.splitext(fsearch)
nickname, extinfo = basename.split('@', 1)
if extinfo == 'auto':
valid_names = {}.fromkeys([
nickname+'@{}x{}'.format(width, height)+fileext,
nickname+'@{}x{}'.format(height, width)+fileext,
nickname+'.{}x{}'.format(width, height)+fileext,
nickname+'.{}x{}'.format(height, width)+fileext,
])
if filename in valid_names:
return True
# if extinfo.find('x') != -1:
# cw, ch = extinfo.split('x', 1)
# if cw*width == ch*height or cw*height == ch*width:
# return True
return False | def function[filename_match, parameter[fsearch, filename, width, height]]:
constant[
<nickname>@({width}x{height}|auto).(png|jpg|jpeg)
]
variable[fsearch] assign[=] call[name[clean_path], parameter[name[fsearch]]]
variable[filename] assign[=] call[name[clean_path], parameter[name[filename]]]
if compare[name[fsearch] equal[==] name[filename]] begin[:]
return[constant[True]]
if compare[call[name[fsearch].find, parameter[constant[@]]] equal[==] <ast.UnaryOp object at 0x7da18dc9ad10>] begin[:]
return[constant[False]]
<ast.Tuple object at 0x7da18dc9b010> assign[=] call[name[os].path.splitext, parameter[name[fsearch]]]
<ast.Tuple object at 0x7da18dc98430> assign[=] call[name[basename].split, parameter[constant[@], constant[1]]]
if compare[name[extinfo] equal[==] constant[auto]] begin[:]
variable[valid_names] assign[=] call[dictionary[[], []].fromkeys, parameter[list[[<ast.BinOp object at 0x7da18dc9b5b0>, <ast.BinOp object at 0x7da18dc9a4d0>, <ast.BinOp object at 0x7da18dc9bfd0>, <ast.BinOp object at 0x7da18dc9bdc0>]]]]
if compare[name[filename] in name[valid_names]] begin[:]
return[constant[True]]
return[constant[False]] | keyword[def] identifier[filename_match] ( identifier[fsearch] , identifier[filename] , identifier[width] , identifier[height] ):
literal[string]
identifier[fsearch] = identifier[clean_path] ( identifier[fsearch] )
identifier[filename] = identifier[clean_path] ( identifier[filename] )
keyword[if] identifier[fsearch] == identifier[filename] :
keyword[return] keyword[True]
keyword[if] identifier[fsearch] . identifier[find] ( literal[string] )==- literal[int] :
keyword[return] keyword[False]
identifier[basename] , identifier[fileext] = identifier[os] . identifier[path] . identifier[splitext] ( identifier[fsearch] )
identifier[nickname] , identifier[extinfo] = identifier[basename] . identifier[split] ( literal[string] , literal[int] )
keyword[if] identifier[extinfo] == literal[string] :
identifier[valid_names] ={}. identifier[fromkeys] ([
identifier[nickname] + literal[string] . identifier[format] ( identifier[width] , identifier[height] )+ identifier[fileext] ,
identifier[nickname] + literal[string] . identifier[format] ( identifier[height] , identifier[width] )+ identifier[fileext] ,
identifier[nickname] + literal[string] . identifier[format] ( identifier[width] , identifier[height] )+ identifier[fileext] ,
identifier[nickname] + literal[string] . identifier[format] ( identifier[height] , identifier[width] )+ identifier[fileext] ,
])
keyword[if] identifier[filename] keyword[in] identifier[valid_names] :
keyword[return] keyword[True]
keyword[return] keyword[False] | def filename_match(fsearch, filename, width, height):
"""
<nickname>@({width}x{height}|auto).(png|jpg|jpeg)
"""
fsearch = clean_path(fsearch)
filename = clean_path(filename)
if fsearch == filename:
return True # depends on [control=['if'], data=[]]
if fsearch.find('@') == -1:
return False # depends on [control=['if'], data=[]]
(basename, fileext) = os.path.splitext(fsearch)
(nickname, extinfo) = basename.split('@', 1)
if extinfo == 'auto':
valid_names = {}.fromkeys([nickname + '@{}x{}'.format(width, height) + fileext, nickname + '@{}x{}'.format(height, width) + fileext, nickname + '.{}x{}'.format(width, height) + fileext, nickname + '.{}x{}'.format(height, width) + fileext])
if filename in valid_names:
return True # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
# if extinfo.find('x') != -1:
# cw, ch = extinfo.split('x', 1)
# if cw*width == ch*height or cw*height == ch*width:
# return True
return False |
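# Worked example, assuming clean_path() leaves bare filenames unchanged: a
# file stored with the '@auto' suffix matches any search that pins a concrete
# size, in either orientation.
assert filename_match('logo@auto.png', 'logo@32x64.png', 32, 64)
assert filename_match('logo@auto.png', 'logo@64x32.png', 32, 64)
assert not filename_match('logo@auto.png', 'logo@32x64.png', 128, 128)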
def up_to_date(self):
"""Check if Team Password Manager is up to date."""
VersionInfo = self.get_latest_version()
CurrentVersion = VersionInfo.get('version')
LatestVersion = VersionInfo.get('latest_version')
if CurrentVersion == LatestVersion:
log.info('TeamPasswordManager is up-to-date!')
log.debug('Current Version: {} Latest Version: {}'.format(CurrentVersion, LatestVersion))
return True
else:
log.warning('TeamPasswordManager is not up-to-date!')
log.debug('Current Version: {} Latest Version: {}'.format(CurrentVersion, LatestVersion))
return False | def function[up_to_date, parameter[self]]:
constant[Check if Team Password Manager is up to date.]
variable[VersionInfo] assign[=] call[name[self].get_latest_version, parameter[]]
variable[CurrentVersion] assign[=] call[name[VersionInfo].get, parameter[constant[version]]]
variable[LatestVersion] assign[=] call[name[VersionInfo].get, parameter[constant[latest_version]]]
if compare[name[CurrentVersion] equal[==] name[LatestVersion]] begin[:]
call[name[log].info, parameter[constant[TeamPasswordManager is up-to-date!]]]
call[name[log].debug, parameter[call[constant[Current Version: {} Latest Version: {}].format, parameter[name[CurrentVersion], name[LatestVersion]]]]]
return[constant[True]] | keyword[def] identifier[up_to_date] ( identifier[self] ):
literal[string]
identifier[VersionInfo] = identifier[self] . identifier[get_latest_version] ()
identifier[CurrentVersion] = identifier[VersionInfo] . identifier[get] ( literal[string] )
identifier[LatestVersion] = identifier[VersionInfo] . identifier[get] ( literal[string] )
keyword[if] identifier[CurrentVersion] == identifier[LatestVersion] :
identifier[log] . identifier[info] ( literal[string] )
identifier[log] . identifier[debug] ( literal[string] . identifier[format] ( identifier[CurrentVersion] , identifier[LatestVersion] ))
keyword[return] keyword[True]
keyword[else] :
identifier[log] . identifier[warning] ( literal[string] )
identifier[log] . identifier[debug] ( literal[string] . identifier[format] ( identifier[CurrentVersion] , identifier[LatestVersion] ))
keyword[return] keyword[False] | def up_to_date(self):
"""Check if Team Password Manager is up to date."""
VersionInfo = self.get_latest_version()
CurrentVersion = VersionInfo.get('version')
LatestVersion = VersionInfo.get('latest_version')
if CurrentVersion == LatestVersion:
log.info('TeamPasswordManager is up-to-date!')
log.debug('Current Version: {} Latest Version: {}'.format(CurrentVersion, LatestVersion))
return True # depends on [control=['if'], data=['LatestVersion']]
else:
log.warning('TeamPasswordManager is not up-to-date!')
log.debug('Current Version: {} Latest Version: {}'.format(CurrentVersion, LatestVersion))
return False |
def write_table(page, headers, data, cl=''):
"""
Write a table in HTML
"""
page.table(class_=cl)
# list
if cl == 'list':
for i in range(len(headers)):
page.tr()
page.th()
page.add('%s' % headers[i])
page.th.close()
page.td()
page.add('%s' % data[i])
page.td.close()
page.tr.close()
else:
page.tr()
for n in headers:
page.th()
page.add('%s' % n)
page.th.close()
page.tr.close()
if data and not re.search('list', str(type(data[0]))):
data = [data]
for row in data:
page.tr()
for item in row:
page.td()
page.add('%s' % item)
page.td.close()
page.tr.close()
page.table.close()
return page | def function[write_table, parameter[page, headers, data, cl]]:
constant[
Write a table in HTML
]
call[name[page].table, parameter[]]
if compare[name[cl] equal[==] constant[list]] begin[:]
for taget[name[i]] in starred[call[name[range], parameter[call[name[len], parameter[name[headers]]]]]] begin[:]
call[name[page].tr, parameter[]]
call[name[page].th, parameter[]]
call[name[page].add, parameter[binary_operation[constant[%s] <ast.Mod object at 0x7da2590d6920> call[name[headers]][name[i]]]]]
call[name[page].th.close, parameter[]]
call[name[page].td, parameter[]]
call[name[page].add, parameter[binary_operation[constant[%s] <ast.Mod object at 0x7da2590d6920> call[name[data]][name[i]]]]]
call[name[page].td.close, parameter[]]
call[name[page].tr.close, parameter[]]
call[name[page].table.close, parameter[]]
return[name[page]] | keyword[def] identifier[write_table] ( identifier[page] , identifier[headers] , identifier[data] , identifier[cl] = literal[string] ):
literal[string]
identifier[page] . identifier[table] ( identifier[class_] = identifier[cl] )
keyword[if] identifier[cl] == literal[string] :
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[headers] )):
identifier[page] . identifier[tr] ()
identifier[page] . identifier[th] ()
identifier[page] . identifier[add] ( literal[string] % identifier[headers] [ identifier[i] ])
identifier[page] . identifier[th] . identifier[close] ()
identifier[page] . identifier[td] ()
identifier[page] . identifier[add] ( literal[string] % identifier[data] [ identifier[i] ])
identifier[page] . identifier[td] . identifier[close] ()
identifier[page] . identifier[tr] . identifier[close] ()
keyword[else] :
identifier[page] . identifier[tr] ()
keyword[for] identifier[n] keyword[in] identifier[headers] :
identifier[page] . identifier[th] ()
identifier[page] . identifier[add] ( literal[string] % identifier[n] )
identifier[page] . identifier[th] . identifier[close] ()
identifier[page] . identifier[tr] . identifier[close] ()
keyword[if] identifier[data] keyword[and] keyword[not] identifier[re] . identifier[search] ( literal[string] , identifier[str] ( identifier[type] ( identifier[data] [ literal[int] ]))):
identifier[data] =[ identifier[data] ]
keyword[for] identifier[row] keyword[in] identifier[data] :
identifier[page] . identifier[tr] ()
keyword[for] identifier[item] keyword[in] identifier[row] :
identifier[page] . identifier[td] ()
identifier[page] . identifier[add] ( literal[string] % identifier[item] )
identifier[page] . identifier[td] . identifier[close] ()
identifier[page] . identifier[tr] . identifier[close] ()
identifier[page] . identifier[table] . identifier[close] ()
keyword[return] identifier[page] | def write_table(page, headers, data, cl=''):
"""
Write a table in HTML
"""
page.table(class_=cl)
# list
if cl == 'list':
for i in range(len(headers)):
page.tr()
page.th()
page.add('%s' % headers[i])
page.th.close()
page.td()
page.add('%s' % data[i])
page.td.close()
page.tr.close() # depends on [control=['for'], data=['i']] # depends on [control=['if'], data=[]]
else:
page.tr()
for n in headers:
page.th()
page.add('%s' % n)
page.th.close() # depends on [control=['for'], data=['n']]
page.tr.close()
if data and (not re.search('list', str(type(data[0])))):
data = [data] # depends on [control=['if'], data=[]]
for row in data:
page.tr()
for item in row:
page.td()
page.add('%s' % item)
page.td.close() # depends on [control=['for'], data=['item']]
page.tr.close() # depends on [control=['for'], data=['row']]
page.table.close()
return page |
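# Usage sketch (assumes the markup.py library, whose page objects expose the
# table()/tr()/th()/td() methods used above):
import markup
page = markup.page()
page = write_table(page, ['Name', 'Value'], [['rate', 0.5], ['size', 10]])
print(page)  # renders the assembled <table> markup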
def qos_red_profile_drop_probability(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
qos = ET.SubElement(config, "qos", xmlns="urn:brocade.com:mgmt:brocade-qos")
red_profile = ET.SubElement(qos, "red-profile")
profile_id_key = ET.SubElement(red_profile, "profile-id")
profile_id_key.text = kwargs.pop('profile_id')
drop_probability = ET.SubElement(red_profile, "drop-probability")
drop_probability.text = kwargs.pop('drop_probability')
callback = kwargs.pop('callback', self._callback)
return callback(config) | def function[qos_red_profile_drop_probability, parameter[self]]:
constant[Auto Generated Code
]
variable[config] assign[=] call[name[ET].Element, parameter[constant[config]]]
variable[qos] assign[=] call[name[ET].SubElement, parameter[name[config], constant[qos]]]
variable[red_profile] assign[=] call[name[ET].SubElement, parameter[name[qos], constant[red-profile]]]
variable[profile_id_key] assign[=] call[name[ET].SubElement, parameter[name[red_profile], constant[profile-id]]]
name[profile_id_key].text assign[=] call[name[kwargs].pop, parameter[constant[profile_id]]]
variable[drop_probability] assign[=] call[name[ET].SubElement, parameter[name[red_profile], constant[drop-probability]]]
name[drop_probability].text assign[=] call[name[kwargs].pop, parameter[constant[drop_probability]]]
variable[callback] assign[=] call[name[kwargs].pop, parameter[constant[callback], name[self]._callback]]
return[call[name[callback], parameter[name[config]]]] | keyword[def] identifier[qos_red_profile_drop_probability] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
identifier[config] = identifier[ET] . identifier[Element] ( literal[string] )
identifier[qos] = identifier[ET] . identifier[SubElement] ( identifier[config] , literal[string] , identifier[xmlns] = literal[string] )
identifier[red_profile] = identifier[ET] . identifier[SubElement] ( identifier[qos] , literal[string] )
identifier[profile_id_key] = identifier[ET] . identifier[SubElement] ( identifier[red_profile] , literal[string] )
identifier[profile_id_key] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] )
identifier[drop_probability] = identifier[ET] . identifier[SubElement] ( identifier[red_profile] , literal[string] )
identifier[drop_probability] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] )
identifier[callback] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[self] . identifier[_callback] )
keyword[return] identifier[callback] ( identifier[config] ) | def qos_red_profile_drop_probability(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element('config')
qos = ET.SubElement(config, 'qos', xmlns='urn:brocade.com:mgmt:brocade-qos')
red_profile = ET.SubElement(qos, 'red-profile')
profile_id_key = ET.SubElement(red_profile, 'profile-id')
profile_id_key.text = kwargs.pop('profile_id')
drop_probability = ET.SubElement(red_profile, 'drop-probability')
drop_probability.text = kwargs.pop('drop_probability')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
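# Hedged usage sketch (pynos-style device object; `dev` is illustrative):
# set the drop probability of RED profile 1 to 40 percent.
dev.qos_red_profile_drop_probability(profile_id='1', drop_probability='40')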
def answers(self):
"""获取话题下所有答案(按时间降序排列)
:return: 话题下所有答案,返回生成器
:rtype: Answer.Iterable
"""
from .question import Question
from .answer import Answer
from .author import Author, ANONYMOUS
newest_url = Topic_Newest_Url.format(self.id)
params = {'start': 0, '_xsrf': self.xsrf}
res = self._session.get(newest_url)
soup = BeautifulSoup(res.content)
while True:
divs = soup.find_all('div', class_='folding')
# If the topic has no answers, return immediately
if len(divs) == 0:
return
last_score = divs[-1]['data-score']
for div in divs:
q = div.find('a', class_="question_link")
question_url = Zhihu_URL + q['href']
question_title = q.text.strip()
question = Question(question_url, question_title,
session=self._session)
ans = div.find('a', class_='answer-date-link')
answer_url = Zhihu_URL + ans['href']
upvote = div.find('a', class_='zm-item-vote-count').text
if upvote.isdigit():
upvote = int(upvote)
else:
upvote = None
au = div.find('div', class_='zm-item-answer-author-info')
if au.a is None:
author = ANONYMOUS
else:
author_url = Zhihu_URL + au.a['href']
author_name = au.a.text
author_motto = au.strong['title'] if au.strong else ''
author = Author(author_url, author_name, author_motto,
session=self._session)
yield Answer(answer_url, question, author, upvote,
session=self._session)
params['offset'] = last_score
res = self._session.post(newest_url, data=params)
gotten_feed_num = res.json()['msg'][0]
# If zero items were fetched, return
if gotten_feed_num == 0:
return
soup = BeautifulSoup(res.json()['msg'][1]) | def function[answers, parameter[self]]:
constant[Get all answers under this topic (sorted by time, descending)
:return: all answers under the topic, as a generator
:rtype: Answer.Iterable
]
from relative_module[question] import module[Question]
from relative_module[answer] import module[Answer]
from relative_module[author] import module[Author], module[ANONYMOUS]
variable[newest_url] assign[=] call[name[Topic_Newest_Url].format, parameter[name[self].id]]
variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da20cabed70>, <ast.Constant object at 0x7da20cabf280>], [<ast.Constant object at 0x7da20cabecb0>, <ast.Attribute object at 0x7da20cabd690>]]
variable[res] assign[=] call[name[self]._session.get, parameter[name[newest_url]]]
variable[soup] assign[=] call[name[BeautifulSoup], parameter[name[res].content]]
while constant[True] begin[:]
variable[divs] assign[=] call[name[soup].find_all, parameter[constant[div]]]
if compare[call[name[len], parameter[name[divs]]] equal[==] constant[0]] begin[:]
return[None]
variable[last_score] assign[=] call[call[name[divs]][<ast.UnaryOp object at 0x7da20e9b25c0>]][constant[data-score]]
for taget[name[div]] in starred[name[divs]] begin[:]
variable[q] assign[=] call[name[div].find, parameter[constant[a]]]
variable[question_url] assign[=] binary_operation[name[Zhihu_URL] + call[name[q]][constant[href]]]
variable[question_title] assign[=] call[name[q].text.strip, parameter[]]
variable[question] assign[=] call[name[Question], parameter[name[question_url], name[question_title]]]
variable[ans] assign[=] call[name[div].find, parameter[constant[a]]]
variable[answer_url] assign[=] binary_operation[name[Zhihu_URL] + call[name[ans]][constant[href]]]
variable[upvote] assign[=] call[name[div].find, parameter[constant[a]]].text
if call[name[upvote].isdigit, parameter[]] begin[:]
variable[upvote] assign[=] call[name[int], parameter[name[upvote]]]
variable[au] assign[=] call[name[div].find, parameter[constant[div]]]
if compare[name[au].a is constant[None]] begin[:]
variable[author] assign[=] name[ANONYMOUS]
<ast.Yield object at 0x7da204960c70>
call[name[params]][constant[offset]] assign[=] name[last_score]
variable[res] assign[=] call[name[self]._session.post, parameter[name[newest_url]]]
variable[gotten_feed_num] assign[=] call[call[call[name[res].json, parameter[]]][constant[msg]]][constant[0]]
if compare[name[gotten_feed_num] equal[==] constant[0]] begin[:]
return[None]
variable[soup] assign[=] call[name[BeautifulSoup], parameter[call[call[call[name[res].json, parameter[]]][constant[msg]]][constant[1]]]] | keyword[def] identifier[answers] ( identifier[self] ):
literal[string]
keyword[from] . identifier[question] keyword[import] identifier[Question]
keyword[from] . identifier[answer] keyword[import] identifier[Answer]
keyword[from] . identifier[author] keyword[import] identifier[Author] , identifier[ANONYMOUS]
identifier[newest_url] = identifier[Topic_Newest_Url] . identifier[format] ( identifier[self] . identifier[id] )
identifier[params] ={ literal[string] : literal[int] , literal[string] : identifier[self] . identifier[xsrf] }
identifier[res] = identifier[self] . identifier[_session] . identifier[get] ( identifier[newest_url] )
identifier[soup] = identifier[BeautifulSoup] ( identifier[res] . identifier[content] )
keyword[while] keyword[True] :
identifier[divs] = identifier[soup] . identifier[find_all] ( literal[string] , identifier[class_] = literal[string] )
keyword[if] identifier[len] ( identifier[divs] )== literal[int] :
keyword[return]
identifier[last_score] = identifier[divs] [- literal[int] ][ literal[string] ]
keyword[for] identifier[div] keyword[in] identifier[divs] :
identifier[q] = identifier[div] . identifier[find] ( literal[string] , identifier[class_] = literal[string] )
identifier[question_url] = identifier[Zhihu_URL] + identifier[q] [ literal[string] ]
identifier[question_title] = identifier[q] . identifier[text] . identifier[strip] ()
identifier[question] = identifier[Question] ( identifier[question_url] , identifier[question_title] ,
identifier[session] = identifier[self] . identifier[_session] )
identifier[ans] = identifier[div] . identifier[find] ( literal[string] , identifier[class_] = literal[string] )
identifier[answer_url] = identifier[Zhihu_URL] + identifier[ans] [ literal[string] ]
identifier[upvote] = identifier[div] . identifier[find] ( literal[string] , identifier[class_] = literal[string] ). identifier[text]
keyword[if] identifier[upvote] . identifier[isdigit] ():
identifier[upvote] = identifier[int] ( identifier[upvote] )
keyword[else] :
identifier[upvote] = keyword[None]
identifier[au] = identifier[div] . identifier[find] ( literal[string] , identifier[class_] = literal[string] )
keyword[if] identifier[au] . identifier[a] keyword[is] keyword[None] :
identifier[author] = identifier[ANONYMOUS]
keyword[else] :
identifier[author_url] = identifier[Zhihu_URL] + identifier[au] . identifier[a] [ literal[string] ]
identifier[author_name] = identifier[au] . identifier[a] . identifier[text]
identifier[author_motto] = identifier[au] . identifier[strong] [ literal[string] ] keyword[if] identifier[au] . identifier[strong] keyword[else] literal[string]
identifier[author] = identifier[Author] ( identifier[author_url] , identifier[author_name] , identifier[author_motto] ,
identifier[session] = identifier[self] . identifier[_session] )
keyword[yield] identifier[Answer] ( identifier[answer_url] , identifier[question] , identifier[author] , identifier[upvote] ,
identifier[session] = identifier[self] . identifier[_session] )
identifier[params] [ literal[string] ]= identifier[last_score]
identifier[res] = identifier[self] . identifier[_session] . identifier[post] ( identifier[newest_url] , identifier[data] = identifier[params] )
identifier[gotten_feed_num] = identifier[res] . identifier[json] ()[ literal[string] ][ literal[int] ]
keyword[if] identifier[gotten_feed_num] == literal[int] :
keyword[return]
identifier[soup] = identifier[BeautifulSoup] ( identifier[res] . identifier[json] ()[ literal[string] ][ literal[int] ]) | def answers(self):
"""获取话题下所有答案(按时间降序排列)
:return: 话题下所有答案,返回生成器
:rtype: Answer.Iterable
"""
from .question import Question
from .answer import Answer
from .author import Author, ANONYMOUS
newest_url = Topic_Newest_Url.format(self.id)
params = {'start': 0, '_xsrf': self.xsrf}
res = self._session.get(newest_url)
soup = BeautifulSoup(res.content)
while True:
divs = soup.find_all('div', class_='folding')
# If the topic has no answers, return immediately
if len(divs) == 0:
return # depends on [control=['if'], data=[]]
last_score = divs[-1]['data-score']
for div in divs:
q = div.find('a', class_='question_link')
question_url = Zhihu_URL + q['href']
question_title = q.text.strip()
question = Question(question_url, question_title, session=self._session)
ans = div.find('a', class_='answer-date-link')
answer_url = Zhihu_URL + ans['href']
upvote = div.find('a', class_='zm-item-vote-count').text
if upvote.isdigit():
upvote = int(upvote) # depends on [control=['if'], data=[]]
else:
upvote = None
au = div.find('div', class_='zm-item-answer-author-info')
if au.a is None:
author = ANONYMOUS # depends on [control=['if'], data=[]]
else:
author_url = Zhihu_URL + au.a['href']
author_name = au.a.text
author_motto = au.strong['title'] if au.strong else ''
author = Author(author_url, author_name, author_motto, session=self._session)
yield Answer(answer_url, question, author, upvote, session=self._session) # depends on [control=['for'], data=['div']]
params['offset'] = last_score
res = self._session.post(newest_url, data=params)
gotten_feed_num = res.json()['msg'][0]
# If zero items were fetched, return
if gotten_feed_num == 0:
return # depends on [control=['if'], data=[]]
soup = BeautifulSoup(res.json()['msg'][1]) # depends on [control=['while'], data=[]] |
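# Usage sketch (hypothetical topic object from this client library): the
# generator pages through results lazily, so cap the iteration explicitly.
for i, answer in enumerate(topic.answers()):
    print(answer)
    if i >= 10:  # the generator keeps paging until the server runs dry
        break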
def parse_dimension(self, node):
"""
Parses <Dimension>
@param node: Node containing the <Dimension> element
@type node: xml.etree.Element
@raise ParseError: When the name is not a string or if the
dimension is not a signed integer.
"""
try:
name = node.lattrib['name']
except:
self.raise_error('<Dimension> must specify a name')
description = node.lattrib.get('description', '')
dim = dict()
for d in ['l', 'm', 't', 'i', 'k', 'c', 'n']:
dim[d] = int(node.lattrib.get(d, 0))
self.model.add_dimension(Dimension(name, description, **dim)) | def function[parse_dimension, parameter[self, node]]:
constant[
Parses <Dimension>
@param node: Node containing the <Dimension> element
@type node: xml.etree.Element
@raise ParseError: When the name is not a string or if the
dimension is not a signed integer.
]
<ast.Try object at 0x7da1b2382980>
variable[description] assign[=] call[name[node].lattrib.get, parameter[constant[description], constant[]]]
variable[dim] assign[=] call[name[dict], parameter[]]
for taget[name[d]] in starred[list[[<ast.Constant object at 0x7da1b2504be0>, <ast.Constant object at 0x7da1b2507280>, <ast.Constant object at 0x7da1b2505060>, <ast.Constant object at 0x7da1b25046a0>, <ast.Constant object at 0x7da1b2507f10>, <ast.Constant object at 0x7da1b25064a0>, <ast.Constant object at 0x7da1b2505d80>]]] begin[:]
call[name[dim]][name[d]] assign[=] call[name[int], parameter[call[name[node].lattrib.get, parameter[name[d], constant[0]]]]]
call[name[self].model.add_dimension, parameter[call[name[Dimension], parameter[name[name], name[description]]]]] | keyword[def] identifier[parse_dimension] ( identifier[self] , identifier[node] ):
literal[string]
keyword[try] :
identifier[name] = identifier[node] . identifier[lattrib] [ literal[string] ]
keyword[except] :
identifier[self] . identifier[raise_error] ( literal[string] )
identifier[description] = identifier[node] . identifier[lattrib] . identifier[get] ( literal[string] , literal[string] )
identifier[dim] = identifier[dict] ()
keyword[for] identifier[d] keyword[in] [ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ]:
identifier[dim] [ identifier[d] ]= identifier[int] ( identifier[node] . identifier[lattrib] . identifier[get] ( identifier[d] , literal[int] ))
identifier[self] . identifier[model] . identifier[add_dimension] ( identifier[Dimension] ( identifier[name] , identifier[description] ,** identifier[dim] )) | def parse_dimension(self, node):
"""
Parses <Dimension>
@param node: Node containing the <Dimension> element
@type node: xml.etree.Element
@raise ParseError: When the name is not a string or if the
dimension is not a signed integer.
"""
try:
name = node.lattrib['name'] # depends on [control=['try'], data=[]]
except:
self.raise_error('<Dimension> must specify a name') # depends on [control=['except'], data=[]]
description = node.lattrib.get('description', '')
dim = dict()
for d in ['l', 'm', 't', 'i', 'k', 'c', 'n']:
dim[d] = int(node.lattrib.get(d, 0)) # depends on [control=['for'], data=['d']]
self.model.add_dimension(Dimension(name, description, **dim)) |
def _get_setup(self, result):
"""Internal method which process the results from the server."""
self.__devices = {}
if ('setup' not in result.keys() or
'devices' not in result['setup'].keys()):
raise Exception(
"Did not find device definition.")
for device_data in result['setup']['devices']:
device = Device(self, device_data)
self.__devices[device.url] = device
self.__location = result['setup']['location']
self.__gateway = result['setup']['gateways'] | def function[_get_setup, parameter[self, result]]:
constant[Internal method which processes the results from the server.]
name[self].__devices assign[=] dictionary[[], []]
if <ast.BoolOp object at 0x7da1b0c14430> begin[:]
<ast.Raise object at 0x7da1b0c150c0>
for taget[name[device_data]] in starred[call[call[name[result]][constant[setup]]][constant[devices]]] begin[:]
variable[device] assign[=] call[name[Device], parameter[name[self], name[device_data]]]
call[name[self].__devices][name[device].url] assign[=] name[device]
name[self].__location assign[=] call[call[name[result]][constant[setup]]][constant[location]]
name[self].__gateway assign[=] call[call[name[result]][constant[setup]]][constant[gateways]] | keyword[def] identifier[_get_setup] ( identifier[self] , identifier[result] ):
literal[string]
identifier[self] . identifier[__devices] ={}
keyword[if] ( literal[string] keyword[not] keyword[in] identifier[result] . identifier[keys] () keyword[or]
literal[string] keyword[not] keyword[in] identifier[result] [ literal[string] ]. identifier[keys] ()):
keyword[raise] identifier[Exception] (
literal[string] )
keyword[for] identifier[device_data] keyword[in] identifier[result] [ literal[string] ][ literal[string] ]:
identifier[device] = identifier[Device] ( identifier[self] , identifier[device_data] )
identifier[self] . identifier[__devices] [ identifier[device] . identifier[url] ]= identifier[device]
identifier[self] . identifier[__location] = identifier[result] [ literal[string] ][ literal[string] ]
identifier[self] . identifier[__gateway] = identifier[result] [ literal[string] ][ literal[string] ] | def _get_setup(self, result):
"""Internal method which process the results from the server."""
self.__devices = {}
if 'setup' not in result.keys() or 'devices' not in result['setup'].keys():
raise Exception('Did not find device definition.') # depends on [control=['if'], data=[]]
for device_data in result['setup']['devices']:
device = Device(self, device_data)
self.__devices[device.url] = device # depends on [control=['for'], data=['device_data']]
self.__location = result['setup']['location']
self.__gateway = result['setup']['gateways'] |
def get_object(self, view_kwargs, qs=None):
"""Retrieve an object through sqlalchemy
:param dict view_kwargs: kwargs from the resource view
:return DeclarativeMeta: an object from sqlalchemy
"""
self.before_get_object(view_kwargs)
id_field = getattr(self, 'id_field', inspect(self.model).primary_key[0].key)
try:
filter_field = getattr(self.model, id_field)
except Exception:
raise Exception("{} has no attribute {}".format(self.model.__name__, id_field))
url_field = getattr(self, 'url_field', 'id')
filter_value = view_kwargs[url_field]
query = self.retrieve_object_query(view_kwargs, filter_field, filter_value)
if qs is not None:
query = self.eagerload_includes(query, qs)
try:
obj = query.one()
except NoResultFound:
obj = None
self.after_get_object(obj, view_kwargs)
return obj | def function[get_object, parameter[self, view_kwargs, qs]]:
constant[Retrieve an object through sqlalchemy
:param dict view_kwargs: kwargs from the resource view
:return DeclarativeMeta: an object from sqlalchemy
]
call[name[self].before_get_object, parameter[name[view_kwargs]]]
variable[id_field] assign[=] call[name[getattr], parameter[name[self], constant[id_field], call[call[name[inspect], parameter[name[self].model]].primary_key][constant[0]].key]]
<ast.Try object at 0x7da1b1601780>
variable[url_field] assign[=] call[name[getattr], parameter[name[self], constant[url_field], constant[id]]]
variable[filter_value] assign[=] call[name[view_kwargs]][name[url_field]]
variable[query] assign[=] call[name[self].retrieve_object_query, parameter[name[view_kwargs], name[filter_field], name[filter_value]]]
if compare[name[qs] is_not constant[None]] begin[:]
variable[query] assign[=] call[name[self].eagerload_includes, parameter[name[query], name[qs]]]
<ast.Try object at 0x7da1b16322c0>
call[name[self].after_get_object, parameter[name[obj], name[view_kwargs]]]
return[name[obj]] | keyword[def] identifier[get_object] ( identifier[self] , identifier[view_kwargs] , identifier[qs] = keyword[None] ):
literal[string]
identifier[self] . identifier[before_get_object] ( identifier[view_kwargs] )
identifier[id_field] = identifier[getattr] ( identifier[self] , literal[string] , identifier[inspect] ( identifier[self] . identifier[model] ). identifier[primary_key] [ literal[int] ]. identifier[key] )
keyword[try] :
identifier[filter_field] = identifier[getattr] ( identifier[self] . identifier[model] , identifier[id_field] )
keyword[except] identifier[Exception] :
keyword[raise] identifier[Exception] ( literal[string] . identifier[format] ( identifier[self] . identifier[model] . identifier[__name__] , identifier[id_field] ))
identifier[url_field] = identifier[getattr] ( identifier[self] , literal[string] , literal[string] )
identifier[filter_value] = identifier[view_kwargs] [ identifier[url_field] ]
identifier[query] = identifier[self] . identifier[retrieve_object_query] ( identifier[view_kwargs] , identifier[filter_field] , identifier[filter_value] )
keyword[if] identifier[qs] keyword[is] keyword[not] keyword[None] :
identifier[query] = identifier[self] . identifier[eagerload_includes] ( identifier[query] , identifier[qs] )
keyword[try] :
identifier[obj] = identifier[query] . identifier[one] ()
keyword[except] identifier[NoResultFound] :
identifier[obj] = keyword[None]
identifier[self] . identifier[after_get_object] ( identifier[obj] , identifier[view_kwargs] )
keyword[return] identifier[obj] | def get_object(self, view_kwargs, qs=None):
"""Retrieve an object through sqlalchemy
:param dict view_kwargs: kwargs from the resource view
:return DeclarativeMeta: an object from sqlalchemy
"""
self.before_get_object(view_kwargs)
id_field = getattr(self, 'id_field', inspect(self.model).primary_key[0].key)
try:
filter_field = getattr(self.model, id_field) # depends on [control=['try'], data=[]]
except Exception:
raise Exception('{} has no attribute {}'.format(self.model.__name__, id_field)) # depends on [control=['except'], data=[]]
url_field = getattr(self, 'url_field', 'id')
filter_value = view_kwargs[url_field]
query = self.retrieve_object_query(view_kwargs, filter_field, filter_value)
if qs is not None:
query = self.eagerload_includes(query, qs) # depends on [control=['if'], data=['qs']]
try:
obj = query.one() # depends on [control=['try'], data=[]]
except NoResultFound:
obj = None # depends on [control=['except'], data=[]]
self.after_get_object(obj, view_kwargs)
return obj |
def crud_mutation_name(action, model):
"""
This function returns the name of a mutation that performs the specified
crud action on the given model service
"""
model_string = get_model_string(model)
# make sure the mutation name is correctly camelcased
model_string = model_string[0].upper() + model_string[1:]
# return the mutation name
return "{}{}".format(action, model_string) | def function[crud_mutation_name, parameter[action, model]]:
constant[
This function returns the name of a mutation that performs the specified
crud action on the given model service
]
variable[model_string] assign[=] call[name[get_model_string], parameter[name[model]]]
variable[model_string] assign[=] binary_operation[call[call[name[model_string]][constant[0]].upper, parameter[]] + call[name[model_string]][<ast.Slice object at 0x7da20e963610>]]
return[call[constant[{}{}].format, parameter[name[action], name[model_string]]]] | keyword[def] identifier[crud_mutation_name] ( identifier[action] , identifier[model] ):
literal[string]
identifier[model_string] = identifier[get_model_string] ( identifier[model] )
identifier[model_string] = identifier[model_string] [ literal[int] ]. identifier[upper] ()+ identifier[model_string] [ literal[int] :]
keyword[return] literal[string] . identifier[format] ( identifier[action] , identifier[model_string] ) | def crud_mutation_name(action, model):
"""
This function returns the name of a mutation that performs the specified
crud action on the given model service
"""
model_string = get_model_string(model)
# make sure the mutation name is correctly camelcased
model_string = model_string[0].upper() + model_string[1:]
# return the mutation name
return '{}{}'.format(action, model_string) |
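# Worked example: assuming get_model_string() returns the lowercase model
# name (e.g. 'user' for a hypothetical User model), the generated names are:
#   crud_mutation_name('create', User)  # -> 'createUser'
#   crud_mutation_name('delete', User)  # -> 'deleteUser'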
def _faster_to_representation(self, instance):
"""Modified to_representation with optimizations.
1) Returns a plain old dict as opposed to OrderedDict.
(Constructing ordered dict is ~100x slower than `{}`.)
2) Ensure we use a cached list of fields
(this optimization exists in DRF 3.2 but not 3.1)
Arguments:
instance: a model instance or data object
Returns:
Dict of primitive datatypes.
"""
ret = {}
fields = self._readable_fields
is_fast = isinstance(instance, prefetch.FastObject)
id_fields = self._readable_id_fields
for field in fields:
attribute = None
# we exclude dynamic fields here because the proper fastquery
# dereferencing happens in the `get_attribute` method now
if (
is_fast and
not isinstance(
field,
(DynamicGenericRelationField, DynamicRelationField)
)
):
if field in id_fields and field.source not in instance:
# TODO - make better.
attribute = instance.get(field.source + '_id')
ret[field.field_name] = attribute
continue
else:
try:
attribute = instance[field.source]
except KeyError:
# slower, but does more stuff
# Also, some temp debugging
if hasattr(instance, field.source):
attribute = getattr(instance, field.source)
else:
# Fall back on DRF behavior
attribute = field.get_attribute(instance)
print(
'Missing %s from %s' % (
field.field_name,
self.__class__.__name__
)
)
else:
try:
attribute = field.get_attribute(instance)
except SkipField:
continue
if attribute is None:
# We skip `to_representation` for `None` values so that
# fields do not have to explicitly deal with that case.
ret[field.field_name] = None
else:
ret[field.field_name] = field.to_representation(attribute)
return ret | def function[_faster_to_representation, parameter[self, instance]]:
constant[Modified to_representation with optimizations.
1) Returns a plain old dict as opposed to OrderedDict.
(Constructing ordered dict is ~100x slower than `{}`.)
2) Ensure we use a cached list of fields
(this optimization exists in DRF 3.2 but not 3.1)
Arguments:
instance: a model instance or data object
Returns:
Dict of primitive datatypes.
]
variable[ret] assign[=] dictionary[[], []]
variable[fields] assign[=] name[self]._readable_fields
variable[is_fast] assign[=] call[name[isinstance], parameter[name[instance], name[prefetch].FastObject]]
variable[id_fields] assign[=] name[self]._readable_id_fields
for taget[name[field]] in starred[name[fields]] begin[:]
variable[attribute] assign[=] constant[None]
if <ast.BoolOp object at 0x7da20e962ce0> begin[:]
if <ast.BoolOp object at 0x7da20e9631c0> begin[:]
variable[attribute] assign[=] call[name[instance].get, parameter[binary_operation[name[field].source + constant[_id]]]]
call[name[ret]][name[field].field_name] assign[=] name[attribute]
continue
if compare[name[attribute] is constant[None]] begin[:]
call[name[ret]][name[field].field_name] assign[=] constant[None]
return[name[ret]] | keyword[def] identifier[_faster_to_representation] ( identifier[self] , identifier[instance] ):
literal[string]
identifier[ret] ={}
identifier[fields] = identifier[self] . identifier[_readable_fields]
identifier[is_fast] = identifier[isinstance] ( identifier[instance] , identifier[prefetch] . identifier[FastObject] )
identifier[id_fields] = identifier[self] . identifier[_readable_id_fields]
keyword[for] identifier[field] keyword[in] identifier[fields] :
identifier[attribute] = keyword[None]
keyword[if] (
identifier[is_fast] keyword[and]
keyword[not] identifier[isinstance] (
identifier[field] ,
( identifier[DynamicGenericRelationField] , identifier[DynamicRelationField] )
)
):
keyword[if] identifier[field] keyword[in] identifier[id_fields] keyword[and] identifier[field] . identifier[source] keyword[not] keyword[in] identifier[instance] :
identifier[attribute] = identifier[instance] . identifier[get] ( identifier[field] . identifier[source] + literal[string] )
identifier[ret] [ identifier[field] . identifier[field_name] ]= identifier[attribute]
keyword[continue]
keyword[else] :
keyword[try] :
identifier[attribute] = identifier[instance] [ identifier[field] . identifier[source] ]
keyword[except] identifier[KeyError] :
keyword[if] identifier[hasattr] ( identifier[instance] , identifier[field] . identifier[source] ):
identifier[attribute] = identifier[getattr] ( identifier[instance] , identifier[field] . identifier[source] )
keyword[else] :
identifier[attribute] = identifier[field] . identifier[get_attribute] ( identifier[instance] )
identifier[print] (
literal[string] %(
identifier[field] . identifier[field_name] ,
identifier[self] . identifier[__class__] . identifier[__name__]
)
)
keyword[else] :
keyword[try] :
identifier[attribute] = identifier[field] . identifier[get_attribute] ( identifier[instance] )
keyword[except] identifier[SkipField] :
keyword[continue]
keyword[if] identifier[attribute] keyword[is] keyword[None] :
identifier[ret] [ identifier[field] . identifier[field_name] ]= keyword[None]
keyword[else] :
identifier[ret] [ identifier[field] . identifier[field_name] ]= identifier[field] . identifier[to_representation] ( identifier[attribute] )
keyword[return] identifier[ret] | def _faster_to_representation(self, instance):
"""Modified to_representation with optimizations.
1) Returns a plain old dict as opposed to OrderedDict.
(Constructing ordered dict is ~100x slower than `{}`.)
2) Ensure we use a cached list of fields
(this optimization exists in DRF 3.2 but not 3.1)
Arguments:
instance: a model instance or data object
Returns:
Dict of primitive datatypes.
"""
ret = {}
fields = self._readable_fields
is_fast = isinstance(instance, prefetch.FastObject)
id_fields = self._readable_id_fields
for field in fields:
attribute = None
# we exclude dynamic fields here because the proper fastquery
# dereferencing happens in the `get_attribute` method now
if is_fast and (not isinstance(field, (DynamicGenericRelationField, DynamicRelationField))):
if field in id_fields and field.source not in instance:
# TODO - make better.
attribute = instance.get(field.source + '_id')
ret[field.field_name] = attribute
continue # depends on [control=['if'], data=[]]
else:
try:
attribute = instance[field.source] # depends on [control=['try'], data=[]]
except KeyError:
# slower, but does more stuff
# Also, some temp debugging
if hasattr(instance, field.source):
attribute = getattr(instance, field.source) # depends on [control=['if'], data=[]]
else:
# Fall back on DRF behavior
attribute = field.get_attribute(instance)
print('Missing %s from %s' % (field.field_name, self.__class__.__name__)) # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
else:
try:
attribute = field.get_attribute(instance) # depends on [control=['try'], data=[]]
except SkipField:
continue # depends on [control=['except'], data=[]]
if attribute is None:
# We skip `to_representation` for `None` values so that
# fields do not have to explicitly deal with that case.
ret[field.field_name] = None # depends on [control=['if'], data=[]]
else:
ret[field.field_name] = field.to_representation(attribute) # depends on [control=['for'], data=['field']]
return ret |
def make_oracle(input_qubits,
output_qubit,
secret_factor_bits,
secret_bias_bit):
"""Gates implementing the function f(a) = a·factors + bias (mod 2)."""
if secret_bias_bit:
yield cirq.X(output_qubit)
for qubit, bit in zip(input_qubits, secret_factor_bits):
if bit:
yield cirq.CNOT(qubit, output_qubit) | def function[make_oracle, parameter[input_qubits, output_qubit, secret_factor_bits, secret_bias_bit]]:
constant[Gates implementing the function f(a) = a·factors + bias (mod 2).]
if name[secret_bias_bit] begin[:]
<ast.Yield object at 0x7da1b1cc1ed0>
for taget[tuple[[<ast.Name object at 0x7da1b1cc3790>, <ast.Name object at 0x7da1b1cc0910>]]] in starred[call[name[zip], parameter[name[input_qubits], name[secret_factor_bits]]]] begin[:]
if name[bit] begin[:]
<ast.Yield object at 0x7da1b1cc2890> | keyword[def] identifier[make_oracle] ( identifier[input_qubits] ,
identifier[output_qubit] ,
identifier[secret_factor_bits] ,
identifier[secret_bias_bit] ):
literal[string]
keyword[if] identifier[secret_bias_bit] :
keyword[yield] identifier[cirq] . identifier[X] ( identifier[output_qubit] )
keyword[for] identifier[qubit] , identifier[bit] keyword[in] identifier[zip] ( identifier[input_qubits] , identifier[secret_factor_bits] ):
keyword[if] identifier[bit] :
keyword[yield] identifier[cirq] . identifier[CNOT] ( identifier[qubit] , identifier[output_qubit] ) | def make_oracle(input_qubits, output_qubit, secret_factor_bits, secret_bias_bit):
"""Gates implementing the function f(a) = a·factors + bias (mod 2)."""
if secret_bias_bit:
yield cirq.X(output_qubit) # depends on [control=['if'], data=[]]
for (qubit, bit) in zip(input_qubits, secret_factor_bits):
if bit:
yield cirq.CNOT(qubit, output_qubit) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] |
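# Usage sketch (assumes cirq is installed): build the oracle for
# f(a) = a . [1, 0, 1] + 1 (mod 2) on three input qubits and inspect it.
import cirq
input_qubits = cirq.LineQubit.range(3)
output_qubit = cirq.LineQubit(3)
circuit = cirq.Circuit(make_oracle(input_qubits, output_qubit, [1, 0, 1], 1))
print(circuit)  # one X on the output qubit, plus CNOTs from qubits 0 and 2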
def shared_memory(attrs=None, where=None):
'''
Return shared_memory information from osquery
CLI Example:
.. code-block:: bash
salt '*' osquery.shared_memory
'''
if __grains__['os_family'] in ['RedHat', 'Debian']:
return _osquery_cmd(table='shared_memory', attrs=attrs, where=where)
return {'result': False, 'comment': 'Only available on Red Hat or Debian based systems.'} | def function[shared_memory, parameter[attrs, where]]:
constant[
Return shared_memory information from osquery
CLI Example:
.. code-block:: bash
salt '*' osquery.shared_memory
]
if compare[call[name[__grains__]][constant[os_family]] in list[[<ast.Constant object at 0x7da1b20083d0>, <ast.Constant object at 0x7da1b20089d0>]]] begin[:]
return[call[name[_osquery_cmd], parameter[]]]
return[dictionary[[<ast.Constant object at 0x7da1b2008c70>, <ast.Constant object at 0x7da20c7c8370>], [<ast.Constant object at 0x7da1b1f39930>, <ast.Constant object at 0x7da1b1f3a890>]]] | keyword[def] identifier[shared_memory] ( identifier[attrs] = keyword[None] , identifier[where] = keyword[None] ):
literal[string]
keyword[if] identifier[__grains__] [ literal[string] ] keyword[in] [ literal[string] , literal[string] ]:
keyword[return] identifier[_osquery_cmd] ( identifier[table] = literal[string] , identifier[attrs] = identifier[attrs] , identifier[where] = identifier[where] )
keyword[return] { literal[string] : keyword[False] , literal[string] : literal[string] } | def shared_memory(attrs=None, where=None):
"""
Return shared_memory information from osquery
CLI Example:
.. code-block:: bash
salt '*' osquery.shared_memory
"""
if __grains__['os_family'] in ['RedHat', 'Debian']:
return _osquery_cmd(table='shared_memory', attrs=attrs, where=where) # depends on [control=['if'], data=[]]
return {'result': False, 'comment': 'Only available on Red Hat or Debian based systems.'} |
def ui_open(*files):
"""Attempts to open the given files using the preferred desktop viewer or editor.
:raises :class:`OpenError`: if there is a problem opening any of the files.
"""
if files:
osname = get_os_name()
opener = _OPENER_BY_OS.get(osname)
if opener:
opener(files)
else:
raise OpenError('Open currently not supported for ' + osname) | def function[ui_open, parameter[]]:
constant[Attempts to open the given files using the preferred desktop viewer or editor.
:raises :class:`OpenError`: if there is a problem opening any of the files.
]
if name[files] begin[:]
variable[osname] assign[=] call[name[get_os_name], parameter[]]
variable[opener] assign[=] call[name[_OPENER_BY_OS].get, parameter[name[osname]]]
if name[opener] begin[:]
call[name[opener], parameter[name[files]]] | keyword[def] identifier[ui_open] (* identifier[files] ):
literal[string]
keyword[if] identifier[files] :
identifier[osname] = identifier[get_os_name] ()
identifier[opener] = identifier[_OPENER_BY_OS] . identifier[get] ( identifier[osname] )
keyword[if] identifier[opener] :
identifier[opener] ( identifier[files] )
keyword[else] :
keyword[raise] identifier[OpenError] ( literal[string] + identifier[osname] ) | def ui_open(*files):
"""Attempts to open the given files using the preferred desktop viewer or editor.
:raises :class:`OpenError`: if there is a problem opening any of the files.
"""
if files:
osname = get_os_name()
opener = _OPENER_BY_OS.get(osname)
if opener:
opener(files) # depends on [control=['if'], data=[]]
else:
raise OpenError('Open currently not supported for ' + osname) # depends on [control=['if'], data=[]] |
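# Usage sketch: hand files to the platform's default desktop opener; an
# OpenError propagates on unsupported platforms.
ui_open('/tmp/report.pdf', '/tmp/notes.txt')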
def beta_code(self, text):
"""Replace method. Note: regex.subn() returns a tuple (new_string,
number_of_subs_made).
"""
text = text.upper().replace('-', '')
for (pattern, repl) in self.pattern1:
text = pattern.subn(repl, text)[0]
for (pattern, repl) in self.pattern2:
text = pattern.subn(repl, text)[0]
# remove third run, if punct list not used
for (pattern, repl) in self.pattern3:
text = pattern.subn(repl, text)[0]
return text | def function[beta_code, parameter[self, text]]:
constant[Replace method. Note: regex.subn() returns a tuple (new_string,
number_of_subs_made).
]
variable[text] assign[=] call[call[name[text].upper, parameter[]].replace, parameter[constant[-], constant[]]]
for taget[tuple[[<ast.Name object at 0x7da2044c3a60>, <ast.Name object at 0x7da2044c19c0>]]] in starred[name[self].pattern1] begin[:]
variable[text] assign[=] call[call[name[pattern].subn, parameter[name[repl], name[text]]]][constant[0]]
for taget[tuple[[<ast.Name object at 0x7da2044c2c20>, <ast.Name object at 0x7da2044c1780>]]] in starred[name[self].pattern2] begin[:]
variable[text] assign[=] call[call[name[pattern].subn, parameter[name[repl], name[text]]]][constant[0]]
for taget[tuple[[<ast.Name object at 0x7da2044c2350>, <ast.Name object at 0x7da2044c16c0>]]] in starred[name[self].pattern3] begin[:]
variable[text] assign[=] call[call[name[pattern].subn, parameter[name[repl], name[text]]]][constant[0]]
return[name[text]] | keyword[def] identifier[beta_code] ( identifier[self] , identifier[text] ):
literal[string]
identifier[text] = identifier[text] . identifier[upper] (). identifier[replace] ( literal[string] , literal[string] )
keyword[for] ( identifier[pattern] , identifier[repl] ) keyword[in] identifier[self] . identifier[pattern1] :
identifier[text] = identifier[pattern] . identifier[subn] ( identifier[repl] , identifier[text] )[ literal[int] ]
keyword[for] ( identifier[pattern] , identifier[repl] ) keyword[in] identifier[self] . identifier[pattern2] :
identifier[text] = identifier[pattern] . identifier[subn] ( identifier[repl] , identifier[text] )[ literal[int] ]
keyword[for] ( identifier[pattern] , identifier[repl] ) keyword[in] identifier[self] . identifier[pattern3] :
identifier[text] = identifier[pattern] . identifier[subn] ( identifier[repl] , identifier[text] )[ literal[int] ]
keyword[return] identifier[text] | def beta_code(self, text):
"""Replace method. Note: regex.subn() returns a tuple (new_string,
number_of_subs_made).
"""
text = text.upper().replace('-', '')
for (pattern, repl) in self.pattern1:
text = pattern.subn(repl, text)[0] # depends on [control=['for'], data=[]]
for (pattern, repl) in self.pattern2:
text = pattern.subn(repl, text)[0] # depends on [control=['for'], data=[]]
# remove third run, if punct list not used
for (pattern, repl) in self.pattern3:
text = pattern.subn(repl, text)[0] # depends on [control=['for'], data=[]]
return text |
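A hedged usage sketch for beta_code, assuming `replacer` is an instance of the converter class above with its pattern1/pattern2/pattern3 lists loaded; the instance name, input string, and exact output are illustrative.
# Hyphens are stripped and the input is upper-cased before substitution.
greek = replacer.beta_code('mh=nin a)/eide qea/')
print(greek)  # Unicode Greek output, depending on the loaded patterns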
def use_all_categories(self):
'''
Returns
-------
PriorFactory
'''
term_df = self.term_ranker.get_ranks()
self.priors += term_df.sum(axis=1).fillna(0.)
return self | def function[use_all_categories, parameter[self]]:
constant[
Returns
-------
PriorFactory
]
variable[term_df] assign[=] call[name[self].term_ranker.get_ranks, parameter[]]
<ast.AugAssign object at 0x7da1b1b84fd0>
return[name[self]] | keyword[def] identifier[use_all_categories] ( identifier[self] ):
literal[string]
identifier[term_df] = identifier[self] . identifier[term_ranker] . identifier[get_ranks] ()
identifier[self] . identifier[priors] += identifier[term_df] . identifier[sum] ( identifier[axis] = literal[int] ). identifier[fillna] ( literal[int] )
keyword[return] identifier[self] | def use_all_categories(self):
"""
Returns
-------
PriorFactory
"""
term_df = self.term_ranker.get_ranks()
self.priors += term_df.sum(axis=1).fillna(0.0)
return self |
def pre_save(sender, model):
""" Hash the password if being changed """
if isinstance(model, Model) and 'password' in model.dirty_fields:
model.salt, model.password = gen_salt_and_hash(model.password) | def function[pre_save, parameter[sender, model]]:
constant[ Hash the password if being changed ]
if <ast.BoolOp object at 0x7da2054a5840> begin[:]
<ast.Tuple object at 0x7da2054a7340> assign[=] call[name[gen_salt_and_hash], parameter[name[model].password]] | keyword[def] identifier[pre_save] ( identifier[sender] , identifier[model] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[model] , identifier[Model] ) keyword[and] literal[string] keyword[in] identifier[model] . identifier[dirty_fields] :
identifier[model] . identifier[salt] , identifier[model] . identifier[password] = identifier[gen_salt_and_hash] ( identifier[model] . identifier[password] ) | def pre_save(sender, model):
""" Hash the password if being changed """
if isinstance(model, Model) and 'password' in model.dirty_fields:
(model.salt, model.password) = gen_salt_and_hash(model.password) # depends on [control=['if'], data=[]] |
def fold_text_ctx(self, line):
"""
Folds text, via :class:`DomTerm`, if available.
Note that this temporarily overwrites self.lines.
:param str line: always visible
"""
if not self.dom_term:
self.__call__(line)
yield
return
self.lines, old_lines = [], self.lines # overwrite self.lines
yield # collect output (in new self.lines)
self.lines, new_lines = old_lines, self.lines # recover self.lines
hidden_text = "".join(new_lines)
import io
output_buf = io.StringIO()
prefix = ""
while line[:1] == " ":
prefix += " "
line = line[1:]
self.dom_term.fold_text(line, hidden=hidden_text, file=output_buf, align=len(prefix))
output_text = prefix[1:] + output_buf.getvalue()
self.lines.append(output_text) | def function[fold_text_ctx, parameter[self, line]]:
constant[
Folds text, via :class:`DomTerm`, if available.
Notes that this temporarily overwrites self.lines.
:param str line: always visible
]
if <ast.UnaryOp object at 0x7da1b24fe830> begin[:]
call[name[self].__call__, parameter[name[line]]]
<ast.Yield object at 0x7da1b24febf0>
return[None]
<ast.Tuple object at 0x7da1b24fdcf0> assign[=] tuple[[<ast.List object at 0x7da1b24fc160>, <ast.Attribute object at 0x7da1b24fee30>]]
<ast.Yield object at 0x7da1b24fd690>
<ast.Tuple object at 0x7da1b24fe0e0> assign[=] tuple[[<ast.Name object at 0x7da1b24fdd50>, <ast.Attribute object at 0x7da1b24fefb0>]]
variable[hidden_text] assign[=] call[constant[].join, parameter[name[new_lines]]]
import module[io]
variable[output_buf] assign[=] call[name[io].StringIO, parameter[]]
variable[prefix] assign[=] constant[]
while compare[call[name[line]][<ast.Slice object at 0x7da1b2445450>] equal[==] constant[ ]] begin[:]
<ast.AugAssign object at 0x7da1b2444310>
variable[line] assign[=] call[name[line]][<ast.Slice object at 0x7da1b2445f60>]
call[name[self].dom_term.fold_text, parameter[name[line]]]
variable[output_text] assign[=] binary_operation[call[name[prefix]][<ast.Slice object at 0x7da1b2444730>] + call[name[output_buf].getvalue, parameter[]]]
call[name[self].lines.append, parameter[name[output_text]]] | keyword[def] identifier[fold_text_ctx] ( identifier[self] , identifier[line] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[dom_term] :
identifier[self] . identifier[__call__] ( identifier[line] )
keyword[yield]
keyword[return]
identifier[self] . identifier[lines] , identifier[old_lines] =[], identifier[self] . identifier[lines]
keyword[yield]
identifier[self] . identifier[lines] , identifier[new_lines] = identifier[old_lines] , identifier[self] . identifier[lines]
identifier[hidden_text] = literal[string] . identifier[join] ( identifier[new_lines] )
keyword[import] identifier[io]
identifier[output_buf] = identifier[io] . identifier[StringIO] ()
identifier[prefix] = literal[string]
keyword[while] identifier[line] [: literal[int] ]== literal[string] :
identifier[prefix] += literal[string]
identifier[line] = identifier[line] [ literal[int] :]
identifier[self] . identifier[dom_term] . identifier[fold_text] ( identifier[line] , identifier[hidden] = identifier[hidden_text] , identifier[file] = identifier[output_buf] , identifier[align] = identifier[len] ( identifier[prefix] ))
identifier[output_text] = identifier[prefix] [ literal[int] :]+ identifier[output_buf] . identifier[getvalue] ()
identifier[self] . identifier[lines] . identifier[append] ( identifier[output_text] ) | def fold_text_ctx(self, line):
"""
Folds text, via :class:`DomTerm`, if available.
Note that this temporarily overwrites self.lines.
:param str line: always visible
"""
if not self.dom_term:
self.__call__(line)
yield
return # depends on [control=['if'], data=[]]
(self.lines, old_lines) = ([], self.lines) # overwrite self.lines
yield # collect output (in new self.lines)
(self.lines, new_lines) = (old_lines, self.lines) # recover self.lines
hidden_text = ''.join(new_lines)
import io
output_buf = io.StringIO()
prefix = ''
while line[:1] == ' ':
prefix += ' '
line = line[1:] # depends on [control=['while'], data=[]]
self.dom_term.fold_text(line, hidden=hidden_text, file=output_buf, align=len(prefix))
output_text = prefix[1:] + output_buf.getvalue()
self.lines.append(output_text) |
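A sketch of typical use; fold_text_ctx is a generator, so it is assumed to be wrapped with contextlib.contextmanager, and `log` is a hypothetical instance of the class above (whose __call__ appends a line).
from contextlib import contextmanager
with contextmanager(log.fold_text_ctx)('  build output'):  # leading spaces set the fold indent
    log('step 1 done')  # collected into the hidden, foldable text
    log('step 2 done')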
def pin(value):
'''A small pin that represents the result of the build process'''
if value is False:
return draw_pin('Build Failed', 'red')
elif value is True:
return draw_pin('Build Passed')
elif value is NOT_FOUND:
return draw_pin('Build N / A', 'lightGray', 'black')
return draw_pin('In progress ...', 'lightGray', 'black') | def function[pin, parameter[value]]:
constant[A small pin that represents the result of the build process]
if compare[name[value] is constant[False]] begin[:]
return[call[name[draw_pin], parameter[constant[Build Failed], constant[red]]]]
return[call[name[draw_pin], parameter[constant[In progress ...], constant[lightGray], constant[black]]]] | keyword[def] identifier[pin] ( identifier[value] ):
literal[string]
keyword[if] identifier[value] keyword[is] keyword[False] :
keyword[return] identifier[draw_pin] ( literal[string] , literal[string] )
keyword[elif] identifier[value] keyword[is] keyword[True] :
keyword[return] identifier[draw_pin] ( literal[string] )
keyword[elif] identifier[value] keyword[is] identifier[NOT_FOUND] :
keyword[return] identifier[draw_pin] ( literal[string] , literal[string] , literal[string] )
keyword[return] identifier[draw_pin] ( literal[string] , literal[string] , literal[string] ) | def pin(value):
"""A small pin that represents the result of the build process"""
if value is False:
return draw_pin('Build Failed', 'red') # depends on [control=['if'], data=[]]
elif value is True:
return draw_pin('Build Passed') # depends on [control=['if'], data=[]]
elif value is NOT_FOUND:
return draw_pin('Build N / A', 'lightGray', 'black') # depends on [control=['if'], data=[]]
return draw_pin('In progress ...', 'lightGray', 'black') |
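Simple calls exercising each branch of pin; draw_pin is assumed to return badge markup.
pin(True)       # 'Build Passed'
pin(False)      # red 'Build Failed'
pin(NOT_FOUND)  # gray 'Build N / A'
pin(None)       # gray 'In progress ...' (any other value falls through)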
def set_hyperparams(self, new_params):
"""Set the (free) hyperparameters.
Parameters
----------
new_params : :py:class:`Array` or other Array-like
New values of the free parameters.
Raises
------
ValueError
If the length of `new_params` is not consistent with :py:attr:`self.free_params`.
"""
new_params = scipy.asarray(new_params, dtype=float)
if len(new_params) == len(self.free_params):
num_free_k = sum(~self.k.fixed_params)
self.k.set_hyperparams(new_params[:num_free_k])
self.w.set_hyperparams(new_params[num_free_k:])
else:
raise ValueError("Length of new_params must be %s!" % (len(self.free_params),)) | def function[set_hyperparams, parameter[self, new_params]]:
constant[Set the (free) hyperparameters.
Parameters
----------
new_params : :py:class:`Array` or other Array-like
New values of the free parameters.
Raises
------
ValueError
If the length of `new_params` is not consistent with :py:attr:`self.free_params`.
]
variable[new_params] assign[=] call[name[scipy].asarray, parameter[name[new_params]]]
if compare[call[name[len], parameter[name[new_params]]] equal[==] call[name[len], parameter[name[self].free_params]]] begin[:]
variable[num_free_k] assign[=] call[name[sum], parameter[<ast.UnaryOp object at 0x7da20c992fe0>]]
call[name[self].k.set_hyperparams, parameter[call[name[new_params]][<ast.Slice object at 0x7da20c993820>]]]
call[name[self].w.set_hyperparams, parameter[call[name[new_params]][<ast.Slice object at 0x7da20c990f40>]]] | keyword[def] identifier[set_hyperparams] ( identifier[self] , identifier[new_params] ):
literal[string]
identifier[new_params] = identifier[scipy] . identifier[asarray] ( identifier[new_params] , identifier[dtype] = identifier[float] )
keyword[if] identifier[len] ( identifier[new_params] )== identifier[len] ( identifier[self] . identifier[free_params] ):
identifier[num_free_k] = identifier[sum] (~ identifier[self] . identifier[k] . identifier[fixed_params] )
identifier[self] . identifier[k] . identifier[set_hyperparams] ( identifier[new_params] [: identifier[num_free_k] ])
identifier[self] . identifier[w] . identifier[set_hyperparams] ( identifier[new_params] [ identifier[num_free_k] :])
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] %( identifier[len] ( identifier[self] . identifier[free_params] ),)) | def set_hyperparams(self, new_params):
"""Set the (free) hyperparameters.
Parameters
----------
new_params : :py:class:`Array` or other Array-like
New values of the free parameters.
Raises
------
ValueError
If the length of `new_params` is not consistent with :py:attr:`self.free_params`.
"""
new_params = scipy.asarray(new_params, dtype=float)
if len(new_params) == len(self.free_params):
num_free_k = sum(~self.k.fixed_params)
self.k.set_hyperparams(new_params[:num_free_k])
self.w.set_hyperparams(new_params[num_free_k:]) # depends on [control=['if'], data=[]]
else:
raise ValueError('Length of new_params must be %s!' % (len(self.free_params),)) |
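A sketch of the split performed above, assuming `ck` is a composite kernel whose `k` part exposes two free hyperparameters and whose `w` part exposes one; the instance and values are illustrative.
import scipy
ck.set_hyperparams(scipy.asarray([1.0, 0.5, 2.0]))  # [1.0, 0.5] -> ck.k, [2.0] -> ck.w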
def _get_admin_app_list_url(self, model, context):
"""
Returns the admin app list url.
"""
app_label = model._meta.app_label
return reverse('%s:app_list' % get_admin_site_name(context),
args=(app_label,)) | def function[_get_admin_app_list_url, parameter[self, model, context]]:
constant[
Returns the admin app list url.
]
variable[app_label] assign[=] name[model]._meta.app_label
return[call[name[reverse], parameter[binary_operation[constant[%s:app_list] <ast.Mod object at 0x7da2590d6920> call[name[get_admin_site_name], parameter[name[context]]]]]]] | keyword[def] identifier[_get_admin_app_list_url] ( identifier[self] , identifier[model] , identifier[context] ):
literal[string]
identifier[app_label] = identifier[model] . identifier[_meta] . identifier[app_label]
keyword[return] identifier[reverse] ( literal[string] % identifier[get_admin_site_name] ( identifier[context] ),
identifier[args] =( identifier[app_label] ,)) | def _get_admin_app_list_url(self, model, context):
"""
Returns the admin app list url.
"""
app_label = model._meta.app_label
return reverse('%s:app_list' % get_admin_site_name(context), args=(app_label,)) |
def add_metaclass(metaclass):
"""Class decorator for creating a class with a metaclass."""
def wrapper(cls):
orig_vars = cls.__dict__.copy()
orig_vars.pop('__dict__', None)
orig_vars.pop('__weakref__', None)
return metaclass(cls.__name__, cls.__bases__, orig_vars)
return wrapper | def function[add_metaclass, parameter[metaclass]]:
constant[Class decorator for creating a class with a metaclass.]
def function[wrapper, parameter[cls]]:
variable[orig_vars] assign[=] call[name[cls].__dict__.copy, parameter[]]
call[name[orig_vars].pop, parameter[constant[__dict__], constant[None]]]
call[name[orig_vars].pop, parameter[constant[__weakref__], constant[None]]]
return[call[name[metaclass], parameter[name[cls].__name__, name[cls].__bases__, name[orig_vars]]]]
return[name[wrapper]] | keyword[def] identifier[add_metaclass] ( identifier[metaclass] ):
literal[string]
keyword[def] identifier[wrapper] ( identifier[cls] ):
identifier[orig_vars] = identifier[cls] . identifier[__dict__] . identifier[copy] ()
identifier[orig_vars] . identifier[pop] ( literal[string] , keyword[None] )
identifier[orig_vars] . identifier[pop] ( literal[string] , keyword[None] )
keyword[return] identifier[metaclass] ( identifier[cls] . identifier[__name__] , identifier[cls] . identifier[__bases__] , identifier[orig_vars] )
keyword[return] identifier[wrapper] | def add_metaclass(metaclass):
"""Class decorator for creating a class with a metaclass."""
def wrapper(cls):
orig_vars = cls.__dict__.copy()
orig_vars.pop('__dict__', None)
orig_vars.pop('__weakref__', None)
return metaclass(cls.__name__, cls.__bases__, orig_vars)
return wrapper |
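The canonical usage pattern for this decorator (it mirrors six.add_metaclass), applying a metaclass portably on both Python 2 and 3:
class Meta(type):
    def __new__(mcls, name, bases, ns):
        ns['tagged'] = True  # the metaclass stamps every class it creates
        return super(Meta, mcls).__new__(mcls, name, bases, ns)

@add_metaclass(Meta)
class Widget(object):
    pass

assert type(Widget) is Meta and Widget.tagged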
def write_table(self):
"""
|write_table|.
.. note::
- |None| values are written as an empty string.
"""
super(TextTableWriter, self).write_table()
if self.is_write_null_line_after_table:
self.write_null_line() | def function[write_table, parameter[self]]:
constant[
|write_table|.
.. note::
- |None| values are written as an empty string.
]
call[call[name[super], parameter[name[TextTableWriter], name[self]]].write_table, parameter[]]
if name[self].is_write_null_line_after_table begin[:]
call[name[self].write_null_line, parameter[]] | keyword[def] identifier[write_table] ( identifier[self] ):
literal[string]
identifier[super] ( identifier[TextTableWriter] , identifier[self] ). identifier[write_table] ()
keyword[if] identifier[self] . identifier[is_write_null_line_after_table] :
identifier[self] . identifier[write_null_line] () | def write_table(self):
"""
|write_table|.
.. note::
- |None| values are written as an empty string.
"""
super(TextTableWriter, self).write_table()
if self.is_write_null_line_after_table:
self.write_null_line() # depends on [control=['if'], data=[]] |
def _filter_values(vals, vlist=None, must=False):
""" Removes values from *vals* that does not appear in vlist
:param vals: The values that are to be filtered
:param vlist: required or optional value
:param must: Whether the allowed values must appear
:return: The set of values after filtering
"""
if not vlist: # No value specified equals any value
return vals
if isinstance(vlist, six.string_types):
vlist = [vlist]
res = []
for val in vlist:
if val in vals:
res.append(val)
if must:
if res:
return res
else:
raise MissingValue("Required attribute value missing")
else:
return res | def function[_filter_values, parameter[vals, vlist, must]]:
constant[ Removes values from *vals* that do not appear in vlist
:param vals: The values that are to be filtered
:param vlist: required or optional value
:param must: Whether the allowed values must appear
:return: The set of values after filtering
]
if <ast.UnaryOp object at 0x7da1b1d55390> begin[:]
return[name[vals]]
if call[name[isinstance], parameter[name[vlist], name[six].string_types]] begin[:]
variable[vlist] assign[=] list[[<ast.Name object at 0x7da1b1d56230>]]
variable[res] assign[=] list[[]]
for taget[name[val]] in starred[name[vlist]] begin[:]
if compare[name[val] in name[vals]] begin[:]
call[name[res].append, parameter[name[val]]]
if name[must] begin[:]
if name[res] begin[:]
return[name[res]] | keyword[def] identifier[_filter_values] ( identifier[vals] , identifier[vlist] = keyword[None] , identifier[must] = keyword[False] ):
literal[string]
keyword[if] keyword[not] identifier[vlist] :
keyword[return] identifier[vals]
keyword[if] identifier[isinstance] ( identifier[vlist] , identifier[six] . identifier[string_types] ):
identifier[vlist] =[ identifier[vlist] ]
identifier[res] =[]
keyword[for] identifier[val] keyword[in] identifier[vlist] :
keyword[if] identifier[val] keyword[in] identifier[vals] :
identifier[res] . identifier[append] ( identifier[val] )
keyword[if] identifier[must] :
keyword[if] identifier[res] :
keyword[return] identifier[res]
keyword[else] :
keyword[raise] identifier[MissingValue] ( literal[string] )
keyword[else] :
keyword[return] identifier[res] | def _filter_values(vals, vlist=None, must=False):
""" Removes values from *vals* that does not appear in vlist
:param vals: The values that are to be filtered
:param vlist: required or optional value
:param must: Whether the allowed values must appear
:return: The set of values after filtering
"""
if not vlist: # No value specified equals any value
return vals # depends on [control=['if'], data=[]]
if isinstance(vlist, six.string_types):
vlist = [vlist] # depends on [control=['if'], data=[]]
res = []
for val in vlist:
if val in vals:
res.append(val) # depends on [control=['if'], data=['val']] # depends on [control=['for'], data=['val']]
if must:
if res:
return res # depends on [control=['if'], data=[]]
else:
raise MissingValue('Required attribute value missing') # depends on [control=['if'], data=[]]
else:
return res |
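Small calls showing the three behaviours of _filter_values: pass-through, intersection, and the `must` failure path.
vals = ['a', 'b', 'c']
_filter_values(vals)                  # ['a', 'b', 'c']: no vlist means any value
_filter_values(vals, 'b')             # ['b']: a bare string is wrapped in a list
_filter_values(vals, ['b', 'x'])      # ['b']: only overlapping values survive
_filter_values(vals, 'x', must=True)  # raises MissingValue: required value absent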
def add_formats_by_name(self, rfmt_list):
"""
adds formats by short label descriptors, such as 'txt', 'json', or
'html'
"""
for fmt in rfmt_list:
if fmt == "json":
self.add_report_format(JSONReportFormat)
elif fmt in ("txt", "text"):
self.add_report_format(TextReportFormat)
elif fmt in ("htm", "html"):
self.add_report_format(CheetahReportFormat) | def function[add_formats_by_name, parameter[self, rfmt_list]]:
constant[
adds formats by short label descriptors, such as 'txt', 'json', or
'html'
]
for taget[name[fmt]] in starred[name[rfmt_list]] begin[:]
if compare[name[fmt] equal[==] constant[json]] begin[:]
call[name[self].add_report_format, parameter[name[JSONReportFormat]]] | keyword[def] identifier[add_formats_by_name] ( identifier[self] , identifier[rfmt_list] ):
literal[string]
keyword[for] identifier[fmt] keyword[in] identifier[rfmt_list] :
keyword[if] identifier[fmt] == literal[string] :
identifier[self] . identifier[add_report_format] ( identifier[JSONReportFormat] )
keyword[elif] identifier[fmt] keyword[in] ( literal[string] , literal[string] ):
identifier[self] . identifier[add_report_format] ( identifier[TextReportFormat] )
keyword[elif] identifier[fmt] keyword[in] ( literal[string] , literal[string] ):
identifier[self] . identifier[add_report_format] ( identifier[CheetahReportFormat] ) | def add_formats_by_name(self, rfmt_list):
"""
adds formats by short label descriptors, such as 'txt', 'json', or
'html'
"""
for fmt in rfmt_list:
if fmt == 'json':
self.add_report_format(JSONReportFormat) # depends on [control=['if'], data=[]]
elif fmt in ('txt', 'text'):
self.add_report_format(TextReportFormat) # depends on [control=['if'], data=[]]
elif fmt in ('htm', 'html'):
self.add_report_format(CheetahReportFormat) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['fmt']] |
def remove(self, key, value):
"""
Transactional implementation of :func:`MultiMap.remove(key, value)
<hazelcast.proxy.multi_map.MultiMap.remove>`
:param key: (object), the key of the entry to remove.
:param value: (object), the value of the entry to remove.
:return:
"""
check_not_none(key, "key can't be none")
check_not_none(value, "value can't be none")
return self._encode_invoke(transactional_multi_map_remove_entry_codec, key=self._to_data(key),
value=self._to_data(value)) | def function[remove, parameter[self, key, value]]:
constant[
Transactional implementation of :func:`MultiMap.remove(key, value)
<hazelcast.proxy.multi_map.MultiMap.remove>`
:param key: (object), the key of the entry to remove.
:param value: (object), the value of the entry to remove.
:return:
]
call[name[check_not_none], parameter[name[key], constant[key can't be none]]]
call[name[check_not_none], parameter[name[value], constant[value can't be none]]]
return[call[name[self]._encode_invoke, parameter[name[transactional_multi_map_remove_entry_codec]]]] | keyword[def] identifier[remove] ( identifier[self] , identifier[key] , identifier[value] ):
literal[string]
identifier[check_not_none] ( identifier[key] , literal[string] )
identifier[check_not_none] ( identifier[value] , literal[string] )
keyword[return] identifier[self] . identifier[_encode_invoke] ( identifier[transactional_multi_map_remove_entry_codec] , identifier[key] = identifier[self] . identifier[_to_data] ( identifier[key] ),
identifier[value] = identifier[self] . identifier[_to_data] ( identifier[value] )) | def remove(self, key, value):
"""
Transactional implementation of :func:`MultiMap.remove(key, value)
<hazelcast.proxy.multi_map.MultiMap.remove>`
:param key: (object), the key of the entry to remove.
:param value: (object), the value of the entry to remove.
:return:
"""
check_not_none(key, "key can't be none")
check_not_none(value, "value can't be none")
return self._encode_invoke(transactional_multi_map_remove_entry_codec, key=self._to_data(key), value=self._to_data(value)) |
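A hedged sketch of driving this method from a Hazelcast transaction; the transaction API shape, the client variable, and the map name are assumptions, not taken from the source.
tx = client.new_transaction()  # `client` is assumed to be a connected HazelcastClient
tx.begin()
tx.get_multi_map('events').remove('user-1', 'login')  # drop one key/value pair
tx.commit()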
def transmit(self, what=None, to_whom=None):
"""Transmit one or more infos from one node to another.
"what" dictates which infos are sent, it can be:
(1) None (in which case the node's _what method is called).
(2) an Info (in which case the node transmits the info)
(3) a subclass of Info (in which case the node transmits all
its infos of that type)
(4) a list of any combination of the above
"to_whom" dictates which node(s) the infos are sent to, it can be:
(1) None (in which case the node's _to_whom method is called)
(2) a Node (in which case the node transmits to that node)
(3) a subclass of Node (in which case the node transmits to all
nodes of that type it is connected to)
(4) a list of any combination of the above
Will additionally raise an error if:
(1) _what() or _to_whom() returns None or a list containing None.
(2) what is/contains an info that does not originate from the
transmitting node
(3) to_whom is/contains a node that the transmitting node does not
have a not-failed connection with.
"""
# make the list of what
what = self.flatten([what])
for i in range(len(what)):
if what[i] is None:
what[i] = self._what()
elif inspect.isclass(what[i]) and issubclass(what[i], Info):
what[i] = self.infos(type=what[i])
what = self.flatten(what)
for i in range(len(what)):
if inspect.isclass(what[i]) and issubclass(what[i], Info):
what[i] = self.infos(type=what[i])
what = list(set(self.flatten(what)))
# make the list of to_whom
to_whom = self.flatten([to_whom])
for i in range(len(to_whom)):
if to_whom[i] is None:
to_whom[i] = self._to_whom()
elif inspect.isclass(to_whom[i]) and issubclass(to_whom[i], Node):
to_whom[i] = self.neighbors(direction="to", type=to_whom[i])
to_whom = self.flatten(to_whom)
for i in range(len(to_whom)):
if inspect.isclass(to_whom[i]) and issubclass(to_whom[i], Node):
to_whom[i] = self.neighbors(direction="to", type=to_whom[i])
to_whom = list(set(self.flatten(to_whom)))
transmissions = []
vectors = self.vectors(direction="outgoing")
for w in what:
for tw in to_whom:
try:
vector = [v for v in vectors
if v.destination_id == tw.id][0]
except:
raise ValueError(
"{} cannot transmit to {} as it does not have "
"a connection to them".format(self, tw))
t = Transmission(info=w, vector=vector)
transmissions.append(t)
if len(transmissions) == 1:
return transmissions[0]
else:
return transmissions | def function[transmit, parameter[self, what, to_whom]]:
constant[Transmit one or more infos from one node to another.
"what" dictates which infos are sent, it can be:
(1) None (in which case the node's _what method is called).
(2) an Info (in which case the node transmits the info)
(3) a subclass of Info (in which case the node transmits all
its infos of that type)
(4) a list of any combination of the above
"to_whom" dictates which node(s) the infos are sent to, it can be:
(1) None (in which case the node's _to_whom method is called)
(2) a Node (in which case the node transmits to that node)
(3) a subclass of Node (in which case the node transmits to all
nodes of that type it is connected to)
(4) a list of any combination of the above
Will additionally raise an error if:
(1) _what() or _to_whom() returns None or a list containing None.
(2) what is/contains an info that does not originate from the
transmitting node
(3) to_whom is/contains a node that the transmitting node does not
have a not-failed connection with.
]
variable[what] assign[=] call[name[self].flatten, parameter[list[[<ast.Name object at 0x7da1b24c9870>]]]]
for taget[name[i]] in starred[call[name[range], parameter[call[name[len], parameter[name[what]]]]]] begin[:]
if compare[call[name[what]][name[i]] is constant[None]] begin[:]
call[name[what]][name[i]] assign[=] call[name[self]._what, parameter[]]
variable[what] assign[=] call[name[self].flatten, parameter[name[what]]]
for taget[name[i]] in starred[call[name[range], parameter[call[name[len], parameter[name[what]]]]]] begin[:]
if <ast.BoolOp object at 0x7da1b24cb0d0> begin[:]
call[name[what]][name[i]] assign[=] call[name[self].infos, parameter[]]
variable[what] assign[=] call[name[list], parameter[call[name[set], parameter[call[name[self].flatten, parameter[name[what]]]]]]]
variable[to_whom] assign[=] call[name[self].flatten, parameter[list[[<ast.Name object at 0x7da1b24cbf70>]]]]
for taget[name[i]] in starred[call[name[range], parameter[call[name[len], parameter[name[to_whom]]]]]] begin[:]
if compare[call[name[to_whom]][name[i]] is constant[None]] begin[:]
call[name[to_whom]][name[i]] assign[=] call[name[self]._to_whom, parameter[]]
variable[to_whom] assign[=] call[name[self].flatten, parameter[name[to_whom]]]
for taget[name[i]] in starred[call[name[range], parameter[call[name[len], parameter[name[to_whom]]]]]] begin[:]
if <ast.BoolOp object at 0x7da2046227a0> begin[:]
call[name[to_whom]][name[i]] assign[=] call[name[self].neighbors, parameter[]]
variable[to_whom] assign[=] call[name[list], parameter[call[name[set], parameter[call[name[self].flatten, parameter[name[to_whom]]]]]]]
variable[transmissions] assign[=] list[[]]
variable[vectors] assign[=] call[name[self].vectors, parameter[]]
for taget[name[w]] in starred[name[what]] begin[:]
for taget[name[tw]] in starred[name[to_whom]] begin[:]
<ast.Try object at 0x7da18bcc9ba0>
variable[t] assign[=] call[name[Transmission], parameter[]]
call[name[transmissions].append, parameter[name[t]]]
if compare[call[name[len], parameter[name[transmissions]]] equal[==] constant[1]] begin[:]
return[call[name[transmissions]][constant[0]]] | keyword[def] identifier[transmit] ( identifier[self] , identifier[what] = keyword[None] , identifier[to_whom] = keyword[None] ):
literal[string]
identifier[what] = identifier[self] . identifier[flatten] ([ identifier[what] ])
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[what] )):
keyword[if] identifier[what] [ identifier[i] ] keyword[is] keyword[None] :
identifier[what] [ identifier[i] ]= identifier[self] . identifier[_what] ()
keyword[elif] identifier[inspect] . identifier[isclass] ( identifier[what] [ identifier[i] ]) keyword[and] identifier[issubclass] ( identifier[what] [ identifier[i] ], identifier[Info] ):
identifier[what] [ identifier[i] ]= identifier[self] . identifier[infos] ( identifier[type] = identifier[what] [ identifier[i] ])
identifier[what] = identifier[self] . identifier[flatten] ( identifier[what] )
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[what] )):
keyword[if] identifier[inspect] . identifier[isclass] ( identifier[what] [ identifier[i] ]) keyword[and] identifier[issubclass] ( identifier[what] [ identifier[i] ], identifier[Info] ):
identifier[what] [ identifier[i] ]= identifier[self] . identifier[infos] ( identifier[type] = identifier[what] [ identifier[i] ])
identifier[what] = identifier[list] ( identifier[set] ( identifier[self] . identifier[flatten] ( identifier[what] )))
identifier[to_whom] = identifier[self] . identifier[flatten] ([ identifier[to_whom] ])
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[to_whom] )):
keyword[if] identifier[to_whom] [ identifier[i] ] keyword[is] keyword[None] :
identifier[to_whom] [ identifier[i] ]= identifier[self] . identifier[_to_whom] ()
keyword[elif] identifier[inspect] . identifier[isclass] ( identifier[to_whom] [ identifier[i] ]) keyword[and] identifier[issubclass] ( identifier[to_whom] [ identifier[i] ], identifier[Node] ):
identifier[to_whom] [ identifier[i] ]= identifier[self] . identifier[neighbors] ( identifier[direction] = literal[string] , identifier[type] = identifier[to_whom] [ identifier[i] ])
identifier[to_whom] = identifier[self] . identifier[flatten] ( identifier[to_whom] )
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[to_whom] )):
keyword[if] identifier[inspect] . identifier[isclass] ( identifier[to_whom] [ identifier[i] ]) keyword[and] identifier[issubclass] ( identifier[to_whom] [ identifier[i] ], identifier[Node] ):
identifier[to_whom] [ identifier[i] ]= identifier[self] . identifier[neighbors] ( identifier[direction] = literal[string] , identifier[type] = identifier[to_whom] [ identifier[i] ])
identifier[to_whom] = identifier[list] ( identifier[set] ( identifier[self] . identifier[flatten] ( identifier[to_whom] )))
identifier[transmissions] =[]
identifier[vectors] = identifier[self] . identifier[vectors] ( identifier[direction] = literal[string] )
keyword[for] identifier[w] keyword[in] identifier[what] :
keyword[for] identifier[tw] keyword[in] identifier[to_whom] :
keyword[try] :
identifier[vector] =[ identifier[v] keyword[for] identifier[v] keyword[in] identifier[vectors]
keyword[if] identifier[v] . identifier[destination_id] == identifier[tw] . identifier[id] ][ literal[int] ]
keyword[except] :
keyword[raise] identifier[ValueError] (
literal[string]
literal[string] . identifier[format] ( identifier[self] , identifier[tw] ))
identifier[t] = identifier[Transmission] ( identifier[info] = identifier[w] , identifier[vector] = identifier[vector] )
identifier[transmissions] . identifier[append] ( identifier[t] )
keyword[if] identifier[len] ( identifier[transmissions] )== literal[int] :
keyword[return] identifier[transmissions] [ literal[int] ]
keyword[else] :
keyword[return] identifier[transmissions] | def transmit(self, what=None, to_whom=None):
"""Transmit one or more infos from one node to another.
"what" dictates which infos are sent, it can be:
(1) None (in which case the node's _what method is called).
(2) an Info (in which case the node transmits the info)
(3) a subclass of Info (in which case the node transmits all
its infos of that type)
(4) a list of any combination of the above
"to_whom" dictates which node(s) the infos are sent to, it can be:
(1) None (in which case the node's _to_whom method is called)
(2) a Node (in which case the node transmits to that node)
(3) a subclass of Node (in which case the node transmits to all
nodes of that type it is connected to)
(4) a list of any combination of the above
Will additionally raise an error if:
(1) _what() or _to_whom() returns None or a list containing None.
(2) what is/contains an info that does not originate from the
transmitting node
(3) to_whom is/contains a node that the transmitting node does not
have a not-failed connection with.
"""
# make the list of what
what = self.flatten([what])
for i in range(len(what)):
if what[i] is None:
what[i] = self._what() # depends on [control=['if'], data=[]]
elif inspect.isclass(what[i]) and issubclass(what[i], Info):
what[i] = self.infos(type=what[i]) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']]
what = self.flatten(what)
for i in range(len(what)):
if inspect.isclass(what[i]) and issubclass(what[i], Info):
what[i] = self.infos(type=what[i]) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']]
what = list(set(self.flatten(what)))
# make the list of to_whom
to_whom = self.flatten([to_whom])
for i in range(len(to_whom)):
if to_whom[i] is None:
to_whom[i] = self._to_whom() # depends on [control=['if'], data=[]]
elif inspect.isclass(to_whom[i]) and issubclass(to_whom[i], Node):
to_whom[i] = self.neighbors(direction='to', type=to_whom[i]) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']]
to_whom = self.flatten(to_whom)
for i in range(len(to_whom)):
if inspect.isclass(to_whom[i]) and issubclass(to_whom[i], Node):
to_whom[i] = self.neighbors(direction='to', type=to_whom[i]) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']]
to_whom = list(set(self.flatten(to_whom)))
transmissions = []
vectors = self.vectors(direction='outgoing')
for w in what:
for tw in to_whom:
try:
vector = [v for v in vectors if v.destination_id == tw.id][0] # depends on [control=['try'], data=[]]
except:
raise ValueError('{} cannot transmit to {} as it does not have a connection to them'.format(self, tw)) # depends on [control=['except'], data=[]]
t = Transmission(info=w, vector=vector)
transmissions.append(t) # depends on [control=['for'], data=['tw']] # depends on [control=['for'], data=['w']]
if len(transmissions) == 1:
return transmissions[0] # depends on [control=['if'], data=[]]
else:
return transmissions |
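Illustrative calls covering the argument shapes transmit accepts; `node`, `info`, and `other` are hypothetical, already-connected objects.
node.transmit()                          # node._what() sent to node._to_whom()
node.transmit(what=info, to_whom=other)  # one Info to one connected Node
node.transmit(what=Info, to_whom=Node)   # all Infos of a type to all connected Nodes of a type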
def published(self, for_user=None):
"""
For non-staff users, return items with a published status and
whose publish and expiry dates fall before and after the
current date when specified.
"""
from yacms.core.models import CONTENT_STATUS_PUBLISHED
if for_user is not None and for_user.is_staff:
return self.all()
return self.filter(
Q(publish_date__lte=now()) | Q(publish_date__isnull=True),
Q(expiry_date__gte=now()) | Q(expiry_date__isnull=True),
Q(status=CONTENT_STATUS_PUBLISHED)) | def function[published, parameter[self, for_user]]:
constant[
For non-staff users, return items with a published status and
whose publish and expiry dates fall before and after the
current date when specified.
]
from relative_module[yacms.core.models] import module[CONTENT_STATUS_PUBLISHED]
if <ast.BoolOp object at 0x7da2047ea650> begin[:]
return[call[name[self].all, parameter[]]]
return[call[name[self].filter, parameter[binary_operation[call[name[Q], parameter[]] <ast.BitOr object at 0x7da2590d6aa0> call[name[Q], parameter[]]], binary_operation[call[name[Q], parameter[]] <ast.BitOr object at 0x7da2590d6aa0> call[name[Q], parameter[]]], call[name[Q], parameter[]]]]] | keyword[def] identifier[published] ( identifier[self] , identifier[for_user] = keyword[None] ):
literal[string]
keyword[from] identifier[yacms] . identifier[core] . identifier[models] keyword[import] identifier[CONTENT_STATUS_PUBLISHED]
keyword[if] identifier[for_user] keyword[is] keyword[not] keyword[None] keyword[and] identifier[for_user] . identifier[is_staff] :
keyword[return] identifier[self] . identifier[all] ()
keyword[return] identifier[self] . identifier[filter] (
identifier[Q] ( identifier[publish_date__lte] = identifier[now] ())| identifier[Q] ( identifier[publish_date__isnull] = keyword[True] ),
identifier[Q] ( identifier[expiry_date__gte] = identifier[now] ())| identifier[Q] ( identifier[expiry_date__isnull] = keyword[True] ),
identifier[Q] ( identifier[status] = identifier[CONTENT_STATUS_PUBLISHED] )) | def published(self, for_user=None):
"""
For non-staff users, return items with a published status and
whose publish and expiry dates fall before and after the
current date when specified.
"""
from yacms.core.models import CONTENT_STATUS_PUBLISHED
if for_user is not None and for_user.is_staff:
return self.all() # depends on [control=['if'], data=[]]
return self.filter(Q(publish_date__lte=now()) | Q(publish_date__isnull=True), Q(expiry_date__gte=now()) | Q(expiry_date__isnull=True), Q(status=CONTENT_STATUS_PUBLISHED)) |
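A sketch of the two paths through the manager method; `Page` and `request.user` are hypothetical.
Page.objects.published()                       # status + publish/expiry window enforced
Page.objects.published(for_user=request.user)  # staff users bypass the filter entirely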
def calc_buffered_bounds(
format, bounds, meters_per_pixel_dim, layer_name, geometry_type,
buffer_cfg):
"""
Calculate the buffered bounds per format per layer based on config.
"""
if not buffer_cfg:
return bounds
format_buffer_cfg = buffer_cfg.get(format.extension)
if format_buffer_cfg is None:
return bounds
geometry_type = normalize_geometry_type(geometry_type)
per_layer_cfg = format_buffer_cfg.get('layer', {}).get(layer_name)
if per_layer_cfg is not None:
layer_geom_pixels = per_layer_cfg.get(geometry_type)
if layer_geom_pixels is not None:
assert isinstance(layer_geom_pixels, Number)
result = bounds_buffer(
bounds, meters_per_pixel_dim * layer_geom_pixels)
return result
by_geometry_pixels = format_buffer_cfg.get('geometry', {}).get(
geometry_type)
if by_geometry_pixels is not None:
assert isinstance(by_geometry_pixels, Number)
result = bounds_buffer(
bounds, meters_per_pixel_dim * by_geometry_pixels)
return result
return bounds | def function[calc_buffered_bounds, parameter[format, bounds, meters_per_pixel_dim, layer_name, geometry_type, buffer_cfg]]:
constant[
Calculate the buffered bounds per format per layer based on config.
]
if <ast.UnaryOp object at 0x7da20c6c65c0> begin[:]
return[name[bounds]]
variable[format_buffer_cfg] assign[=] call[name[buffer_cfg].get, parameter[name[format].extension]]
if compare[name[format_buffer_cfg] is constant[None]] begin[:]
return[name[bounds]]
variable[geometry_type] assign[=] call[name[normalize_geometry_type], parameter[name[geometry_type]]]
variable[per_layer_cfg] assign[=] call[call[name[format_buffer_cfg].get, parameter[constant[layer], dictionary[[], []]]].get, parameter[name[layer_name]]]
if compare[name[per_layer_cfg] is_not constant[None]] begin[:]
variable[layer_geom_pixels] assign[=] call[name[per_layer_cfg].get, parameter[name[geometry_type]]]
if compare[name[layer_geom_pixels] is_not constant[None]] begin[:]
assert[call[name[isinstance], parameter[name[layer_geom_pixels], name[Number]]]]
variable[result] assign[=] call[name[bounds_buffer], parameter[name[bounds], binary_operation[name[meters_per_pixel_dim] * name[layer_geom_pixels]]]]
return[name[result]]
variable[by_geometry_pixels] assign[=] call[call[name[format_buffer_cfg].get, parameter[constant[geometry], dictionary[[], []]]].get, parameter[name[geometry_type]]]
if compare[name[by_geometry_pixels] is_not constant[None]] begin[:]
assert[call[name[isinstance], parameter[name[by_geometry_pixels], name[Number]]]]
variable[result] assign[=] call[name[bounds_buffer], parameter[name[bounds], binary_operation[name[meters_per_pixel_dim] * name[by_geometry_pixels]]]]
return[name[result]]
return[name[bounds]] | keyword[def] identifier[calc_buffered_bounds] (
identifier[format] , identifier[bounds] , identifier[meters_per_pixel_dim] , identifier[layer_name] , identifier[geometry_type] ,
identifier[buffer_cfg] ):
literal[string]
keyword[if] keyword[not] identifier[buffer_cfg] :
keyword[return] identifier[bounds]
identifier[format_buffer_cfg] = identifier[buffer_cfg] . identifier[get] ( identifier[format] . identifier[extension] )
keyword[if] identifier[format_buffer_cfg] keyword[is] keyword[None] :
keyword[return] identifier[bounds]
identifier[geometry_type] = identifier[normalize_geometry_type] ( identifier[geometry_type] )
identifier[per_layer_cfg] = identifier[format_buffer_cfg] . identifier[get] ( literal[string] ,{}). identifier[get] ( identifier[layer_name] )
keyword[if] identifier[per_layer_cfg] keyword[is] keyword[not] keyword[None] :
identifier[layer_geom_pixels] = identifier[per_layer_cfg] . identifier[get] ( identifier[geometry_type] )
keyword[if] identifier[layer_geom_pixels] keyword[is] keyword[not] keyword[None] :
keyword[assert] identifier[isinstance] ( identifier[layer_geom_pixels] , identifier[Number] )
identifier[result] = identifier[bounds_buffer] (
identifier[bounds] , identifier[meters_per_pixel_dim] * identifier[layer_geom_pixels] )
keyword[return] identifier[result]
identifier[by_geometry_pixels] = identifier[format_buffer_cfg] . identifier[get] ( literal[string] ,{}). identifier[get] (
identifier[geometry_type] )
keyword[if] identifier[by_geometry_pixels] keyword[is] keyword[not] keyword[None] :
keyword[assert] identifier[isinstance] ( identifier[by_geometry_pixels] , identifier[Number] )
identifier[result] = identifier[bounds_buffer] (
identifier[bounds] , identifier[meters_per_pixel_dim] * identifier[by_geometry_pixels] )
keyword[return] identifier[result]
keyword[return] identifier[bounds] | def calc_buffered_bounds(format, bounds, meters_per_pixel_dim, layer_name, geometry_type, buffer_cfg):
"""
Calculate the buffered bounds per format per layer based on config.
"""
if not buffer_cfg:
return bounds # depends on [control=['if'], data=[]]
format_buffer_cfg = buffer_cfg.get(format.extension)
if format_buffer_cfg is None:
return bounds # depends on [control=['if'], data=[]]
geometry_type = normalize_geometry_type(geometry_type)
per_layer_cfg = format_buffer_cfg.get('layer', {}).get(layer_name)
if per_layer_cfg is not None:
layer_geom_pixels = per_layer_cfg.get(geometry_type)
if layer_geom_pixels is not None:
assert isinstance(layer_geom_pixels, Number)
result = bounds_buffer(bounds, meters_per_pixel_dim * layer_geom_pixels)
return result # depends on [control=['if'], data=['layer_geom_pixels']] # depends on [control=['if'], data=['per_layer_cfg']]
by_geometry_pixels = format_buffer_cfg.get('geometry', {}).get(geometry_type)
if by_geometry_pixels is not None:
assert isinstance(by_geometry_pixels, Number)
result = bounds_buffer(bounds, meters_per_pixel_dim * by_geometry_pixels)
return result # depends on [control=['if'], data=['by_geometry_pixels']]
return bounds |
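A sketch of a buffer_cfg exercising both lookups above (per-layer first, then per-geometry); the format extension, layer name, and pixel counts are illustrative.
buffer_cfg = {
    'mvt': {  # keyed by format.extension
        'layer': {'water': {'polygon': 4}},  # 'water' polygons: 4px buffer
        'geometry': {'line': 8},             # any other layer's lines: 8px buffer
    },
}
# With a hypothetical mvt format object and meters-per-pixel value:
# calc_buffered_bounds(mvt, bounds, 19.109, 'water', 'polygon', buffer_cfg)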
def is_invalid_marker(cls, text):
"""
Validate text as a PEP 426 environment marker; return an exception
if invalid or False otherwise.
"""
try:
cls.evaluate_marker(text)
except SyntaxError as e:
return cls.normalize_exception(e)
return False | def function[is_invalid_marker, parameter[cls, text]]:
constant[
Validate text as a PEP 426 environment marker; return an exception
if invalid or False otherwise.
]
<ast.Try object at 0x7da18f720070>
return[constant[False]] | keyword[def] identifier[is_invalid_marker] ( identifier[cls] , identifier[text] ):
literal[string]
keyword[try] :
identifier[cls] . identifier[evaluate_marker] ( identifier[text] )
keyword[except] identifier[SyntaxError] keyword[as] identifier[e] :
keyword[return] identifier[cls] . identifier[normalize_exception] ( identifier[e] )
keyword[return] keyword[False] | def is_invalid_marker(cls, text):
"""
Validate text as a PEP 426 environment marker; return an exception
if invalid or False otherwise.
"""
try:
cls.evaluate_marker(text) # depends on [control=['try'], data=[]]
except SyntaxError as e:
return cls.normalize_exception(e) # depends on [control=['except'], data=['e']]
return False |
def write_byte(self, address, value):
"""Writes the byte to unaddressed register in a device. """
LOGGER.debug("Writing byte %s to device %s!", bin(value), hex(address))
return self.driver.write_byte(address, value) | def function[write_byte, parameter[self, address, value]]:
constant[Writes the byte to an unaddressed register in a device. ]
call[name[LOGGER].debug, parameter[constant[Writing byte %s to device %s!], call[name[bin], parameter[name[value]]], call[name[hex], parameter[name[address]]]]]
return[call[name[self].driver.write_byte, parameter[name[address], name[value]]]] | keyword[def] identifier[write_byte] ( identifier[self] , identifier[address] , identifier[value] ):
literal[string]
identifier[LOGGER] . identifier[debug] ( literal[string] , identifier[bin] ( identifier[value] ), identifier[hex] ( identifier[address] ))
keyword[return] identifier[self] . identifier[driver] . identifier[write_byte] ( identifier[address] , identifier[value] ) | def write_byte(self, address, value):
"""Writes the byte to unaddressed register in a device. """
LOGGER.debug('Writing byte %s to device %s!', bin(value), hex(address))
return self.driver.write_byte(address, value) |
def params(dirnames, param_func, t_steady=None):
"""Calculate a parameter of a set of model output directories,
for a measure function which returns an associated uncertainty.
Parameters
----------
dirnames: list[str]
Model output directory paths.
param_func: function
Function which takes a :class:`Model` instance as a single argument,
and returns the parameter of interest.
Returns
-------
params: numpy.ndarray
Parameters.
"""
return np.array([param_func(get_recent_model(d)) for d in dirnames]) | def function[params, parameter[dirnames, param_func, t_steady]]:
constant[Calculate a parameter for a set of model output directories,
applying `param_func` to the most recent model in each.
Parameters
----------
dirnames: list[str]
Model output directory paths.
param_func: function
Function which takes a :class:`Model` instance as a single argument,
and returns the parameter of interest.
Returns
-------
params: numpy.ndarray
Parameters.
]
return[call[name[np].array, parameter[<ast.ListComp object at 0x7da1b149ffa0>]]] | keyword[def] identifier[params] ( identifier[dirnames] , identifier[param_func] , identifier[t_steady] = keyword[None] ):
literal[string]
keyword[return] identifier[np] . identifier[array] ([ identifier[param_func] ( identifier[get_recent_model] ( identifier[d] )) keyword[for] identifier[d] keyword[in] identifier[dirnames] ]) | def params(dirnames, param_func, t_steady=None):
"""Calculate a parameter of a set of model output directories,
for a measure function which returns an associated uncertainty.
Parameters
----------
dirnames: list[str]
Model output directory paths.
param_func: function
Function which takes a :class:`Model` instance as a single argument,
and returns the parameter of interest.
Returns
-------
params: numpy.ndarray
Parameters.
"""
return np.array([param_func(get_recent_model(d)) for d in dirnames]) |
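A usage sketch for params; the directory names and measure function are illustrative, and `t_steady` is accepted but unused by this implementation.
def mean_speed(model):
    return model.v.mean()  # hypothetical Model attribute

speeds = params(['run_1', 'run_2'], mean_speed)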
def find_vc_pdir_vswhere(msvc_version):
"""
Find the MSVC product directory using vswhere.exe.
Run it, asking for the specified version, and get the MSVS install location.
:param msvc_version:
:return: MSVC install dir
"""
vswhere_path = os.path.join(
'C:\\',
'Program Files (x86)',
'Microsoft Visual Studio',
'Installer',
'vswhere.exe'
)
vswhere_cmd = [vswhere_path, '-version', msvc_version, '-property', 'installationPath']
if os.path.exists(vswhere_path):
sp = subprocess.Popen(vswhere_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
vsdir, err = sp.communicate()
vsdir = vsdir.decode("mbcs")
vsdir = vsdir.rstrip()
vc_pdir = os.path.join(vsdir, 'VC')
return vc_pdir
else:
# No vswhere on system, no install info available
return None | def function[find_vc_pdir_vswhere, parameter[msvc_version]]:
constant[
Find the MSVC product directory using vswhere.exe.
Run it, asking for the specified version, and get the MSVS install location.
:param msvc_version:
:return: MSVC install dir
]
variable[vswhere_path] assign[=] call[name[os].path.join, parameter[constant[C:\], constant[Program Files (x86)], constant[Microsoft Visual Studio], constant[Installer], constant[vswhere.exe]]]
variable[vswhere_cmd] assign[=] list[[<ast.Name object at 0x7da20c992da0>, <ast.Constant object at 0x7da20c990e50>, <ast.Name object at 0x7da20c9924d0>, <ast.Constant object at 0x7da20c991510>, <ast.Constant object at 0x7da20c991f60>]]
if call[name[os].path.exists, parameter[name[vswhere_path]]] begin[:]
variable[sp] assign[=] call[name[subprocess].Popen, parameter[name[vswhere_cmd]]]
<ast.Tuple object at 0x7da20c9915a0> assign[=] call[name[sp].communicate, parameter[]]
variable[vsdir] assign[=] call[name[vsdir].decode, parameter[constant[mbcs]]]
variable[vsdir] assign[=] call[name[vsdir].rstrip, parameter[]]
variable[vc_pdir] assign[=] call[name[os].path.join, parameter[name[vsdir], constant[VC]]]
return[name[vc_pdir]] | keyword[def] identifier[find_vc_pdir_vswhere] ( identifier[msvc_version] ):
literal[string]
identifier[vswhere_path] = identifier[os] . identifier[path] . identifier[join] (
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string]
)
identifier[vswhere_cmd] =[ identifier[vswhere_path] , literal[string] , identifier[msvc_version] , literal[string] , literal[string] ]
keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[vswhere_path] ):
identifier[sp] = identifier[subprocess] . identifier[Popen] ( identifier[vswhere_cmd] , identifier[stdout] = identifier[subprocess] . identifier[PIPE] , identifier[stderr] = identifier[subprocess] . identifier[PIPE] )
identifier[vsdir] , identifier[err] = identifier[sp] . identifier[communicate] ()
identifier[vsdir] = identifier[vsdir] . identifier[decode] ( literal[string] )
identifier[vsdir] = identifier[vsdir] . identifier[rstrip] ()
identifier[vc_pdir] = identifier[os] . identifier[path] . identifier[join] ( identifier[vsdir] , literal[string] )
keyword[return] identifier[vc_pdir]
keyword[else] :
keyword[return] keyword[None] | def find_vc_pdir_vswhere(msvc_version):
"""
Find the MSVC product directory using vswhere.exe.
Run it, asking for the specified version, and get the MSVS install location.
:param msvc_version:
:return: MSVC install dir
"""
vswhere_path = os.path.join('C:\\', 'Program Files (x86)', 'Microsoft Visual Studio', 'Installer', 'vswhere.exe')
vswhere_cmd = [vswhere_path, '-version', msvc_version, '-property', 'installationPath']
if os.path.exists(vswhere_path):
sp = subprocess.Popen(vswhere_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(vsdir, err) = sp.communicate()
vsdir = vsdir.decode('mbcs')
vsdir = vsdir.rstrip()
vc_pdir = os.path.join(vsdir, 'VC')
return vc_pdir # depends on [control=['if'], data=[]]
else:
# No vswhere on system, no install info available
return None |
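A quick check of the helper; the version string follows vswhere's range syntax and the resulting path is machine-dependent.
vc_dir = find_vc_pdir_vswhere('[15.0,16.0)')  # any Visual Studio 2017 install
if vc_dir:
    print('MSVC at', vc_dir)
else:
    print('vswhere.exe not found; no install info available')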
def from_name(cls, name):
"""Create an author by name, automatically populating the hash."""
return Author(name=name, sha512=cls.hash_name(name)) | def function[from_name, parameter[cls, name]]:
constant[Create an author by name, automatically populating the hash.]
return[call[name[Author], parameter[]]] | keyword[def] identifier[from_name] ( identifier[cls] , identifier[name] ):
literal[string]
keyword[return] identifier[Author] ( identifier[name] = identifier[name] , identifier[sha512] = identifier[cls] . identifier[hash_name] ( identifier[name] )) | def from_name(cls, name):
"""Create an author by name, automatically populating the hash."""
return Author(name=name, sha512=cls.hash_name(name)) |
def add(env, securitygroup_id, remote_ip, remote_group,
direction, ethertype, port_max, port_min, protocol):
"""Add a security group rule to a security group.
\b
Examples:
# Add an SSH rule (TCP port 22) to a security group
slcli sg rule-add 384727 \\
--direction ingress \\
--protocol tcp \\
--port-min 22 \\
--port-max 22
\b
# Add a ping rule (ICMP type 8 code 0) to a security group
slcli sg rule-add 384727 \\
--direction ingress \\
--protocol icmp \\
--port-min 8 \\
--port-max 0
"""
mgr = SoftLayer.NetworkManager(env.client)
ret = mgr.add_securitygroup_rule(securitygroup_id, remote_ip, remote_group,
direction, ethertype, port_max,
port_min, protocol)
if not ret:
raise exceptions.CLIAbort("Failed to add security group rule")
table = formatting.Table(REQUEST_RULES_COLUMNS)
table.add_row([ret['requestId'], str(ret['rules'])])
env.fout(table) | def function[add, parameter[env, securitygroup_id, remote_ip, remote_group, direction, ethertype, port_max, port_min, protocol]]:
constant[Add a security group rule to a security group.
Examples:
# Add an SSH rule (TCP port 22) to a security group
slcli sg rule-add 384727 \
--direction ingress \
--protocol tcp \
--port-min 22 \
--port-max 22
# Add a ping rule (ICMP type 8 code 0) to a security group
slcli sg rule-add 384727 \
--direction ingress \
--protocol icmp \
--port-min 8 \
--port-max 0
]
variable[mgr] assign[=] call[name[SoftLayer].NetworkManager, parameter[name[env].client]]
variable[ret] assign[=] call[name[mgr].add_securitygroup_rule, parameter[name[securitygroup_id], name[remote_ip], name[remote_group], name[direction], name[ethertype], name[port_max], name[port_min], name[protocol]]]
if <ast.UnaryOp object at 0x7da18f58cc10> begin[:]
<ast.Raise object at 0x7da18f58d960>
variable[table] assign[=] call[name[formatting].Table, parameter[name[REQUEST_RULES_COLUMNS]]]
call[name[table].add_row, parameter[list[[<ast.Subscript object at 0x7da18ede5240>, <ast.Call object at 0x7da18ede53c0>]]]]
call[name[env].fout, parameter[name[table]]] | keyword[def] identifier[add] ( identifier[env] , identifier[securitygroup_id] , identifier[remote_ip] , identifier[remote_group] ,
identifier[direction] , identifier[ethertype] , identifier[port_max] , identifier[port_min] , identifier[protocol] ):
literal[string]
identifier[mgr] = identifier[SoftLayer] . identifier[NetworkManager] ( identifier[env] . identifier[client] )
identifier[ret] = identifier[mgr] . identifier[add_securitygroup_rule] ( identifier[securitygroup_id] , identifier[remote_ip] , identifier[remote_group] ,
identifier[direction] , identifier[ethertype] , identifier[port_max] ,
identifier[port_min] , identifier[protocol] )
keyword[if] keyword[not] identifier[ret] :
keyword[raise] identifier[exceptions] . identifier[CLIAbort] ( literal[string] )
identifier[table] = identifier[formatting] . identifier[Table] ( identifier[REQUEST_RULES_COLUMNS] )
identifier[table] . identifier[add_row] ([ identifier[ret] [ literal[string] ], identifier[str] ( identifier[ret] [ literal[string] ])])
identifier[env] . identifier[fout] ( identifier[table] ) | def add(env, securitygroup_id, remote_ip, remote_group, direction, ethertype, port_max, port_min, protocol):
"""Add a security group rule to a security group.
\x08
Examples:
# Add an SSH rule (TCP port 22) to a security group
slcli sg rule-add 384727 \\
--direction ingress \\
--protocol tcp \\
--port-min 22 \\
--port-max 22
\x08
# Add a ping rule (ICMP type 8 code 0) to a security group
slcli sg rule-add 384727 \\
--direction ingress \\
--protocol icmp \\
--port-min 8 \\
--port-max 0
"""
mgr = SoftLayer.NetworkManager(env.client)
ret = mgr.add_securitygroup_rule(securitygroup_id, remote_ip, remote_group, direction, ethertype, port_max, port_min, protocol)
if not ret:
raise exceptions.CLIAbort('Failed to add security group rule') # depends on [control=['if'], data=[]]
table = formatting.Table(REQUEST_RULES_COLUMNS)
table.add_row([ret['requestId'], str(ret['rules'])])
env.fout(table) |
def make(parser):
"""
Prepare a data disk on a remote host.
"""
sub_command_help = dedent("""
Create OSDs from a data disk on a remote host:
ceph-deploy osd create {node} --data /path/to/device
For bluestore, optional devices can be used::
ceph-deploy osd create {node} --data /path/to/data --block-db /path/to/db-device
ceph-deploy osd create {node} --data /path/to/data --block-wal /path/to/wal-device
ceph-deploy osd create {node} --data /path/to/data --block-db /path/to/db-device --block-wal /path/to/wal-device
For filestore, the journal must be specified, as well as the objectstore::
ceph-deploy osd create {node} --filestore --data /path/to/data --journal /path/to/journal
For data devices, it can be an existing logical volume in the format of:
vg/lv, or a device. For other OSD components like wal, db, and journal, it
can be logical volume (in vg/lv format) or it must be a GPT partition.
"""
)
parser.formatter_class = argparse.RawDescriptionHelpFormatter
parser.description = sub_command_help
osd_parser = parser.add_subparsers(dest='subcommand')
osd_parser.required = True
osd_list = osd_parser.add_parser(
'list',
help='List OSD info from remote host(s)'
)
osd_list.add_argument(
'host',
nargs='+',
metavar='HOST',
help='remote host(s) to list OSDs from'
)
osd_list.add_argument(
'--debug',
action='store_true',
help='Enable debug mode on remote ceph-volume calls',
)
osd_create = osd_parser.add_parser(
'create',
help='Create new Ceph OSD daemon by preparing and activating a device'
)
osd_create.add_argument(
'--data',
metavar='DATA',
help='The OSD data logical volume (vg/lv) or absolute path to device'
)
osd_create.add_argument(
'--journal',
help='Logical Volume (vg/lv) or path to GPT partition',
)
osd_create.add_argument(
'--zap-disk',
action='store_true',
help='DEPRECATED - cannot zap when creating an OSD'
)
osd_create.add_argument(
'--fs-type',
metavar='FS_TYPE',
choices=['xfs',
'btrfs'
],
default='xfs',
help='filesystem to use to format DEVICE (xfs, btrfs)',
)
osd_create.add_argument(
'--dmcrypt',
action='store_true',
help='use dm-crypt on DEVICE',
)
osd_create.add_argument(
'--dmcrypt-key-dir',
metavar='KEYDIR',
default='/etc/ceph/dmcrypt-keys',
help='directory where dm-crypt keys are stored',
)
osd_create.add_argument(
'--filestore',
action='store_true', default=None,
help='filestore objectstore',
)
osd_create.add_argument(
'--bluestore',
action='store_true', default=None,
help='bluestore objectstore',
)
osd_create.add_argument(
'--block-db',
default=None,
help='bluestore block.db path'
)
osd_create.add_argument(
'--block-wal',
default=None,
help='bluestore block.wal path'
)
osd_create.add_argument(
'host',
nargs='?',
metavar='HOST',
help='Remote host to connect'
)
osd_create.add_argument(
'--debug',
action='store_true',
help='Enable debug mode on remote ceph-volume calls',
)
parser.set_defaults(
func=osd,
) | def function[make, parameter[parser]]:
constant[
Prepare a data disk on remote host.
]
variable[sub_command_help] assign[=] call[name[dedent], parameter[constant[
Create OSDs from a data disk on a remote host:
ceph-deploy osd create {node} --data /path/to/device
For bluestore, optional devices can be used::
ceph-deploy osd create {node} --data /path/to/data --block-db /path/to/db-device
ceph-deploy osd create {node} --data /path/to/data --block-wal /path/to/wal-device
ceph-deploy osd create {node} --data /path/to/data --block-db /path/to/db-device --block-wal /path/to/wal-device
For filestore, the journal must be specified, as well as the objectstore::
ceph-deploy osd create {node} --filestore --data /path/to/data --journal /path/to/journal
For data devices, it can be an existing logical volume in the format of:
vg/lv, or a device. For other OSD components like wal, db, and journal, it
can be logical volume (in vg/lv format) or it must be a GPT partition.
]]]
name[parser].formatter_class assign[=] name[argparse].RawDescriptionHelpFormatter
name[parser].description assign[=] name[sub_command_help]
variable[osd_parser] assign[=] call[name[parser].add_subparsers, parameter[]]
name[osd_parser].required assign[=] constant[True]
variable[osd_list] assign[=] call[name[osd_parser].add_parser, parameter[constant[list]]]
call[name[osd_list].add_argument, parameter[constant[host]]]
call[name[osd_list].add_argument, parameter[constant[--debug]]]
variable[osd_create] assign[=] call[name[osd_parser].add_parser, parameter[constant[create]]]
call[name[osd_create].add_argument, parameter[constant[--data]]]
call[name[osd_create].add_argument, parameter[constant[--journal]]]
call[name[osd_create].add_argument, parameter[constant[--zap-disk]]]
call[name[osd_create].add_argument, parameter[constant[--fs-type]]]
call[name[osd_create].add_argument, parameter[constant[--dmcrypt]]]
call[name[osd_create].add_argument, parameter[constant[--dmcrypt-key-dir]]]
call[name[osd_create].add_argument, parameter[constant[--filestore]]]
call[name[osd_create].add_argument, parameter[constant[--bluestore]]]
call[name[osd_create].add_argument, parameter[constant[--block-db]]]
call[name[osd_create].add_argument, parameter[constant[--block-wal]]]
call[name[osd_create].add_argument, parameter[constant[host]]]
call[name[osd_create].add_argument, parameter[constant[--debug]]]
call[name[parser].set_defaults, parameter[]] | keyword[def] identifier[make] ( identifier[parser] ):
literal[string]
identifier[sub_command_help] = identifier[dedent] ( literal[string]
)
identifier[parser] . identifier[formatter_class] = identifier[argparse] . identifier[RawDescriptionHelpFormatter]
identifier[parser] . identifier[description] = identifier[sub_command_help]
identifier[osd_parser] = identifier[parser] . identifier[add_subparsers] ( identifier[dest] = literal[string] )
identifier[osd_parser] . identifier[required] = keyword[True]
identifier[osd_list] = identifier[osd_parser] . identifier[add_parser] (
literal[string] ,
identifier[help] = literal[string]
)
identifier[osd_list] . identifier[add_argument] (
literal[string] ,
identifier[nargs] = literal[string] ,
identifier[metavar] = literal[string] ,
identifier[help] = literal[string]
)
identifier[osd_list] . identifier[add_argument] (
literal[string] ,
identifier[action] = literal[string] ,
identifier[help] = literal[string] ,
)
identifier[osd_create] = identifier[osd_parser] . identifier[add_parser] (
literal[string] ,
identifier[help] = literal[string]
)
identifier[osd_create] . identifier[add_argument] (
literal[string] ,
identifier[metavar] = literal[string] ,
identifier[help] = literal[string]
)
identifier[osd_create] . identifier[add_argument] (
literal[string] ,
identifier[help] = literal[string] ,
)
identifier[osd_create] . identifier[add_argument] (
literal[string] ,
identifier[action] = literal[string] ,
identifier[help] = literal[string]
)
identifier[osd_create] . identifier[add_argument] (
literal[string] ,
identifier[metavar] = literal[string] ,
identifier[choices] =[ literal[string] ,
literal[string]
],
identifier[default] = literal[string] ,
identifier[help] = literal[string] ,
)
identifier[osd_create] . identifier[add_argument] (
literal[string] ,
identifier[action] = literal[string] ,
identifier[help] = literal[string] ,
)
identifier[osd_create] . identifier[add_argument] (
literal[string] ,
identifier[metavar] = literal[string] ,
identifier[default] = literal[string] ,
identifier[help] = literal[string] ,
)
identifier[osd_create] . identifier[add_argument] (
literal[string] ,
identifier[action] = literal[string] , identifier[default] = keyword[None] ,
identifier[help] = literal[string] ,
)
identifier[osd_create] . identifier[add_argument] (
literal[string] ,
identifier[action] = literal[string] , identifier[default] = keyword[None] ,
identifier[help] = literal[string] ,
)
identifier[osd_create] . identifier[add_argument] (
literal[string] ,
identifier[default] = keyword[None] ,
identifier[help] = literal[string]
)
identifier[osd_create] . identifier[add_argument] (
literal[string] ,
identifier[default] = keyword[None] ,
identifier[help] = literal[string]
)
identifier[osd_create] . identifier[add_argument] (
literal[string] ,
identifier[nargs] = literal[string] ,
identifier[metavar] = literal[string] ,
identifier[help] = literal[string]
)
identifier[osd_create] . identifier[add_argument] (
literal[string] ,
identifier[action] = literal[string] ,
identifier[help] = literal[string] ,
)
identifier[parser] . identifier[set_defaults] (
identifier[func] = identifier[osd] ,
) | def make(parser):
"""
Prepare a data disk on remote host.
"""
sub_command_help = dedent('\n Create OSDs from a data disk on a remote host:\n\n ceph-deploy osd create {node} --data /path/to/device\n\n For bluestore, optional devices can be used::\n\n ceph-deploy osd create {node} --data /path/to/data --block-db /path/to/db-device\n ceph-deploy osd create {node} --data /path/to/data --block-wal /path/to/wal-device\n ceph-deploy osd create {node} --data /path/to/data --block-db /path/to/db-device --block-wal /path/to/wal-device\n\n For filestore, the journal must be specified, as well as the objectstore::\n\n ceph-deploy osd create {node} --filestore --data /path/to/data --journal /path/to/journal\n\n For data devices, it can be an existing logical volume in the format of:\n vg/lv, or a device. For other OSD components like wal, db, and journal, it\n can be logical volume (in vg/lv format) or it must be a GPT partition.\n ')
parser.formatter_class = argparse.RawDescriptionHelpFormatter
parser.description = sub_command_help
osd_parser = parser.add_subparsers(dest='subcommand')
osd_parser.required = True
osd_list = osd_parser.add_parser('list', help='List OSD info from remote host(s)')
osd_list.add_argument('host', nargs='+', metavar='HOST', help='remote host(s) to list OSDs from')
osd_list.add_argument('--debug', action='store_true', help='Enable debug mode on remote ceph-volume calls')
osd_create = osd_parser.add_parser('create', help='Create new Ceph OSD daemon by preparing and activating a device')
osd_create.add_argument('--data', metavar='DATA', help='The OSD data logical volume (vg/lv) or absolute path to device')
osd_create.add_argument('--journal', help='Logical Volume (vg/lv) or path to GPT partition')
osd_create.add_argument('--zap-disk', action='store_true', help='DEPRECATED - cannot zap when creating an OSD')
osd_create.add_argument('--fs-type', metavar='FS_TYPE', choices=['xfs', 'btrfs'], default='xfs', help='filesystem to use to format DEVICE (xfs, btrfs)')
osd_create.add_argument('--dmcrypt', action='store_true', help='use dm-crypt on DEVICE')
osd_create.add_argument('--dmcrypt-key-dir', metavar='KEYDIR', default='/etc/ceph/dmcrypt-keys', help='directory where dm-crypt keys are stored')
osd_create.add_argument('--filestore', action='store_true', default=None, help='filestore objectstore')
osd_create.add_argument('--bluestore', action='store_true', default=None, help='bluestore objectstore')
osd_create.add_argument('--block-db', default=None, help='bluestore block.db path')
osd_create.add_argument('--block-wal', default=None, help='bluestore block.wal path')
osd_create.add_argument('host', nargs='?', metavar='HOST', help='Remote host to connect')
osd_create.add_argument('--debug', action='store_true', help='Enable debug mode on remote ceph-volume calls')
parser.set_defaults(func=osd) |
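
A small usage sketch for the parser factory above. It assumes `make` is importable together with its module's own imports (`argparse`, `textwrap.dedent`, and the `osd` handler it wires in), and only parses arguments rather than running the handler:

import argparse

parser = argparse.ArgumentParser(prog='ceph-deploy osd')
make(parser)  # registers the 'list' and 'create' subcommands
args = parser.parse_args(['create', 'node1', '--data', '/dev/sdb', '--bluestore'])
print(args.subcommand, args.host, args.data, args.bluestore)
# -> create node1 /dev/sdb True
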
async def get_active(self, *args, **kwargs):
"""
Get active users balance
Accepts:
- uid [integer] (users id)
- types [list | string] (array with needed types or "all")
Returns:
{
type [string] (blockchain type): amount
}
"""
# Get data from the request
coinids = kwargs.get("coinids")
coinid = kwargs.get("coinid")  # single coin id (assumed to arrive via kwargs; used for address lookups)
uid = kwargs.get("uid", 0)
address = kwargs.get("address")
# Testnet coin ids carry a "TEST" suffix; strip it when a coinid is given.
if isinstance(coinid, str):
coinid = coinid.replace("TEST", "")
try:
uid = int(uid)
except (TypeError, ValueError):
return await self.error_400("User id must be an integer.")
if not uid and address:
uid = await self.get_uid_by_address(address=address, coinid=coinid)
if isinstance(uid, dict):
return uid
# Check that required fields exist
if not all([coinids, uid]):
return await self.error_400("Get active. Missing required fields.")
if isinstance(coinids, list):
actives = {}
for coinid in coinids:
# per-coin database with a shared balance collection, as in the string branch below
database = self.client[coinid]
collection = database[self.collection]
# Get current balance
balance = await collection.find_one({"uid":uid})
if not balance:
return await self.error_404(
"Get active. Balance with uid:%s and type:%s not found" % (uid, coinid))
# Collect actives
actives[coinid] = int(balance["amount_active"])
if isinstance(coinids, str):
actives = {}
for coinid in self.coinids:
database = self.client[coinid]
collection = database[self.collection]
# Get current balance
balance = await collection.find_one({"uid":uid})
if not balance:
return await self.error_404(
"Get active. Balance with uid:%s and type:%s not found" % (uid, coinid))
# Collect actives
actives[coinid] = int(balance["amount_active"])
return actives | <ast.AsyncFunctionDef object at 0x7da1b0915ae0> | keyword[async] keyword[def] identifier[get_active] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
identifier[coinids] = identifier[kwargs] . identifier[get] ( literal[string] )
identifier[uid] = identifier[kwargs] . identifier[get] ( literal[string] , literal[int] )
identifier[address] = identifier[kwargs] . identifier[get] ( literal[string] )
keyword[try] :
identifier[coinid] = identifier[coinid] . identifier[replace] ( literal[string] , literal[string] )
keyword[except] :
keyword[pass]
keyword[try] :
identifier[uid] = identifier[int] ( identifier[uid] )
keyword[except] :
keyword[return] keyword[await] identifier[self] . identifier[error_400] ( literal[string] )
keyword[if] keyword[not] identifier[uid] keyword[and] identifier[address] :
identifier[uid] = keyword[await] identifier[self] . identifier[get_uid_by_address] ( identifier[address] = identifier[address] , identifier[coinid] = identifier[coinid] )
keyword[if] identifier[isinstance] ( identifier[uid] , identifier[dict] ):
keyword[return] identifier[uid]
keyword[if] keyword[not] identifier[all] ([ identifier[coinids] , identifier[uid] ]):
keyword[return] keyword[await] identifier[self] . identifier[error_400] ( literal[string] )
keyword[if] identifier[isinstance] ( identifier[coinids] , identifier[list] ):
identifier[actives] ={}
keyword[for] identifier[coinid] keyword[in] identifier[coinids] :
identifier[database] = identifier[self] . identifier[client] [ identifier[self] . identifier[collection] ]
identifier[collection] = identifier[database] [ identifier[coinid] ]
identifier[balance] = keyword[await] identifier[collection] . identifier[find_one] ({ literal[string] : identifier[uid] })
keyword[if] keyword[not] identifier[balance] :
keyword[return] keyword[await] identifier[self] . identifier[error_404] (
literal[string] %( identifier[uid] , identifier[coinid] ))
identifier[actives] [ identifier[coinid] ]= identifier[int] ( identifier[balance] [ literal[string] ])
keyword[if] identifier[isinstance] ( identifier[coinids] , identifier[str] ):
identifier[actives] ={}
keyword[for] identifier[coinid] keyword[in] identifier[self] . identifier[coinids] :
identifier[database] = identifier[self] . identifier[client] [ identifier[coinid] ]
identifier[collection] = identifier[database] [ identifier[self] . identifier[collection] ]
identifier[balance] = keyword[await] identifier[collection] . identifier[find_one] ({ literal[string] : identifier[uid] })
keyword[if] keyword[not] identifier[balance] :
keyword[return] keyword[await] identifier[self] . identifier[error_404] (
literal[string] %( identifier[uid] , identifier[coinid] ))
identifier[actives] [ identifier[coinid] ]= identifier[int] ( identifier[balance] [ literal[string] ])
keyword[return] identifier[actives] | async def get_active(self, *args, **kwargs):
"""
Get active users balance
Accepts:
- uid [integer] (users id)
- types [list | string] (array with needed types or "all")
Returns:
{
type [string] (blockchain type): amount
}
""" # Get daya from request
coinids = kwargs.get('coinids')
uid = kwargs.get('uid', 0)
address = kwargs.get('address')
try:
coinid = coinid.replace('TEST', '') # depends on [control=['try'], data=[]]
except:
pass # depends on [control=['except'], data=[]]
try:
uid = int(uid) # depends on [control=['try'], data=[]]
except:
return await self.error_400('User id must be integer. ') # depends on [control=['except'], data=[]]
if not uid and address:
uid = await self.get_uid_by_address(address=address, coinid=coinid)
if isinstance(uid, dict):
return uid # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # Check if required fields exists
if not all([coinids, uid]):
return await self.error_400('Get active. Missed required fields.') # depends on [control=['if'], data=[]]
if isinstance(coinids, list):
actives = {}
for coinid in coinids:
database = self.client[self.collection]
collection = database[coinid] # Get current balance
balance = await collection.find_one({'uid': uid})
if not balance:
return await self.error_404('Get active. Balance with uid:%s and type:%s not found' % (uid, coinid)) # depends on [control=['if'], data=[]] # Collect actives
actives[coinid] = int(balance['amount_active']) # depends on [control=['for'], data=['coinid']] # depends on [control=['if'], data=[]]
if isinstance(coinids, str):
actives = {}
for coinid in self.coinids:
database = self.client[coinid]
collection = database[self.collection] # Get current balance
balance = await collection.find_one({'uid': uid})
if not balance:
return await self.error_404('Get active. Balance with uid:%s and type:%s not found' % (uid, coinid)) # depends on [control=['if'], data=[]] # Collect actives
actives[coinid] = int(balance['amount_active']) # depends on [control=['for'], data=['coinid']] # depends on [control=['if'], data=[]]
return actives |
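
A hypothetical call site for the coroutine above, assuming `handler` is an instance of the surrounding class with a configured Mongo client; the printed amounts are purely illustrative:

import asyncio

actives = asyncio.run(handler.get_active(coinids=["BTC", "ETH"], uid=42))
print(actives)  # e.g. {"BTC": 1000, "ETH": 250}
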
def calc_A_hat(A, S):
'''Return the A_hat matrix of A given the skew matrix S'''
return np.dot(S, np.dot(A, np.transpose(S))) | def function[calc_A_hat, parameter[A, S]]:
constant[Return the A_hat matrix of A given the skew matrix S]
return[call[name[np].dot, parameter[name[S], call[name[np].dot, parameter[name[A], call[name[np].transpose, parameter[name[S]]]]]]]] | keyword[def] identifier[calc_A_hat] ( identifier[A] , identifier[S] ):
literal[string]
keyword[return] identifier[np] . identifier[dot] ( identifier[S] , identifier[np] . identifier[dot] ( identifier[A] , identifier[np] . identifier[transpose] ( identifier[S] ))) | def calc_A_hat(A, S):
"""Return the A_hat matrix of A given the skew matrix S"""
return np.dot(S, np.dot(A, np.transpose(S))) |
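
A quick numerical check of the identity S . A . S^T with a small skew-symmetric S:

import numpy as np

A = np.array([[1.0, 2.0], [3.0, 4.0]])
S = np.array([[0.0, -1.0], [1.0, 0.0]])  # skew-symmetric: S.T == -S
print(calc_A_hat(A, S))  # identical to S @ A @ S.T
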
def convert_conv(params, w_name, scope_name, inputs, layers, weights, names):
"""
Convert convolution layer.
Args:
params: dictionary with layer parameters
w_name: name prefix in state_dict
scope_name: pytorch scope name
inputs: pytorch node inputs
layers: dictionary with keras tensors
weights: pytorch state_dict
names: use short names for keras layers
"""
print('Converting convolution ...')
if names == 'short':
tf_name = 'C' + random_string(7)
elif names == 'keep':
tf_name = w_name
else:
tf_name = w_name + str(random.random())
bias_name = '{0}.bias'.format(w_name)
weights_name = '{0}.weight'.format(w_name)
input_name = inputs[0]
if len(weights[weights_name].numpy().shape) == 5: # 3D conv
W = weights[weights_name].numpy().transpose(2, 3, 4, 1, 0)
height, width, channels, n_layers, n_filters = W.shape
if bias_name in weights:
biases = weights[bias_name].numpy()
has_bias = True
else:
biases = None
has_bias = False
if params['pads'][0] > 0 or params['pads'][1] > 0:
padding_name = tf_name + '_pad'
padding_layer = keras.layers.ZeroPadding3D(
padding=(params['pads'][0],
params['pads'][1],
params['pads'][2]),
name=padding_name
)
layers[padding_name] = padding_layer(layers[input_name])
input_name = padding_name
if has_bias:
weights = [W, biases]
else:
weights = [W]
conv = keras.layers.Conv3D(
filters=n_filters,
kernel_size=(channels, height, width),
strides=(params['strides'][0],
params['strides'][1],
params['strides'][2]),
padding='valid',
weights=weights,
use_bias=has_bias,
activation=None,
dilation_rate=params['dilations'][0],
bias_initializer='zeros', kernel_initializer='zeros',
name=tf_name
)
layers[scope_name] = conv(layers[input_name])
elif len(weights[weights_name].numpy().shape) == 4: # 2D conv
if params['pads'][0] > 0 or params['pads'][1] > 0:
padding_name = tf_name + '_pad'
padding_layer = keras.layers.ZeroPadding2D(
padding=(params['pads'][0], params['pads'][1]),
name=padding_name
)
layers[padding_name] = padding_layer(layers[input_name])
input_name = padding_name
W = weights[weights_name].numpy().transpose(2, 3, 1, 0)
height, width, channels_per_group, out_channels = W.shape
n_groups = params['group']
in_channels = channels_per_group * n_groups
if n_groups == in_channels and n_groups != 1:
if bias_name in weights:
biases = weights[bias_name].numpy()
has_bias = True
else:
biases = None
has_bias = False
W = W.transpose(0, 1, 3, 2)
if has_bias:
weights = [W, biases]
else:
weights = [W]
conv = keras.layers.DepthwiseConv2D(
kernel_size=(height, width),
strides=(params['strides'][0], params['strides'][1]),
padding='valid',
use_bias=has_bias,
activation=None,
depth_multiplier=1,
weights=weights,
dilation_rate=params['dilations'][0],
bias_initializer='zeros', kernel_initializer='zeros'
)
layers[scope_name] = conv(layers[input_name])
elif n_groups != 1:
# Example from https://kratzert.github.io/2017/02/24/finetuning-alexnet-with-tensorflow.html
# # Split input and weights and convolve them separately
# input_groups = tf.split(axis=3, num_or_size_splits=groups, value=x)
# weight_groups = tf.split(axis=3, num_or_size_splits=groups, value=weights)
# output_groups = [convolve(i, k) for i, k in zip(input_groups, weight_groups)]
# # Concat the convolved output together again
# conv = tf.concat(axis=3, values=output_groups)
def target_layer(x, groups=params['group'], stride_y=params['strides'][0], stride_x=params['strides'][1]):
x = tf.transpose(x, [0, 2, 3, 1])
def convolve_lambda(i, k):
return tf.nn.conv2d(i, k, strides=[1, stride_y, stride_x, 1], padding='VALID')
input_groups = tf.split(axis=3, num_or_size_splits=groups, value=x)
weight_groups = tf.split(axis=3, num_or_size_splits=groups, value=W.transpose(0, 1, 2, 3))
output_groups = [convolve_lambda(i, k) for i, k in zip(input_groups, weight_groups)]
layer = tf.concat(axis=3, values=output_groups)
layer = tf.transpose(layer, [0, 3, 1, 2])
return layer
lambda_layer = keras.layers.Lambda(target_layer)
layers[scope_name] = lambda_layer(layers[input_name])
else:
if bias_name in weights:
biases = weights[bias_name].numpy()
has_bias = True
else:
biases = None
has_bias = False
if has_bias:
weights = [W, biases]
else:
weights = [W]
conv = keras.layers.Conv2D(
filters=out_channels,
kernel_size=(height, width),
strides=(params['strides'][0], params['strides'][1]),
padding='valid',
weights=weights,
use_bias=has_bias,
activation=None,
dilation_rate=params['dilations'][0],
bias_initializer='zeros', kernel_initializer='zeros',
name=tf_name
)
layers[scope_name] = conv(layers[input_name])
else: # 1D conv
W = weights[weights_name].numpy().transpose(2, 1, 0)
width, channels, n_filters = W.shape
n_groups = params['group']
if n_groups > 1:
raise AssertionError('Cannot convert conv1d with groups != 1')
if bias_name in weights:
biases = weights[bias_name].numpy()
has_bias = True
else:
biases = None
has_bias = False
padding_name = tf_name + '_pad'
padding_layer = keras.layers.ZeroPadding1D(
padding=params['pads'][0],
name=padding_name
)
layers[padding_name] = padding_layer(layers[inputs[0]])
input_name = padding_name
if has_bias:
weights = [W, biases]
else:
weights = [W]
conv = keras.layers.Conv1D(
filters=n_filters,  # out_channels axis of W (kernel, in, out); must match the supplied weights
kernel_size=width,
strides=params['strides'],
padding='valid',
weights=weights,
use_bias=has_bias,
activation=None,
data_format='channels_first',
dilation_rate=params['dilations'],
bias_initializer='zeros', kernel_initializer='zeros',
name=tf_name
)
layers[scope_name] = conv(layers[input_name]) | def function[convert_conv, parameter[params, w_name, scope_name, inputs, layers, weights, names]]:
constant[
Convert convolution layer.
Args:
params: dictionary with layer parameters
w_name: name prefix in state_dict
scope_name: pytorch scope name
inputs: pytorch node inputs
layers: dictionary with keras tensors
weights: pytorch state_dict
names: use short names for keras layers
]
call[name[print], parameter[constant[Converting convolution ...]]]
if compare[name[names] equal[==] constant[short]] begin[:]
variable[tf_name] assign[=] binary_operation[constant[C] + call[name[random_string], parameter[constant[7]]]]
variable[bias_name] assign[=] call[constant[{0}.bias].format, parameter[name[w_name]]]
variable[weights_name] assign[=] call[constant[{0}.weight].format, parameter[name[w_name]]]
variable[input_name] assign[=] call[name[inputs]][constant[0]]
if compare[call[name[len], parameter[call[call[name[weights]][name[weights_name]].numpy, parameter[]].shape]] equal[==] constant[5]] begin[:]
variable[W] assign[=] call[call[call[name[weights]][name[weights_name]].numpy, parameter[]].transpose, parameter[constant[2], constant[3], constant[4], constant[1], constant[0]]]
<ast.Tuple object at 0x7da1b01ff0a0> assign[=] name[W].shape
if compare[name[bias_name] in name[weights]] begin[:]
variable[biases] assign[=] call[call[name[weights]][name[bias_name]].numpy, parameter[]]
variable[has_bias] assign[=] constant[True]
if <ast.BoolOp object at 0x7da18ede44f0> begin[:]
variable[padding_name] assign[=] binary_operation[name[tf_name] + constant[_pad]]
variable[padding_layer] assign[=] call[name[keras].layers.ZeroPadding3D, parameter[]]
call[name[layers]][name[padding_name]] assign[=] call[name[padding_layer], parameter[call[name[layers]][name[input_name]]]]
variable[input_name] assign[=] name[padding_name]
if name[has_bias] begin[:]
variable[weights] assign[=] list[[<ast.Name object at 0x7da1b019b760>, <ast.Name object at 0x7da1b0199b10>]]
variable[conv] assign[=] call[name[keras].layers.Conv3D, parameter[]]
call[name[layers]][name[scope_name]] assign[=] call[name[conv], parameter[call[name[layers]][name[input_name]]]] | keyword[def] identifier[convert_conv] ( identifier[params] , identifier[w_name] , identifier[scope_name] , identifier[inputs] , identifier[layers] , identifier[weights] , identifier[names] ):
literal[string]
identifier[print] ( literal[string] )
keyword[if] identifier[names] == literal[string] :
identifier[tf_name] = literal[string] + identifier[random_string] ( literal[int] )
keyword[elif] identifier[names] == literal[string] :
identifier[tf_name] = identifier[w_name]
keyword[else] :
identifier[tf_name] = identifier[w_name] + identifier[str] ( identifier[random] . identifier[random] ())
identifier[bias_name] = literal[string] . identifier[format] ( identifier[w_name] )
identifier[weights_name] = literal[string] . identifier[format] ( identifier[w_name] )
identifier[input_name] = identifier[inputs] [ literal[int] ]
keyword[if] identifier[len] ( identifier[weights] [ identifier[weights_name] ]. identifier[numpy] (). identifier[shape] )== literal[int] :
identifier[W] = identifier[weights] [ identifier[weights_name] ]. identifier[numpy] (). identifier[transpose] ( literal[int] , literal[int] , literal[int] , literal[int] , literal[int] )
identifier[height] , identifier[width] , identifier[channels] , identifier[n_layers] , identifier[n_filters] = identifier[W] . identifier[shape]
keyword[if] identifier[bias_name] keyword[in] identifier[weights] :
identifier[biases] = identifier[weights] [ identifier[bias_name] ]. identifier[numpy] ()
identifier[has_bias] = keyword[True]
keyword[else] :
identifier[biases] = keyword[None]
identifier[has_bias] = keyword[False]
keyword[if] identifier[params] [ literal[string] ][ literal[int] ]> literal[int] keyword[or] identifier[params] [ literal[string] ][ literal[int] ]> literal[int] :
identifier[padding_name] = identifier[tf_name] + literal[string]
identifier[padding_layer] = identifier[keras] . identifier[layers] . identifier[ZeroPadding3D] (
identifier[padding] =( identifier[params] [ literal[string] ][ literal[int] ],
identifier[params] [ literal[string] ][ literal[int] ],
identifier[params] [ literal[string] ][ literal[int] ]),
identifier[name] = identifier[padding_name]
)
identifier[layers] [ identifier[padding_name] ]= identifier[padding_layer] ( identifier[layers] [ identifier[input_name] ])
identifier[input_name] = identifier[padding_name]
keyword[if] identifier[has_bias] :
identifier[weights] =[ identifier[W] , identifier[biases] ]
keyword[else] :
identifier[weights] =[ identifier[W] ]
identifier[conv] = identifier[keras] . identifier[layers] . identifier[Conv3D] (
identifier[filters] = identifier[n_filters] ,
identifier[kernel_size] =( identifier[channels] , identifier[height] , identifier[width] ),
identifier[strides] =( identifier[params] [ literal[string] ][ literal[int] ],
identifier[params] [ literal[string] ][ literal[int] ],
identifier[params] [ literal[string] ][ literal[int] ]),
identifier[padding] = literal[string] ,
identifier[weights] = identifier[weights] ,
identifier[use_bias] = identifier[has_bias] ,
identifier[activation] = keyword[None] ,
identifier[dilation_rate] = identifier[params] [ literal[string] ][ literal[int] ],
identifier[bias_initializer] = literal[string] , identifier[kernel_initializer] = literal[string] ,
identifier[name] = identifier[tf_name]
)
identifier[layers] [ identifier[scope_name] ]= identifier[conv] ( identifier[layers] [ identifier[input_name] ])
keyword[elif] identifier[len] ( identifier[weights] [ identifier[weights_name] ]. identifier[numpy] (). identifier[shape] )== literal[int] :
keyword[if] identifier[params] [ literal[string] ][ literal[int] ]> literal[int] keyword[or] identifier[params] [ literal[string] ][ literal[int] ]> literal[int] :
identifier[padding_name] = identifier[tf_name] + literal[string]
identifier[padding_layer] = identifier[keras] . identifier[layers] . identifier[ZeroPadding2D] (
identifier[padding] =( identifier[params] [ literal[string] ][ literal[int] ], identifier[params] [ literal[string] ][ literal[int] ]),
identifier[name] = identifier[padding_name]
)
identifier[layers] [ identifier[padding_name] ]= identifier[padding_layer] ( identifier[layers] [ identifier[input_name] ])
identifier[input_name] = identifier[padding_name]
identifier[W] = identifier[weights] [ identifier[weights_name] ]. identifier[numpy] (). identifier[transpose] ( literal[int] , literal[int] , literal[int] , literal[int] )
identifier[height] , identifier[width] , identifier[channels_per_group] , identifier[out_channels] = identifier[W] . identifier[shape]
identifier[n_groups] = identifier[params] [ literal[string] ]
identifier[in_channels] = identifier[channels_per_group] * identifier[n_groups]
keyword[if] identifier[n_groups] == identifier[in_channels] keyword[and] identifier[n_groups] != literal[int] :
keyword[if] identifier[bias_name] keyword[in] identifier[weights] :
identifier[biases] = identifier[weights] [ identifier[bias_name] ]. identifier[numpy] ()
identifier[has_bias] = keyword[True]
keyword[else] :
identifier[biases] = keyword[None]
identifier[has_bias] = keyword[False]
identifier[W] = identifier[W] . identifier[transpose] ( literal[int] , literal[int] , literal[int] , literal[int] )
keyword[if] identifier[has_bias] :
identifier[weights] =[ identifier[W] , identifier[biases] ]
keyword[else] :
identifier[weights] =[ identifier[W] ]
identifier[conv] = identifier[keras] . identifier[layers] . identifier[DepthwiseConv2D] (
identifier[kernel_size] =( identifier[height] , identifier[width] ),
identifier[strides] =( identifier[params] [ literal[string] ][ literal[int] ], identifier[params] [ literal[string] ][ literal[int] ]),
identifier[padding] = literal[string] ,
identifier[use_bias] = identifier[has_bias] ,
identifier[activation] = keyword[None] ,
identifier[depth_multiplier] = literal[int] ,
identifier[weights] = identifier[weights] ,
identifier[dilation_rate] = identifier[params] [ literal[string] ][ literal[int] ],
identifier[bias_initializer] = literal[string] , identifier[kernel_initializer] = literal[string]
)
identifier[layers] [ identifier[scope_name] ]= identifier[conv] ( identifier[layers] [ identifier[input_name] ])
keyword[elif] identifier[n_groups] != literal[int] :
keyword[def] identifier[target_layer] ( identifier[x] , identifier[groups] = identifier[params] [ literal[string] ], identifier[stride_y] = identifier[params] [ literal[string] ][ literal[int] ], identifier[stride_x] = identifier[params] [ literal[string] ][ literal[int] ]):
identifier[x] = identifier[tf] . identifier[transpose] ( identifier[x] ,[ literal[int] , literal[int] , literal[int] , literal[int] ])
keyword[def] identifier[convolve_lambda] ( identifier[i] , identifier[k] ):
keyword[return] identifier[tf] . identifier[nn] . identifier[conv2d] ( identifier[i] , identifier[k] , identifier[strides] =[ literal[int] , identifier[stride_y] , identifier[stride_x] , literal[int] ], identifier[padding] = literal[string] )
identifier[input_groups] = identifier[tf] . identifier[split] ( identifier[axis] = literal[int] , identifier[num_or_size_splits] = identifier[groups] , identifier[value] = identifier[x] )
identifier[weight_groups] = identifier[tf] . identifier[split] ( identifier[axis] = literal[int] , identifier[num_or_size_splits] = identifier[groups] , identifier[value] = identifier[W] . identifier[transpose] ( literal[int] , literal[int] , literal[int] , literal[int] ))
identifier[output_groups] =[ identifier[convolve_lambda] ( identifier[i] , identifier[k] ) keyword[for] identifier[i] , identifier[k] keyword[in] identifier[zip] ( identifier[input_groups] , identifier[weight_groups] )]
identifier[layer] = identifier[tf] . identifier[concat] ( identifier[axis] = literal[int] , identifier[values] = identifier[output_groups] )
identifier[layer] = identifier[tf] . identifier[transpose] ( identifier[layer] ,[ literal[int] , literal[int] , literal[int] , literal[int] ])
keyword[return] identifier[layer]
identifier[lambda_layer] = identifier[keras] . identifier[layers] . identifier[Lambda] ( identifier[target_layer] )
identifier[layers] [ identifier[scope_name] ]= identifier[lambda_layer] ( identifier[layers] [ identifier[input_name] ])
keyword[else] :
keyword[if] identifier[bias_name] keyword[in] identifier[weights] :
identifier[biases] = identifier[weights] [ identifier[bias_name] ]. identifier[numpy] ()
identifier[has_bias] = keyword[True]
keyword[else] :
identifier[biases] = keyword[None]
identifier[has_bias] = keyword[False]
keyword[if] identifier[has_bias] :
identifier[weights] =[ identifier[W] , identifier[biases] ]
keyword[else] :
identifier[weights] =[ identifier[W] ]
identifier[conv] = identifier[keras] . identifier[layers] . identifier[Conv2D] (
identifier[filters] = identifier[out_channels] ,
identifier[kernel_size] =( identifier[height] , identifier[width] ),
identifier[strides] =( identifier[params] [ literal[string] ][ literal[int] ], identifier[params] [ literal[string] ][ literal[int] ]),
identifier[padding] = literal[string] ,
identifier[weights] = identifier[weights] ,
identifier[use_bias] = identifier[has_bias] ,
identifier[activation] = keyword[None] ,
identifier[dilation_rate] = identifier[params] [ literal[string] ][ literal[int] ],
identifier[bias_initializer] = literal[string] , identifier[kernel_initializer] = literal[string] ,
identifier[name] = identifier[tf_name]
)
identifier[layers] [ identifier[scope_name] ]= identifier[conv] ( identifier[layers] [ identifier[input_name] ])
keyword[else] :
identifier[W] = identifier[weights] [ identifier[weights_name] ]. identifier[numpy] (). identifier[transpose] ( literal[int] , literal[int] , literal[int] )
identifier[width] , identifier[channels] , identifier[n_filters] = identifier[W] . identifier[shape]
identifier[n_groups] = identifier[params] [ literal[string] ]
keyword[if] identifier[n_groups] > literal[int] :
keyword[raise] identifier[AssertionError] ( literal[string] )
keyword[if] identifier[bias_name] keyword[in] identifier[weights] :
identifier[biases] = identifier[weights] [ identifier[bias_name] ]. identifier[numpy] ()
identifier[has_bias] = keyword[True]
keyword[else] :
identifier[biases] = keyword[None]
identifier[has_bias] = keyword[False]
identifier[padding_name] = identifier[tf_name] + literal[string]
identifier[padding_layer] = identifier[keras] . identifier[layers] . identifier[ZeroPadding1D] (
identifier[padding] = identifier[params] [ literal[string] ][ literal[int] ],
identifier[name] = identifier[padding_name]
)
identifier[layers] [ identifier[padding_name] ]= identifier[padding_layer] ( identifier[layers] [ identifier[inputs] [ literal[int] ]])
identifier[input_name] = identifier[padding_name]
keyword[if] identifier[has_bias] :
identifier[weights] =[ identifier[W] , identifier[biases] ]
keyword[else] :
identifier[weights] =[ identifier[W] ]
identifier[conv] = identifier[keras] . identifier[layers] . identifier[Conv1D] (
identifier[filters] = identifier[channels] ,
identifier[kernel_size] = identifier[width] ,
identifier[strides] = identifier[params] [ literal[string] ],
identifier[padding] = literal[string] ,
identifier[weights] = identifier[weights] ,
identifier[use_bias] = identifier[has_bias] ,
identifier[activation] = keyword[None] ,
identifier[data_format] = literal[string] ,
identifier[dilation_rate] = identifier[params] [ literal[string] ],
identifier[bias_initializer] = literal[string] , identifier[kernel_initializer] = literal[string] ,
identifier[name] = identifier[tf_name]
)
identifier[layers] [ identifier[scope_name] ]= identifier[conv] ( identifier[layers] [ identifier[input_name] ]) | def convert_conv(params, w_name, scope_name, inputs, layers, weights, names):
"""
Convert convolution layer.
Args:
params: dictionary with layer parameters
w_name: name prefix in state_dict
scope_name: pytorch scope name
inputs: pytorch node inputs
layers: dictionary with keras tensors
weights: pytorch state_dict
names: use short names for keras layers
"""
print('Converting convolution ...')
if names == 'short':
tf_name = 'C' + random_string(7) # depends on [control=['if'], data=[]]
elif names == 'keep':
tf_name = w_name # depends on [control=['if'], data=[]]
else:
tf_name = w_name + str(random.random())
bias_name = '{0}.bias'.format(w_name)
weights_name = '{0}.weight'.format(w_name)
input_name = inputs[0]
if len(weights[weights_name].numpy().shape) == 5: # 3D conv
W = weights[weights_name].numpy().transpose(2, 3, 4, 1, 0)
(height, width, channels, n_layers, n_filters) = W.shape
if bias_name in weights:
biases = weights[bias_name].numpy()
has_bias = True # depends on [control=['if'], data=['bias_name', 'weights']]
else:
biases = None
has_bias = False
if params['pads'][0] > 0 or params['pads'][1] > 0:
padding_name = tf_name + '_pad'
padding_layer = keras.layers.ZeroPadding3D(padding=(params['pads'][0], params['pads'][1], params['pads'][2]), name=padding_name)
layers[padding_name] = padding_layer(layers[input_name])
input_name = padding_name # depends on [control=['if'], data=[]]
if has_bias:
weights = [W, biases] # depends on [control=['if'], data=[]]
else:
weights = [W]
conv = keras.layers.Conv3D(filters=n_filters, kernel_size=(channels, height, width), strides=(params['strides'][0], params['strides'][1], params['strides'][2]), padding='valid', weights=weights, use_bias=has_bias, activation=None, dilation_rate=params['dilations'][0], bias_initializer='zeros', kernel_initializer='zeros', name=tf_name)
layers[scope_name] = conv(layers[input_name]) # depends on [control=['if'], data=[]]
elif len(weights[weights_name].numpy().shape) == 4: # 2D conv
if params['pads'][0] > 0 or params['pads'][1] > 0:
padding_name = tf_name + '_pad'
padding_layer = keras.layers.ZeroPadding2D(padding=(params['pads'][0], params['pads'][1]), name=padding_name)
layers[padding_name] = padding_layer(layers[input_name])
input_name = padding_name # depends on [control=['if'], data=[]]
W = weights[weights_name].numpy().transpose(2, 3, 1, 0)
(height, width, channels_per_group, out_channels) = W.shape
n_groups = params['group']
in_channels = channels_per_group * n_groups
if n_groups == in_channels and n_groups != 1:
if bias_name in weights:
biases = weights[bias_name].numpy()
has_bias = True # depends on [control=['if'], data=['bias_name', 'weights']]
else:
biases = None
has_bias = False
W = W.transpose(0, 1, 3, 2)
if has_bias:
weights = [W, biases] # depends on [control=['if'], data=[]]
else:
weights = [W]
conv = keras.layers.DepthwiseConv2D(kernel_size=(height, width), strides=(params['strides'][0], params['strides'][1]), padding='valid', use_bias=has_bias, activation=None, depth_multiplier=1, weights=weights, dilation_rate=params['dilations'][0], bias_initializer='zeros', kernel_initializer='zeros')
layers[scope_name] = conv(layers[input_name]) # depends on [control=['if'], data=[]]
elif n_groups != 1:
# Example from https://kratzert.github.io/2017/02/24/finetuning-alexnet-with-tensorflow.html
# # Split input and weights and convolve them separately
# input_groups = tf.split(axis=3, num_or_size_splits=groups, value=x)
# weight_groups = tf.split(axis=3, num_or_size_splits=groups, value=weights)
# output_groups = [convolve(i, k) for i, k in zip(input_groups, weight_groups)]
# # Concat the convolved output together again
# conv = tf.concat(axis=3, values=output_groups)
def target_layer(x, groups=params['group'], stride_y=params['strides'][0], stride_x=params['strides'][1]):
x = tf.transpose(x, [0, 2, 3, 1])
def convolve_lambda(i, k):
return tf.nn.conv2d(i, k, strides=[1, stride_y, stride_x, 1], padding='VALID')
input_groups = tf.split(axis=3, num_or_size_splits=groups, value=x)
weight_groups = tf.split(axis=3, num_or_size_splits=groups, value=W.transpose(0, 1, 2, 3))
output_groups = [convolve_lambda(i, k) for (i, k) in zip(input_groups, weight_groups)]
layer = tf.concat(axis=3, values=output_groups)
layer = tf.transpose(layer, [0, 3, 1, 2])
return layer
lambda_layer = keras.layers.Lambda(target_layer)
layers[scope_name] = lambda_layer(layers[input_name]) # depends on [control=['if'], data=[]]
else:
if bias_name in weights:
biases = weights[bias_name].numpy()
has_bias = True # depends on [control=['if'], data=['bias_name', 'weights']]
else:
biases = None
has_bias = False
if has_bias:
weights = [W, biases] # depends on [control=['if'], data=[]]
else:
weights = [W]
conv = keras.layers.Conv2D(filters=out_channels, kernel_size=(height, width), strides=(params['strides'][0], params['strides'][1]), padding='valid', weights=weights, use_bias=has_bias, activation=None, dilation_rate=params['dilations'][0], bias_initializer='zeros', kernel_initializer='zeros', name=tf_name)
layers[scope_name] = conv(layers[input_name]) # depends on [control=['if'], data=[]]
else: # 1D conv
W = weights[weights_name].numpy().transpose(2, 1, 0)
(width, channels, n_filters) = W.shape
n_groups = params['group']
if n_groups > 1:
raise AssertionError('Cannot convert conv1d with groups != 1') # depends on [control=['if'], data=[]]
if bias_name in weights:
biases = weights[bias_name].numpy()
has_bias = True # depends on [control=['if'], data=['bias_name', 'weights']]
else:
biases = None
has_bias = False
padding_name = tf_name + '_pad'
padding_layer = keras.layers.ZeroPadding1D(padding=params['pads'][0], name=padding_name)
layers[padding_name] = padding_layer(layers[inputs[0]])
input_name = padding_name
if has_bias:
weights = [W, biases] # depends on [control=['if'], data=[]]
else:
weights = [W]
conv = keras.layers.Conv1D(filters=channels, kernel_size=width, strides=params['strides'], padding='valid', weights=weights, use_bias=has_bias, activation=None, data_format='channels_first', dilation_rate=params['dilations'], bias_initializer='zeros', kernel_initializer='zeros', name=tf_name)
layers[scope_name] = conv(layers[input_name]) |
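
A hypothetical invocation mirroring how a pytorch2keras-style dispatch loop would call this converter for a single unpadded 3x3 2D conv; `layers['input0']` and `state_dict` are assumed to have been prepared by the surrounding converter:

params = {'pads': [0, 0], 'strides': [1, 1], 'dilations': [1], 'group': 1}
convert_conv(params, 'conv1', 'conv1_out', ['input0'], layers, state_dict, names='keep')
print(layers['conv1_out'])  # the Keras tensor registered under the scope name
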
def create_indexes(self, indexes, session=None, **kwargs):
"""Create one or more indexes on this collection.
>>> from pymongo import IndexModel, ASCENDING, DESCENDING
>>> index1 = IndexModel([("hello", DESCENDING),
... ("world", ASCENDING)], name="hello_world")
>>> index2 = IndexModel([("goodbye", DESCENDING)])
>>> db.test.create_indexes([index1, index2])
["hello_world", "goodbye_-1"]
:Parameters:
- `indexes`: A list of :class:`~pymongo.operations.IndexModel`
instances.
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
- `**kwargs` (optional): optional arguments to the createIndexes
command (like maxTimeMS) can be passed as keyword arguments.
.. note:: `create_indexes` uses the `createIndexes`_ command
introduced in MongoDB **2.6** and cannot be used with earlier
versions.
.. note:: The :attr:`~pymongo.collection.Collection.write_concern` of
this collection is automatically applied to this operation when using
MongoDB >= 3.4.
.. versionchanged:: 3.6
Added ``session`` parameter. Added support for arbitrary keyword
arguments.
.. versionchanged:: 3.4
Apply this collection's write concern automatically to this operation
when connected to MongoDB >= 3.4.
.. versionadded:: 3.0
.. _createIndexes: https://docs.mongodb.com/manual/reference/command/createIndexes/
"""
common.validate_list('indexes', indexes)
names = []
with self._socket_for_writes(session) as sock_info:
supports_collations = sock_info.max_wire_version >= 5
def gen_indexes():
for index in indexes:
if not isinstance(index, IndexModel):
raise TypeError(
"%r is not an instance of "
"pymongo.operations.IndexModel" % (index,))
document = index.document
if "collation" in document and not supports_collations:
raise ConfigurationError(
"Must be connected to MongoDB "
"3.4+ to use collations.")
names.append(document["name"])
yield document
cmd = SON([('createIndexes', self.name),
('indexes', list(gen_indexes()))])
cmd.update(kwargs)
self._command(
sock_info, cmd, read_preference=ReadPreference.PRIMARY,
codec_options=_UNICODE_REPLACE_CODEC_OPTIONS,
write_concern=self._write_concern_for(session),
session=session)
return names | def function[create_indexes, parameter[self, indexes, session]]:
constant[Create one or more indexes on this collection.
>>> from pymongo import IndexModel, ASCENDING, DESCENDING
>>> index1 = IndexModel([("hello", DESCENDING),
... ("world", ASCENDING)], name="hello_world")
>>> index2 = IndexModel([("goodbye", DESCENDING)])
>>> db.test.create_indexes([index1, index2])
["hello_world", "goodbye_-1"]
:Parameters:
- `indexes`: A list of :class:`~pymongo.operations.IndexModel`
instances.
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
- `**kwargs` (optional): optional arguments to the createIndexes
command (like maxTimeMS) can be passed as keyword arguments.
.. note:: `create_indexes` uses the `createIndexes`_ command
introduced in MongoDB **2.6** and cannot be used with earlier
versions.
.. note:: The :attr:`~pymongo.collection.Collection.write_concern` of
this collection is automatically applied to this operation when using
MongoDB >= 3.4.
.. versionchanged:: 3.6
Added ``session`` parameter. Added support for arbitrary keyword
arguments.
.. versionchanged:: 3.4
Apply this collection's write concern automatically to this operation
when connected to MongoDB >= 3.4.
.. versionadded:: 3.0
.. _createIndexes: https://docs.mongodb.com/manual/reference/command/createIndexes/
]
call[name[common].validate_list, parameter[constant[indexes], name[indexes]]]
variable[names] assign[=] list[[]]
with call[name[self]._socket_for_writes, parameter[name[session]]] begin[:]
variable[supports_collations] assign[=] compare[name[sock_info].max_wire_version greater_or_equal[>=] constant[5]]
def function[gen_indexes, parameter[]]:
for taget[name[index]] in starred[name[indexes]] begin[:]
if <ast.UnaryOp object at 0x7da20c6a8ee0> begin[:]
<ast.Raise object at 0x7da20c6ab670>
variable[document] assign[=] name[index].document
if <ast.BoolOp object at 0x7da20c6a8250> begin[:]
<ast.Raise object at 0x7da20c7caec0>
call[name[names].append, parameter[call[name[document]][constant[name]]]]
<ast.Yield object at 0x7da20c7ca410>
variable[cmd] assign[=] call[name[SON], parameter[list[[<ast.Tuple object at 0x7da20c7cb190>, <ast.Tuple object at 0x7da20c7c9090>]]]]
call[name[cmd].update, parameter[name[kwargs]]]
call[name[self]._command, parameter[name[sock_info], name[cmd]]]
return[name[names]] | keyword[def] identifier[create_indexes] ( identifier[self] , identifier[indexes] , identifier[session] = keyword[None] ,** identifier[kwargs] ):
literal[string]
identifier[common] . identifier[validate_list] ( literal[string] , identifier[indexes] )
identifier[names] =[]
keyword[with] identifier[self] . identifier[_socket_for_writes] ( identifier[session] ) keyword[as] identifier[sock_info] :
identifier[supports_collations] = identifier[sock_info] . identifier[max_wire_version] >= literal[int]
keyword[def] identifier[gen_indexes] ():
keyword[for] identifier[index] keyword[in] identifier[indexes] :
keyword[if] keyword[not] identifier[isinstance] ( identifier[index] , identifier[IndexModel] ):
keyword[raise] identifier[TypeError] (
literal[string]
literal[string] %( identifier[index] ,))
identifier[document] = identifier[index] . identifier[document]
keyword[if] literal[string] keyword[in] identifier[document] keyword[and] keyword[not] identifier[supports_collations] :
keyword[raise] identifier[ConfigurationError] (
literal[string]
literal[string] )
identifier[names] . identifier[append] ( identifier[document] [ literal[string] ])
keyword[yield] identifier[document]
identifier[cmd] = identifier[SON] ([( literal[string] , identifier[self] . identifier[name] ),
( literal[string] , identifier[list] ( identifier[gen_indexes] ()))])
identifier[cmd] . identifier[update] ( identifier[kwargs] )
identifier[self] . identifier[_command] (
identifier[sock_info] , identifier[cmd] , identifier[read_preference] = identifier[ReadPreference] . identifier[PRIMARY] ,
identifier[codec_options] = identifier[_UNICODE_REPLACE_CODEC_OPTIONS] ,
identifier[write_concern] = identifier[self] . identifier[_write_concern_for] ( identifier[session] ),
identifier[session] = identifier[session] )
keyword[return] identifier[names] | def create_indexes(self, indexes, session=None, **kwargs):
"""Create one or more indexes on this collection.
>>> from pymongo import IndexModel, ASCENDING, DESCENDING
>>> index1 = IndexModel([("hello", DESCENDING),
... ("world", ASCENDING)], name="hello_world")
>>> index2 = IndexModel([("goodbye", DESCENDING)])
>>> db.test.create_indexes([index1, index2])
["hello_world", "goodbye_-1"]
:Parameters:
- `indexes`: A list of :class:`~pymongo.operations.IndexModel`
instances.
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
- `**kwargs` (optional): optional arguments to the createIndexes
command (like maxTimeMS) can be passed as keyword arguments.
.. note:: `create_indexes` uses the `createIndexes`_ command
introduced in MongoDB **2.6** and cannot be used with earlier
versions.
.. note:: The :attr:`~pymongo.collection.Collection.write_concern` of
this collection is automatically applied to this operation when using
MongoDB >= 3.4.
.. versionchanged:: 3.6
Added ``session`` parameter. Added support for arbitrary keyword
arguments.
.. versionchanged:: 3.4
Apply this collection's write concern automatically to this operation
when connected to MongoDB >= 3.4.
.. versionadded:: 3.0
.. _createIndexes: https://docs.mongodb.com/manual/reference/command/createIndexes/
"""
common.validate_list('indexes', indexes)
names = []
with self._socket_for_writes(session) as sock_info:
supports_collations = sock_info.max_wire_version >= 5
def gen_indexes():
for index in indexes:
if not isinstance(index, IndexModel):
raise TypeError('%r is not an instance of pymongo.operations.IndexModel' % (index,)) # depends on [control=['if'], data=[]]
document = index.document
if 'collation' in document and (not supports_collations):
raise ConfigurationError('Must be connected to MongoDB 3.4+ to use collations.') # depends on [control=['if'], data=[]]
names.append(document['name'])
yield document # depends on [control=['for'], data=['index']]
cmd = SON([('createIndexes', self.name), ('indexes', list(gen_indexes()))])
cmd.update(kwargs)
self._command(sock_info, cmd, read_preference=ReadPreference.PRIMARY, codec_options=_UNICODE_REPLACE_CODEC_OPTIONS, write_concern=self._write_concern_for(session), session=session) # depends on [control=['with'], data=['sock_info']]
return names |
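
A short sketch exercising the kwargs pass-through to the createIndexes command, assuming a reachable local mongod:

from pymongo import MongoClient, IndexModel, ASCENDING

coll = MongoClient()['testdb']['testcoll']
names = coll.create_indexes([IndexModel([('ts', ASCENDING)])], maxTimeMS=10000)
print(names)  # ['ts_1']
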
async def get_instances(self, **kwargs) -> List[ApiResource]:
"""Returns a list of resource instances.
:raises PvApiError when a hub problem occurs."""
raw_resources = await self.get_resources(**kwargs)
_instances = [
self._resource_factory(_raw)
for _raw in self._loop_raw(raw_resources)
]
return _instances | <ast.AsyncFunctionDef object at 0x7da20c795fc0> | keyword[async] keyword[def] identifier[get_instances] ( identifier[self] ,** identifier[kwargs] )-> identifier[List] [ identifier[ApiResource] ]:
literal[string]
identifier[raw_resources] = keyword[await] identifier[self] . identifier[get_resources] (** identifier[kwargs] )
identifier[_instances] =[
identifier[self] . identifier[_resource_factory] ( identifier[_raw] )
keyword[for] identifier[_raw] keyword[in] identifier[self] . identifier[_loop_raw] ( identifier[raw_resources] )
]
keyword[return] identifier[_instances] | async def get_instances(self, **kwargs) -> List[ApiResource]:
"""Returns a list of resource instances.
:raises PvApiError when a hub problem occurs."""
raw_resources = await self.get_resources(**kwargs)
_instances = [self._resource_factory(_raw) for _raw in self._loop_raw(raw_resources)]
return _instances |
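
A hypothetical call site, assuming `entry_point` is an instance of the class above with `get_resources`, `_loop_raw`, and `_resource_factory` implemented:

import asyncio

instances = asyncio.run(entry_point.get_instances())
for resource in instances:
    print(resource)  # each item is an ApiResource built by _resource_factory
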
def kill(restriction=None, connection=None): # pragma: no cover
"""
View and kill database connections.
:param restriction: restriction to be applied to processlist
:param connection: a datajoint.Connection object. Default calls datajoint.conn()
Restrictions are specified as strings and can involve any of the attributes of
information_schema.processlist: ID, USER, HOST, DB, COMMAND, TIME, STATE, INFO.
Examples:
dj.kill('HOST LIKE "%compute%"') lists only connections from hosts containing "compute".
dj.kill('TIME > 600') lists only connections older than 10 minutes.
"""
if connection is None:
connection = conn()
query = 'SELECT * FROM information_schema.processlist WHERE id <> CONNECTION_ID()' + (
"" if restriction is None else ' AND (%s)' % restriction)
while True:
print(' ID USER STATE TIME INFO')
print('+--+ +----------+ +-----------+ +--+')
cur = connection.query(query, as_dict=True)
for process in cur:
try:
print('{ID:>4d} {USER:<12s} {STATE:<12s} {TIME:>5d} {INFO}'.format(**process))
except TypeError:
print(process)
response = input('process to kill or "q" to quit > ')
if response == 'q':
break
if response:
try:
pid = int(response)
except ValueError:
pass # ignore non-numeric input
else:
try:
connection.query('kill %d' % pid)
except pymysql.err.InternalError:
print('Process not found') | def function[kill, parameter[restriction, connection]]:
constant[
view and kill database connections.
:param restriction: restriction to be applied to processlist
:param connection: a datajoint.Connection object. Default calls datajoint.conn()
Restrictions are specified as strings and can involve any of the attributes of
information_schema.processlist: ID, USER, HOST, DB, COMMAND, TIME, STATE, INFO.
Examples:
dj.kill('HOST LIKE "%compute%"') lists only connections from hosts containing "compute".
dj.kill('TIME > 600') lists only connections older than 10 minutes.
]
if compare[name[connection] is constant[None]] begin[:]
variable[connection] assign[=] call[name[conn], parameter[]]
variable[query] assign[=] binary_operation[constant[SELECT * FROM information_schema.processlist WHERE id <> CONNECTION_ID()] + <ast.IfExp object at 0x7da1b12da0e0>]
while constant[True] begin[:]
call[name[print], parameter[constant[ ID USER STATE TIME INFO]]]
call[name[print], parameter[constant[+--+ +----------+ +-----------+ +--+]]]
variable[cur] assign[=] call[name[connection].query, parameter[name[query]]]
for taget[name[process]] in starred[name[cur]] begin[:]
<ast.Try object at 0x7da1b12da0b0>
variable[response] assign[=] call[name[input], parameter[constant[process to kill or "q" to quit > ]]]
if compare[name[response] equal[==] constant[q]] begin[:]
break
if name[response] begin[:]
<ast.Try object at 0x7da1b12d8820> | keyword[def] identifier[kill] ( identifier[restriction] = keyword[None] , identifier[connection] = keyword[None] ):
literal[string]
keyword[if] identifier[connection] keyword[is] keyword[None] :
identifier[connection] = identifier[conn] ()
identifier[query] = literal[string] +(
literal[string] keyword[if] identifier[restriction] keyword[is] keyword[None] keyword[else] literal[string] % identifier[restriction] )
keyword[while] keyword[True] :
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
identifier[cur] = identifier[connection] . identifier[query] ( identifier[query] , identifier[as_dict] = keyword[True] )
keyword[for] identifier[process] keyword[in] identifier[cur] :
keyword[try] :
identifier[print] ( literal[string] . identifier[format] (** identifier[process] ))
keyword[except] identifier[TypeError] :
identifier[print] ( identifier[process] )
identifier[response] = identifier[input] ( literal[string] )
keyword[if] identifier[response] == literal[string] :
keyword[break]
keyword[if] identifier[response] :
keyword[try] :
identifier[pid] = identifier[int] ( identifier[response] )
keyword[except] identifier[ValueError] :
keyword[pass]
keyword[else] :
keyword[try] :
identifier[connection] . identifier[query] ( literal[string] % identifier[pid] )
keyword[except] identifier[pymysql] . identifier[err] . identifier[InternalError] :
identifier[print] ( literal[string] ) | def kill(restriction=None, connection=None): # pragma: no cover
'\n view and kill database connections.\n :param restriction: restriction to be applied to processlist\n :param connection: a datajoint.Connection object. Default calls datajoint.conn()\n\n Restrictions are specified as strings and can involve any of the attributes of\n information_schema.processlist: ID, USER, HOST, DB, COMMAND, TIME, STATE, INFO.\n\n Examples:\n dj.kill(\'HOST LIKE "%compute%"\') lists only connections from hosts containing "compute".\n dj.kill(\'TIME > 600\') lists only connections older than 10 minutes.\n '
if connection is None:
connection = conn() # depends on [control=['if'], data=['connection']]
query = 'SELECT * FROM information_schema.processlist WHERE id <> CONNECTION_ID()' + ('' if restriction is None else ' AND (%s)' % restriction)
while True:
print(' ID USER STATE TIME INFO')
print('+--+ +----------+ +-----------+ +--+')
cur = connection.query(query, as_dict=True)
for process in cur:
try:
print('{ID:>4d} {USER:<12s} {STATE:<12s} {TIME:>5d} {INFO}'.format(**process)) # depends on [control=['try'], data=[]]
except TypeError:
print(process) # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['process']]
response = input('process to kill or "q" to quit > ')
if response == 'q':
break # depends on [control=['if'], data=[]]
if response:
try:
pid = int(response) # depends on [control=['try'], data=[]]
except ValueError:
pass # ignore non-numeric input # depends on [control=['except'], data=[]]
else:
try:
connection.query('kill %d' % pid) # depends on [control=['try'], data=[]]
except pymysql.err.InternalError:
print('Process not found') # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]] |
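The kill helper above pairs a processlist query with an interactive prompt. A minimal standalone sketch of that review loop follows; fetch_processes() is a hypothetical stand-in for connection.query(...) in the record above.

# Sketch of the review-and-kill prompt loop; fetch_processes() is a
# hypothetical stand-in for connection.query(...) in the record above.
def fetch_processes():
    return [{"ID": 12, "USER": "alice", "STATE": "Sleep", "TIME": 640, "INFO": None}]

def review_loop():
    while True:
        for process in fetch_processes():
            print("{ID:>4d} {USER:<12s} {STATE:<12s} {TIME:>5d} {INFO}".format(**process))
        response = input('process to kill or "q" to quit > ')
        if response == "q":
            break
        if response.isdigit():
            print("would issue: kill %d" % int(response))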
def init_reddit(generator):
"""
this is a hack to make sure the reddit object keeps track of a session
    through article scanning, speeding up networking as the connection can be
kept alive.
"""
auth_dict = generator.settings.get('REDDIT_POSTER_AUTH')
if auth_dict is None:
log.info("Could not find REDDIT_POSTER_AUTH key in settings, reddit plugin won't function")
generator.get_reddit = lambda: None
return
reddit = praw.Reddit(**auth_dict)
generator.get_reddit = lambda: reddit | def function[init_reddit, parameter[generator]]:
constant[
this is a hack to make sure the reddit object keeps track of a session
    through article scanning, speeding up networking as the connection can be
kept alive.
]
variable[auth_dict] assign[=] call[name[generator].settings.get, parameter[constant[REDDIT_POSTER_AUTH]]]
if compare[name[auth_dict] is constant[None]] begin[:]
call[name[log].info, parameter[constant[Could not find REDDIT_POSTER_AUTH key in settings, reddit plugin won't function]]]
name[generator].get_reddit assign[=] <ast.Lambda object at 0x7da1b1d39ae0>
return[None]
variable[reddit] assign[=] call[name[praw].Reddit, parameter[]]
name[generator].get_reddit assign[=] <ast.Lambda object at 0x7da1b1d38c10> | keyword[def] identifier[init_reddit] ( identifier[generator] ):
literal[string]
identifier[auth_dict] = identifier[generator] . identifier[settings] . identifier[get] ( literal[string] )
keyword[if] identifier[auth_dict] keyword[is] keyword[None] :
identifier[log] . identifier[info] ( literal[string] )
identifier[generator] . identifier[get_reddit] = keyword[lambda] : keyword[None]
keyword[return]
identifier[reddit] = identifier[praw] . identifier[Reddit] (** identifier[auth_dict] )
identifier[generator] . identifier[get_reddit] = keyword[lambda] : identifier[reddit] | def init_reddit(generator):
"""
this is a hack to make sure the reddit object keeps track of a session
    through article scanning, speeding up networking as the connection can be
kept alive.
"""
auth_dict = generator.settings.get('REDDIT_POSTER_AUTH')
if auth_dict is None:
log.info("Could not find REDDIT_POSTER_AUTH key in settings, reddit plugin won't function")
generator.get_reddit = lambda : None
return # depends on [control=['if'], data=[]]
reddit = praw.Reddit(**auth_dict)
generator.get_reddit = lambda : reddit |
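A hook like init_reddit is normally connected to a Pelican signal at plugin load time. A minimal registration sketch follows; the signal name assumes Pelican's standard plugin API and is not part of the record above.

# Hypothetical plugin wiring; assumes Pelican's signals module is available.
from pelican import signals

def register():
    signals.article_generator_init.connect(init_reddit)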
def scan(ctx, sources=None, endpoint=False, raw=False, extra=False):
"""SCAN: get ontology data from RDF source and print out a report.
"""
verbose = ctx.obj['VERBOSE']
sTime = ctx.obj['STIME']
print_opts = {
'labels': verbose,
'extra': extra,
}
if sources or (sources and endpoint):
action_analyze(sources, endpoint, print_opts, verbose, extra, raw)
eTime = time.time()
tTime = eTime - sTime
printDebug("\n-----------\n" + "Time: %0.2fs" % tTime, "comment")
else:
click.echo(ctx.get_help()) | def function[scan, parameter[ctx, sources, endpoint, raw, extra]]:
constant[SCAN: get ontology data from RDF source and print out a report.
]
variable[verbose] assign[=] call[name[ctx].obj][constant[VERBOSE]]
variable[sTime] assign[=] call[name[ctx].obj][constant[STIME]]
variable[print_opts] assign[=] dictionary[[<ast.Constant object at 0x7da1b11a1f30>, <ast.Constant object at 0x7da1b11a23e0>], [<ast.Name object at 0x7da1b11a3370>, <ast.Name object at 0x7da1b11a1480>]]
if <ast.BoolOp object at 0x7da1b11a1f00> begin[:]
call[name[action_analyze], parameter[name[sources], name[endpoint], name[print_opts], name[verbose], name[extra], name[raw]]]
variable[eTime] assign[=] call[name[time].time, parameter[]]
variable[tTime] assign[=] binary_operation[name[eTime] - name[sTime]]
call[name[printDebug], parameter[binary_operation[constant[
-----------
] + binary_operation[constant[Time: %0.2fs] <ast.Mod object at 0x7da2590d6920> name[tTime]]], constant[comment]]] | keyword[def] identifier[scan] ( identifier[ctx] , identifier[sources] = keyword[None] , identifier[endpoint] = keyword[False] , identifier[raw] = keyword[False] , identifier[extra] = keyword[False] ):
literal[string]
identifier[verbose] = identifier[ctx] . identifier[obj] [ literal[string] ]
identifier[sTime] = identifier[ctx] . identifier[obj] [ literal[string] ]
identifier[print_opts] ={
literal[string] : identifier[verbose] ,
literal[string] : identifier[extra] ,
}
keyword[if] identifier[sources] keyword[or] ( identifier[sources] keyword[and] identifier[endpoint] ):
identifier[action_analyze] ( identifier[sources] , identifier[endpoint] , identifier[print_opts] , identifier[verbose] , identifier[extra] , identifier[raw] )
identifier[eTime] = identifier[time] . identifier[time] ()
identifier[tTime] = identifier[eTime] - identifier[sTime]
identifier[printDebug] ( literal[string] + literal[string] % identifier[tTime] , literal[string] )
keyword[else] :
identifier[click] . identifier[echo] ( identifier[ctx] . identifier[get_help] ()) | def scan(ctx, sources=None, endpoint=False, raw=False, extra=False):
"""SCAN: get ontology data from RDF source and print out a report.
"""
verbose = ctx.obj['VERBOSE']
sTime = ctx.obj['STIME']
print_opts = {'labels': verbose, 'extra': extra}
if sources or (sources and endpoint):
action_analyze(sources, endpoint, print_opts, verbose, extra, raw)
eTime = time.time()
tTime = eTime - sTime
printDebug('\n-----------\n' + 'Time:\t %0.2fs' % tTime, 'comment') # depends on [control=['if'], data=[]]
else:
click.echo(ctx.get_help()) |
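The scan command reads VERBOSE and STIME out of ctx.obj, so a parent click group must seed them first. A minimal sketch of that pattern; the group and option names here are illustrative, not taken from the record.

# Minimal click group that seeds ctx.obj the way the scan command expects.
import time
import click

@click.group()
@click.option("--verbose", is_flag=True)
@click.pass_context
def cli(ctx, verbose):
    ctx.ensure_object(dict)
    ctx.obj["VERBOSE"] = verbose
    ctx.obj["STIME"] = time.time()

if __name__ == "__main__":
    cli(obj={})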
def uploadCsvConfiguration(self, conf_filename):
"""
NOT NEEDED. JSON can be POSTed to IM instead of sending a CSV that is locally parsed and converted to JSON.
Remote Address:192.168.100.51:443
Request URL:https://192.168.100.51/types/Configuration/instances/actions/parseFromCSV
Request Method:POST
Status Code:200 OK
    Request Headers
Accept:*/*
Accept-Encoding:gzip, deflate
Accept-Language:en-US,en;q=0.8,sv;q=0.6
Connection:keep-alive
Content-Length:433
Content-Type:multipart/form-data; boundary=----WebKitFormBoundaryY1f2eTo1mOvh744k
Cookie:JSESSIONID=A0823886072B2CEBA327A9185AC2BFE0
Host:192.168.100.51
Origin:https://192.168.100.51
Referer:https://192.168.100.51/install.jsp
User-Agent:Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/40.0.2214.111 Safari/537.36
X-Requested-With:XMLHttpRequest
Request Payload
------WebKitFormBoundaryY1f2eTo1mOvh744k
Content-Disposition: form-data; name="file"; filename="ScaleIO_Minimal_Config_51.csv"
Content-Type: text/csv
"""
parameters = {'selectInstallOrExtend':'install', #'install' or 'extend'
'name':'file',
'id':'fileToUpload',
'filename':'config.csv'
}
file_dict = {'file':('config.csv', open(conf_filename, 'rb'), 'text/csv')}
"""
files = {'file': ('report.csv', 'some,data,to,send\nanother,row,to,send\n')}
"""
temp_username = self._username
temp_password = self._password
temp_im_api_url = self._im_api_url
temp_im_session = requests.Session()
#self._im_session.headers.update({'Accept': 'application/json', 'Version': '1.0'}) # Accept only json
temp_im_session.mount('https://', TLS1Adapter())
temp_im_verify_ssl = self._im_verify_ssl
resp = temp_im_session.post(
#resp = self._do_post(
"{}/{}".format(temp_im_api_url,"types/Configuration/instances/actions/parseFromCSV"),
auth=HTTPBasicAuth('admin', 'Password1!'),
#headers = m.content_type,
files = file_dict,
verify = False,
data = parameters
) | def function[uploadCsvConfiguration, parameter[self, conf_filename]]:
constant[
NOT NEEDED. JSON can be POSTed to IM instead of sending a CSV that is locally parsed and converted to JSON.
Remote Address:192.168.100.51:443
Request URL:https://192.168.100.51/types/Configuration/instances/actions/parseFromCSV
Request Method:POST
Status Code:200 OK
    Request Headers
Accept:*/*
Accept-Encoding:gzip, deflate
Accept-Language:en-US,en;q=0.8,sv;q=0.6
Connection:keep-alive
Content-Length:433
Content-Type:multipart/form-data; boundary=----WebKitFormBoundaryY1f2eTo1mOvh744k
Cookie:JSESSIONID=A0823886072B2CEBA327A9185AC2BFE0
Host:192.168.100.51
Origin:https://192.168.100.51
Referer:https://192.168.100.51/install.jsp
User-Agent:Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/40.0.2214.111 Safari/537.36
X-Requested-With:XMLHttpRequest
Request Payload
------WebKitFormBoundaryY1f2eTo1mOvh744k
Content-Disposition: form-data; name="file"; filename="ScaleIO_Minimal_Config_51.csv"
Content-Type: text/csv
]
variable[parameters] assign[=] dictionary[[<ast.Constant object at 0x7da1b246f6d0>, <ast.Constant object at 0x7da1b246f640>, <ast.Constant object at 0x7da1b246f460>, <ast.Constant object at 0x7da1b246f0d0>], [<ast.Constant object at 0x7da1b246f730>, <ast.Constant object at 0x7da1b246f400>, <ast.Constant object at 0x7da1b246f130>, <ast.Constant object at 0x7da1b246f8b0>]]
variable[file_dict] assign[=] dictionary[[<ast.Constant object at 0x7da1b246efe0>], [<ast.Tuple object at 0x7da1b246f3d0>]]
constant[
files = {'file': ('report.csv', 'some,data,to,send
another,row,to,send
')}
]
variable[temp_username] assign[=] name[self]._username
variable[temp_password] assign[=] name[self]._password
variable[temp_im_api_url] assign[=] name[self]._im_api_url
variable[temp_im_session] assign[=] call[name[requests].Session, parameter[]]
call[name[temp_im_session].mount, parameter[constant[https://], call[name[TLS1Adapter], parameter[]]]]
variable[temp_im_verify_ssl] assign[=] name[self]._im_verify_ssl
variable[resp] assign[=] call[name[temp_im_session].post, parameter[call[constant[{}/{}].format, parameter[name[temp_im_api_url], constant[types/Configuration/instances/actions/parseFromCSV]]]]] | keyword[def] identifier[uploadCsvConfiguration] ( identifier[self] , identifier[conf_filename] ):
literal[string]
identifier[parameters] ={ literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string]
}
identifier[file_dict] ={ literal[string] :( literal[string] , identifier[open] ( identifier[conf_filename] , literal[string] ), literal[string] )}
literal[string]
identifier[temp_username] = identifier[self] . identifier[_username]
identifier[temp_password] = identifier[self] . identifier[_password]
identifier[temp_im_api_url] = identifier[self] . identifier[_im_api_url]
identifier[temp_im_session] = identifier[requests] . identifier[Session] ()
identifier[temp_im_session] . identifier[mount] ( literal[string] , identifier[TLS1Adapter] ())
identifier[temp_im_verify_ssl] = identifier[self] . identifier[_im_verify_ssl]
identifier[resp] = identifier[temp_im_session] . identifier[post] (
literal[string] . identifier[format] ( identifier[temp_im_api_url] , literal[string] ),
identifier[auth] = identifier[HTTPBasicAuth] ( literal[string] , literal[string] ),
identifier[files] = identifier[file_dict] ,
identifier[verify] = keyword[False] ,
identifier[data] = identifier[parameters]
) | def uploadCsvConfiguration(self, conf_filename):
"""
NOT NEEDED. JSON can be POSTed to IM instead of sending a CSV that is locally parsed and converted to JSON.
Remote Address:192.168.100.51:443
Request URL:https://192.168.100.51/types/Configuration/instances/actions/parseFromCSV
Request Method:POST
Status Code:200 OK
Request Headersview source
Accept:*/*
Accept-Encoding:gzip, deflate
Accept-Language:en-US,en;q=0.8,sv;q=0.6
Connection:keep-alive
Content-Length:433
Content-Type:multipart/form-data; boundary=----WebKitFormBoundaryY1f2eTo1mOvh744k
Cookie:JSESSIONID=A0823886072B2CEBA327A9185AC2BFE0
Host:192.168.100.51
Origin:https://192.168.100.51
Referer:https://192.168.100.51/install.jsp
User-Agent:Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/40.0.2214.111 Safari/537.36
X-Requested-With:XMLHttpRequest
Request Payload
------WebKitFormBoundaryY1f2eTo1mOvh744k
Content-Disposition: form-data; name="file"; filename="ScaleIO_Minimal_Config_51.csv"
Content-Type: text/csv
""" #'install' or 'extend'
parameters = {'selectInstallOrExtend': 'install', 'name': 'file', 'id': 'fileToUpload', 'filename': 'config.csv'}
file_dict = {'file': ('config.csv', open(conf_filename, 'rb'), 'text/csv')}
"\n files = {'file': ('report.csv', 'some,data,to,send\nanother,row,to,send\n')}\n "
temp_username = self._username
temp_password = self._password
temp_im_api_url = self._im_api_url
temp_im_session = requests.Session()
#self._im_session.headers.update({'Accept': 'application/json', 'Version': '1.0'}) # Accept only json
temp_im_session.mount('https://', TLS1Adapter())
temp_im_verify_ssl = self._im_verify_ssl
#resp = self._do_post(
#headers = m.content_type,
resp = temp_im_session.post('{}/{}'.format(temp_im_api_url, 'types/Configuration/instances/actions/parseFromCSV'), auth=HTTPBasicAuth('admin', 'Password1!'), files=file_dict, verify=False, data=parameters) |
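Stripped of the session and TLS plumbing, the upload itself is an ordinary requests multipart POST. A minimal sketch with placeholder URL and credentials, not the values from the record:

# Bare multipart CSV upload with requests; URL and credentials are placeholders.
import requests
from requests.auth import HTTPBasicAuth

with open("config.csv", "rb") as fh:
    resp = requests.post(
        "https://im.example.invalid/types/Configuration/instances/actions/parseFromCSV",
        auth=HTTPBasicAuth("admin", "password"),
        files={"file": ("config.csv", fh, "text/csv")},
        data={"selectInstallOrExtend": "install"},
        verify=False,  # mirrors the self-signed certificate handling above
    )
print(resp.status_code)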
def expand_path(path):
"""Returns ``path`` as an absolute path with ~user and env var expansion applied.
:API: public
"""
return os.path.abspath(os.path.expandvars(os.path.expanduser(path))) | def function[expand_path, parameter[path]]:
constant[Returns ``path`` as an absolute path with ~user and env var expansion applied.
:API: public
]
return[call[name[os].path.abspath, parameter[call[name[os].path.expandvars, parameter[call[name[os].path.expanduser, parameter[name[path]]]]]]]] | keyword[def] identifier[expand_path] ( identifier[path] ):
literal[string]
keyword[return] identifier[os] . identifier[path] . identifier[abspath] ( identifier[os] . identifier[path] . identifier[expandvars] ( identifier[os] . identifier[path] . identifier[expanduser] ( identifier[path] ))) | def expand_path(path):
"""Returns ``path`` as an absolute path with ~user and env var expansion applied.
:API: public
"""
return os.path.abspath(os.path.expandvars(os.path.expanduser(path))) |
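A quick usage check for expand_path; both spellings below resolve to the same absolute path on a POSIX system.

# Tiny demonstration of ~ and $VAR expansion feeding into abspath.
import os

os.environ["DATA_DIR"] = os.path.expanduser("~/data")
print(expand_path("~/data/input.csv"))
print(expand_path("$DATA_DIR/input.csv"))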
def from_prev_calc(cls, prev_calc_dir, copy_chgcar=True,
nbands_factor=1.2, standardize=False, sym_prec=0.1,
international_monoclinic=True, reciprocal_density=100,
small_gap_multiply=None, **kwargs):
"""
Generate a set of Vasp input files for SOC calculations from a
directory of previous static Vasp run. SOC calc requires all 3
components for MAGMOM for each atom in the structure.
Args:
            prev_calc_dir (str): The directory containing the outputs
                (vasprun.xml and OUTCAR) of the previous vasp run.
copy_chgcar: Whether to copy the old CHGCAR. Defaults to True.
nbands_factor (float): Multiplicative factor for NBANDS. Choose a
higher number if you are doing an LOPTICS calculation.
            standardize (bool): Whether to standardize to a primitive
standard cell. Defaults to False.
sym_prec (float): Tolerance for symmetry finding. If not 0,
the final structure from the previous run will be symmetrized
to get a primitive standard cell. Set to 0 if you don't want
that.
international_monoclinic (bool): Whether to use international
                convention (vs Curtarolo) for monoclinic. Defaults to True.
reciprocal_density (int): density of k-mesh by reciprocal
volume (defaults to 100)
small_gap_multiply ([float, float]): If the gap is less than
1st index, multiply the default reciprocal_density by the 2nd
index.
\\*\\*kwargs: All kwargs supported by MPSOCSet,
other than structure, prev_incar and prev_chgcar which
are determined from the prev_calc_dir.
"""
vasprun, outcar = get_vasprun_outcar(prev_calc_dir)
incar = vasprun.incar
# Remove magmoms from previous INCAR, since we will prefer
# the final calculated magmoms
# TODO: revisit in context of MPStaticSet incar logic
if 'MAGMOM' in incar:
del incar['magmom']
# Get a magmom-decorated structure
structure = get_structure_from_prev_run(
vasprun, outcar, sym_prec=standardize and sym_prec,
international_monoclinic=international_monoclinic)
# override magmom if provided
if kwargs.get("magmom", None):
structure = structure.copy(
site_properties={"magmom": kwargs["magmom"]})
kwargs.pop("magmom", None)
# magmom has to be 3D for SOC calculation.
if hasattr(structure[0], "magmom"):
if not isinstance(structure[0].magmom, list):
structure = structure.copy(site_properties={
"magmom": [[0, 0, site.magmom] for site in structure]})
else:
raise ValueError("Neither the previous structure has mamgom "
"property nor magmom provided")
nbands = int(np.ceil(vasprun.parameters["NBANDS"] * nbands_factor))
incar.update({"NBANDS": nbands})
if standardize:
warnings.warn("Use of standardize=True with from_prev_run is not "
"recommended as there is no guarantee the copied "
"files will be appropriate for the standardized"
" structure. copy_chgcar is enforced to be false.")
copy_chgcar = False
files_to_transfer = {}
if copy_chgcar:
chgcars = sorted(glob.glob(str(Path(prev_calc_dir) / "CHGCAR*")))
if chgcars:
files_to_transfer["CHGCAR"] = str(chgcars[-1])
# multiply the reciprocal density if needed:
if small_gap_multiply:
gap = vasprun.eigenvalue_band_properties[0]
if gap <= small_gap_multiply[0]:
reciprocal_density = reciprocal_density * small_gap_multiply[1]
return cls(structure, prev_incar=incar,
files_to_transfer=files_to_transfer,
reciprocal_density=reciprocal_density, **kwargs) | def function[from_prev_calc, parameter[cls, prev_calc_dir, copy_chgcar, nbands_factor, standardize, sym_prec, international_monoclinic, reciprocal_density, small_gap_multiply]]:
constant[
Generate a set of Vasp input files for SOC calculations from a
directory of previous static Vasp run. SOC calc requires all 3
components for MAGMOM for each atom in the structure.
Args:
        prev_calc_dir (str): The directory containing the outputs
            (vasprun.xml and OUTCAR) of the previous vasp run.
copy_chgcar: Whether to copy the old CHGCAR. Defaults to True.
nbands_factor (float): Multiplicative factor for NBANDS. Choose a
higher number if you are doing an LOPTICS calculation.
        standardize (bool): Whether to standardize to a primitive
standard cell. Defaults to False.
sym_prec (float): Tolerance for symmetry finding. If not 0,
the final structure from the previous run will be symmetrized
to get a primitive standard cell. Set to 0 if you don't want
that.
international_monoclinic (bool): Whether to use international
            convention (vs Curtarolo) for monoclinic. Defaults to True.
reciprocal_density (int): density of k-mesh by reciprocal
volume (defaults to 100)
small_gap_multiply ([float, float]): If the gap is less than
1st index, multiply the default reciprocal_density by the 2nd
index.
\*\*kwargs: All kwargs supported by MPSOCSet,
other than structure, prev_incar and prev_chgcar which
are determined from the prev_calc_dir.
]
<ast.Tuple object at 0x7da207f02a10> assign[=] call[name[get_vasprun_outcar], parameter[name[prev_calc_dir]]]
variable[incar] assign[=] name[vasprun].incar
if compare[constant[MAGMOM] in name[incar]] begin[:]
<ast.Delete object at 0x7da207f00dc0>
variable[structure] assign[=] call[name[get_structure_from_prev_run], parameter[name[vasprun], name[outcar]]]
if call[name[kwargs].get, parameter[constant[magmom], constant[None]]] begin[:]
variable[structure] assign[=] call[name[structure].copy, parameter[]]
call[name[kwargs].pop, parameter[constant[magmom], constant[None]]]
if call[name[hasattr], parameter[call[name[structure]][constant[0]], constant[magmom]]] begin[:]
if <ast.UnaryOp object at 0x7da207f03940> begin[:]
variable[structure] assign[=] call[name[structure].copy, parameter[]]
variable[nbands] assign[=] call[name[int], parameter[call[name[np].ceil, parameter[binary_operation[call[name[vasprun].parameters][constant[NBANDS]] * name[nbands_factor]]]]]]
call[name[incar].update, parameter[dictionary[[<ast.Constant object at 0x7da207f01e70>], [<ast.Name object at 0x7da207f016f0>]]]]
if name[standardize] begin[:]
call[name[warnings].warn, parameter[constant[Use of standardize=True with from_prev_run is not recommended as there is no guarantee the copied files will be appropriate for the standardized structure. copy_chgcar is enforced to be false.]]]
variable[copy_chgcar] assign[=] constant[False]
variable[files_to_transfer] assign[=] dictionary[[], []]
if name[copy_chgcar] begin[:]
variable[chgcars] assign[=] call[name[sorted], parameter[call[name[glob].glob, parameter[call[name[str], parameter[binary_operation[call[name[Path], parameter[name[prev_calc_dir]]] / constant[CHGCAR*]]]]]]]]
if name[chgcars] begin[:]
call[name[files_to_transfer]][constant[CHGCAR]] assign[=] call[name[str], parameter[call[name[chgcars]][<ast.UnaryOp object at 0x7da20c6e72e0>]]]
if name[small_gap_multiply] begin[:]
variable[gap] assign[=] call[name[vasprun].eigenvalue_band_properties][constant[0]]
if compare[name[gap] less_or_equal[<=] call[name[small_gap_multiply]][constant[0]]] begin[:]
variable[reciprocal_density] assign[=] binary_operation[name[reciprocal_density] * call[name[small_gap_multiply]][constant[1]]]
return[call[name[cls], parameter[name[structure]]]] | keyword[def] identifier[from_prev_calc] ( identifier[cls] , identifier[prev_calc_dir] , identifier[copy_chgcar] = keyword[True] ,
identifier[nbands_factor] = literal[int] , identifier[standardize] = keyword[False] , identifier[sym_prec] = literal[int] ,
identifier[international_monoclinic] = keyword[True] , identifier[reciprocal_density] = literal[int] ,
identifier[small_gap_multiply] = keyword[None] ,** identifier[kwargs] ):
literal[string]
identifier[vasprun] , identifier[outcar] = identifier[get_vasprun_outcar] ( identifier[prev_calc_dir] )
identifier[incar] = identifier[vasprun] . identifier[incar]
keyword[if] literal[string] keyword[in] identifier[incar] :
keyword[del] identifier[incar] [ literal[string] ]
identifier[structure] = identifier[get_structure_from_prev_run] (
identifier[vasprun] , identifier[outcar] , identifier[sym_prec] = identifier[standardize] keyword[and] identifier[sym_prec] ,
identifier[international_monoclinic] = identifier[international_monoclinic] )
keyword[if] identifier[kwargs] . identifier[get] ( literal[string] , keyword[None] ):
identifier[structure] = identifier[structure] . identifier[copy] (
identifier[site_properties] ={ literal[string] : identifier[kwargs] [ literal[string] ]})
identifier[kwargs] . identifier[pop] ( literal[string] , keyword[None] )
keyword[if] identifier[hasattr] ( identifier[structure] [ literal[int] ], literal[string] ):
keyword[if] keyword[not] identifier[isinstance] ( identifier[structure] [ literal[int] ]. identifier[magmom] , identifier[list] ):
identifier[structure] = identifier[structure] . identifier[copy] ( identifier[site_properties] ={
literal[string] :[[ literal[int] , literal[int] , identifier[site] . identifier[magmom] ] keyword[for] identifier[site] keyword[in] identifier[structure] ]})
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string]
literal[string] )
identifier[nbands] = identifier[int] ( identifier[np] . identifier[ceil] ( identifier[vasprun] . identifier[parameters] [ literal[string] ]* identifier[nbands_factor] ))
identifier[incar] . identifier[update] ({ literal[string] : identifier[nbands] })
keyword[if] identifier[standardize] :
identifier[warnings] . identifier[warn] ( literal[string]
literal[string]
literal[string]
literal[string] )
identifier[copy_chgcar] = keyword[False]
identifier[files_to_transfer] ={}
keyword[if] identifier[copy_chgcar] :
identifier[chgcars] = identifier[sorted] ( identifier[glob] . identifier[glob] ( identifier[str] ( identifier[Path] ( identifier[prev_calc_dir] )/ literal[string] )))
keyword[if] identifier[chgcars] :
identifier[files_to_transfer] [ literal[string] ]= identifier[str] ( identifier[chgcars] [- literal[int] ])
keyword[if] identifier[small_gap_multiply] :
identifier[gap] = identifier[vasprun] . identifier[eigenvalue_band_properties] [ literal[int] ]
keyword[if] identifier[gap] <= identifier[small_gap_multiply] [ literal[int] ]:
identifier[reciprocal_density] = identifier[reciprocal_density] * identifier[small_gap_multiply] [ literal[int] ]
keyword[return] identifier[cls] ( identifier[structure] , identifier[prev_incar] = identifier[incar] ,
identifier[files_to_transfer] = identifier[files_to_transfer] ,
identifier[reciprocal_density] = identifier[reciprocal_density] ,** identifier[kwargs] ) | def from_prev_calc(cls, prev_calc_dir, copy_chgcar=True, nbands_factor=1.2, standardize=False, sym_prec=0.1, international_monoclinic=True, reciprocal_density=100, small_gap_multiply=None, **kwargs):
"""
Generate a set of Vasp input files for SOC calculations from a
directory of previous static Vasp run. SOC calc requires all 3
components for MAGMOM for each atom in the structure.
Args:
        prev_calc_dir (str): The directory containing the outputs
            (vasprun.xml and OUTCAR) of the previous vasp run.
copy_chgcar: Whether to copy the old CHGCAR. Defaults to True.
nbands_factor (float): Multiplicative factor for NBANDS. Choose a
higher number if you are doing an LOPTICS calculation.
        standardize (bool): Whether to standardize to a primitive
standard cell. Defaults to False.
sym_prec (float): Tolerance for symmetry finding. If not 0,
the final structure from the previous run will be symmetrized
to get a primitive standard cell. Set to 0 if you don't want
that.
international_monoclinic (bool): Whether to use international
            convention (vs Curtarolo) for monoclinic. Defaults to True.
reciprocal_density (int): density of k-mesh by reciprocal
volume (defaults to 100)
small_gap_multiply ([float, float]): If the gap is less than
1st index, multiply the default reciprocal_density by the 2nd
index.
\\*\\*kwargs: All kwargs supported by MPSOCSet,
other than structure, prev_incar and prev_chgcar which
are determined from the prev_calc_dir.
"""
(vasprun, outcar) = get_vasprun_outcar(prev_calc_dir)
incar = vasprun.incar
# Remove magmoms from previous INCAR, since we will prefer
# the final calculated magmoms
# TODO: revisit in context of MPStaticSet incar logic
if 'MAGMOM' in incar:
del incar['magmom'] # depends on [control=['if'], data=['incar']]
# Get a magmom-decorated structure
structure = get_structure_from_prev_run(vasprun, outcar, sym_prec=standardize and sym_prec, international_monoclinic=international_monoclinic)
# override magmom if provided
if kwargs.get('magmom', None):
structure = structure.copy(site_properties={'magmom': kwargs['magmom']})
kwargs.pop('magmom', None) # depends on [control=['if'], data=[]]
# magmom has to be 3D for SOC calculation.
if hasattr(structure[0], 'magmom'):
if not isinstance(structure[0].magmom, list):
structure = structure.copy(site_properties={'magmom': [[0, 0, site.magmom] for site in structure]}) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
            raise ValueError('Neither the previous structure has magmom property nor magmom provided')
nbands = int(np.ceil(vasprun.parameters['NBANDS'] * nbands_factor))
incar.update({'NBANDS': nbands})
if standardize:
warnings.warn('Use of standardize=True with from_prev_run is not recommended as there is no guarantee the copied files will be appropriate for the standardized structure. copy_chgcar is enforced to be false.')
copy_chgcar = False # depends on [control=['if'], data=[]]
files_to_transfer = {}
if copy_chgcar:
chgcars = sorted(glob.glob(str(Path(prev_calc_dir) / 'CHGCAR*')))
if chgcars:
files_to_transfer['CHGCAR'] = str(chgcars[-1]) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
# multiply the reciprocal density if needed:
if small_gap_multiply:
gap = vasprun.eigenvalue_band_properties[0]
if gap <= small_gap_multiply[0]:
reciprocal_density = reciprocal_density * small_gap_multiply[1] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return cls(structure, prev_incar=incar, files_to_transfer=files_to_transfer, reciprocal_density=reciprocal_density, **kwargs) |
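The NBANDS adjustment in the body is just a ceiling over a scaled band count; for example, with the default nbands_factor of 1.2:

# int(np.ceil(...)) rounds the scaled band count up to a whole band.
import numpy as np
print(int(np.ceil(96 * 1.2)))  # -> 116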
def asISO8601TimeAndDate(self, includeDelimiters=True, tzinfo=None,
includeTimezone=True):
"""Return this time formatted as specified by ISO 8861.
ISO 8601 allows optional dashes to delimit dates and colons to delimit
times. The parameter includeDelimiters (default True) defines the
inclusion of these delimiters in the output.
If tzinfo is a datetime.tzinfo instance, the output time will be in the
timezone given. If it is None (the default), then the timezone string
will not be included in the output, and the time will be in UTC.
        The includeTimezone parameter corresponds to the inclusion of an
explicit timezone. The default is True.
"""
if not self.isTimezoneDependent():
tzinfo = None
dtime = self.asDatetime(tzinfo)
if includeDelimiters:
dateSep = '-'
timeSep = ':'
else:
dateSep = timeSep = ''
if includeTimezone:
if tzinfo is None:
timezone = '+00%s00' % (timeSep,)
else:
sign, hour, min = _timedeltaToSignHrMin(dtime.utcoffset())
timezone = '%s%02i%s%02i' % (sign, hour, timeSep, min)
else:
timezone = ''
microsecond = ('%06i' % (dtime.microsecond,)).rstrip('0')
if microsecond:
microsecond = '.' + microsecond
parts = [
('%04i' % (dtime.year,), datetime.timedelta(days=366)),
('%s%02i' % (dateSep, dtime.month), datetime.timedelta(days=31)),
('%s%02i' % (dateSep, dtime.day), datetime.timedelta(days=1)),
('T', datetime.timedelta(hours=1)),
('%02i' % (dtime.hour,), datetime.timedelta(hours=1)),
('%s%02i' % (timeSep, dtime.minute), datetime.timedelta(minutes=1)),
('%s%02i' % (timeSep, dtime.second), datetime.timedelta(seconds=1)),
(microsecond, datetime.timedelta(microseconds=1)),
(timezone, datetime.timedelta(hours=1))
]
formatted = ''
for part, minResolution in parts:
if self.resolution <= minResolution:
formatted += part
return formatted | def function[asISO8601TimeAndDate, parameter[self, includeDelimiters, tzinfo, includeTimezone]]:
    constant[Return this time formatted as specified by ISO 8601.
ISO 8601 allows optional dashes to delimit dates and colons to delimit
times. The parameter includeDelimiters (default True) defines the
inclusion of these delimiters in the output.
If tzinfo is a datetime.tzinfo instance, the output time will be in the
timezone given. If it is None (the default), then the timezone string
will not be included in the output, and the time will be in UTC.
    The includeTimezone parameter corresponds to the inclusion of an
explicit timezone. The default is True.
]
if <ast.UnaryOp object at 0x7da20e956b30> begin[:]
variable[tzinfo] assign[=] constant[None]
variable[dtime] assign[=] call[name[self].asDatetime, parameter[name[tzinfo]]]
if name[includeDelimiters] begin[:]
variable[dateSep] assign[=] constant[-]
variable[timeSep] assign[=] constant[:]
if name[includeTimezone] begin[:]
if compare[name[tzinfo] is constant[None]] begin[:]
variable[timezone] assign[=] binary_operation[constant[+00%s00] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da20e9566b0>]]]
variable[microsecond] assign[=] call[binary_operation[constant[%06i] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da20e956140>]]].rstrip, parameter[constant[0]]]
if name[microsecond] begin[:]
variable[microsecond] assign[=] binary_operation[constant[.] + name[microsecond]]
variable[parts] assign[=] list[[<ast.Tuple object at 0x7da18f00d780>, <ast.Tuple object at 0x7da18f00dc90>, <ast.Tuple object at 0x7da18dc07520>, <ast.Tuple object at 0x7da18f00f310>, <ast.Tuple object at 0x7da18f00fd30>, <ast.Tuple object at 0x7da18f00f7f0>, <ast.Tuple object at 0x7da18f00f340>, <ast.Tuple object at 0x7da18f00d3c0>, <ast.Tuple object at 0x7da20c796020>]]
variable[formatted] assign[=] constant[]
for taget[tuple[[<ast.Name object at 0x7da20c795390>, <ast.Name object at 0x7da20c796140>]]] in starred[name[parts]] begin[:]
if compare[name[self].resolution less_or_equal[<=] name[minResolution]] begin[:]
<ast.AugAssign object at 0x7da20c796230>
return[name[formatted]] | keyword[def] identifier[asISO8601TimeAndDate] ( identifier[self] , identifier[includeDelimiters] = keyword[True] , identifier[tzinfo] = keyword[None] ,
identifier[includeTimezone] = keyword[True] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[isTimezoneDependent] ():
identifier[tzinfo] = keyword[None]
identifier[dtime] = identifier[self] . identifier[asDatetime] ( identifier[tzinfo] )
keyword[if] identifier[includeDelimiters] :
identifier[dateSep] = literal[string]
identifier[timeSep] = literal[string]
keyword[else] :
identifier[dateSep] = identifier[timeSep] = literal[string]
keyword[if] identifier[includeTimezone] :
keyword[if] identifier[tzinfo] keyword[is] keyword[None] :
identifier[timezone] = literal[string] %( identifier[timeSep] ,)
keyword[else] :
identifier[sign] , identifier[hour] , identifier[min] = identifier[_timedeltaToSignHrMin] ( identifier[dtime] . identifier[utcoffset] ())
identifier[timezone] = literal[string] %( identifier[sign] , identifier[hour] , identifier[timeSep] , identifier[min] )
keyword[else] :
identifier[timezone] = literal[string]
identifier[microsecond] =( literal[string] %( identifier[dtime] . identifier[microsecond] ,)). identifier[rstrip] ( literal[string] )
keyword[if] identifier[microsecond] :
identifier[microsecond] = literal[string] + identifier[microsecond]
identifier[parts] =[
( literal[string] %( identifier[dtime] . identifier[year] ,), identifier[datetime] . identifier[timedelta] ( identifier[days] = literal[int] )),
( literal[string] %( identifier[dateSep] , identifier[dtime] . identifier[month] ), identifier[datetime] . identifier[timedelta] ( identifier[days] = literal[int] )),
( literal[string] %( identifier[dateSep] , identifier[dtime] . identifier[day] ), identifier[datetime] . identifier[timedelta] ( identifier[days] = literal[int] )),
( literal[string] , identifier[datetime] . identifier[timedelta] ( identifier[hours] = literal[int] )),
( literal[string] %( identifier[dtime] . identifier[hour] ,), identifier[datetime] . identifier[timedelta] ( identifier[hours] = literal[int] )),
( literal[string] %( identifier[timeSep] , identifier[dtime] . identifier[minute] ), identifier[datetime] . identifier[timedelta] ( identifier[minutes] = literal[int] )),
( literal[string] %( identifier[timeSep] , identifier[dtime] . identifier[second] ), identifier[datetime] . identifier[timedelta] ( identifier[seconds] = literal[int] )),
( identifier[microsecond] , identifier[datetime] . identifier[timedelta] ( identifier[microseconds] = literal[int] )),
( identifier[timezone] , identifier[datetime] . identifier[timedelta] ( identifier[hours] = literal[int] ))
]
identifier[formatted] = literal[string]
keyword[for] identifier[part] , identifier[minResolution] keyword[in] identifier[parts] :
keyword[if] identifier[self] . identifier[resolution] <= identifier[minResolution] :
identifier[formatted] += identifier[part]
keyword[return] identifier[formatted] | def asISO8601TimeAndDate(self, includeDelimiters=True, tzinfo=None, includeTimezone=True):
"""Return this time formatted as specified by ISO 8861.
ISO 8601 allows optional dashes to delimit dates and colons to delimit
times. The parameter includeDelimiters (default True) defines the
inclusion of these delimiters in the output.
If tzinfo is a datetime.tzinfo instance, the output time will be in the
timezone given. If it is None (the default), then the timezone string
will not be included in the output, and the time will be in UTC.
        The includeTimezone parameter corresponds to the inclusion of an
explicit timezone. The default is True.
"""
if not self.isTimezoneDependent():
tzinfo = None # depends on [control=['if'], data=[]]
dtime = self.asDatetime(tzinfo)
if includeDelimiters:
dateSep = '-'
timeSep = ':' # depends on [control=['if'], data=[]]
else:
dateSep = timeSep = ''
if includeTimezone:
if tzinfo is None:
timezone = '+00%s00' % (timeSep,) # depends on [control=['if'], data=[]]
else:
(sign, hour, min) = _timedeltaToSignHrMin(dtime.utcoffset())
timezone = '%s%02i%s%02i' % (sign, hour, timeSep, min) # depends on [control=['if'], data=[]]
else:
timezone = ''
microsecond = ('%06i' % (dtime.microsecond,)).rstrip('0')
if microsecond:
microsecond = '.' + microsecond # depends on [control=['if'], data=[]]
parts = [('%04i' % (dtime.year,), datetime.timedelta(days=366)), ('%s%02i' % (dateSep, dtime.month), datetime.timedelta(days=31)), ('%s%02i' % (dateSep, dtime.day), datetime.timedelta(days=1)), ('T', datetime.timedelta(hours=1)), ('%02i' % (dtime.hour,), datetime.timedelta(hours=1)), ('%s%02i' % (timeSep, dtime.minute), datetime.timedelta(minutes=1)), ('%s%02i' % (timeSep, dtime.second), datetime.timedelta(seconds=1)), (microsecond, datetime.timedelta(microseconds=1)), (timezone, datetime.timedelta(hours=1))]
formatted = ''
for (part, minResolution) in parts:
if self.resolution <= minResolution:
formatted += part # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
return formatted |
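The closing loop keeps a component only while the object's resolution is at least as fine as that component's granularity. A standalone sketch of the same truncation idea, simplified and without timezone handling:

# Resolution-driven truncation over (text, granularity) pairs.
import datetime

def truncate_iso(dt, resolution):
    parts = [
        (dt.strftime("%Y-%m-%d"), datetime.timedelta(days=1)),
        (dt.strftime("T%H:%M"), datetime.timedelta(minutes=1)),
        (dt.strftime(":%S"), datetime.timedelta(seconds=1)),
    ]
    # Keep a part only if the requested resolution is <= its granularity.
    return "".join(text for text, gran in parts if resolution <= gran)

print(truncate_iso(datetime.datetime(2024, 1, 2, 3, 4, 5),
                   datetime.timedelta(minutes=1)))  # 2024-01-02T03:04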
def filter(self, *args):
'''
Returns a filtered subset of this collection of signatures, based on
a set of key/value tuples
This is useful when you only want a subset of the signatures in a project.
Example usage:
::
pc = PerfherderClient()
signatures = pc.get_signatures('mozilla-central')
signatures = signatures.filter(('suite', 'tp5o'), ('machine_platform', 'windowsxp'))
'''
filtered_signatures = {}
for (signature, signature_value) in self.items():
skip = False
for (key, val) in args:
if signature_value.get(key) != val:
skip = True
break
if not skip:
filtered_signatures[signature] = signature_value
return PerformanceSignatureCollection(filtered_signatures) | def function[filter, parameter[self]]:
constant[
Returns a filtered subset of this collection of signatures, based on
a set of key/value tuples
This is useful when you only want a subset of the signatures in a project.
Example usage:
::
pc = PerfherderClient()
signatures = pc.get_signatures('mozilla-central')
signatures = signatures.filter(('suite', 'tp5o'), ('machine_platform', 'windowsxp'))
]
variable[filtered_signatures] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da1b08a5cc0>, <ast.Name object at 0x7da1b08a7eb0>]]] in starred[call[name[self].items, parameter[]]] begin[:]
variable[skip] assign[=] constant[False]
for taget[tuple[[<ast.Name object at 0x7da1b08a58a0>, <ast.Name object at 0x7da1b08a7d30>]]] in starred[name[args]] begin[:]
if compare[call[name[signature_value].get, parameter[name[key]]] not_equal[!=] name[val]] begin[:]
variable[skip] assign[=] constant[True]
break
if <ast.UnaryOp object at 0x7da1b08a7010> begin[:]
call[name[filtered_signatures]][name[signature]] assign[=] name[signature_value]
return[call[name[PerformanceSignatureCollection], parameter[name[filtered_signatures]]]] | keyword[def] identifier[filter] ( identifier[self] ,* identifier[args] ):
literal[string]
identifier[filtered_signatures] ={}
keyword[for] ( identifier[signature] , identifier[signature_value] ) keyword[in] identifier[self] . identifier[items] ():
identifier[skip] = keyword[False]
keyword[for] ( identifier[key] , identifier[val] ) keyword[in] identifier[args] :
keyword[if] identifier[signature_value] . identifier[get] ( identifier[key] )!= identifier[val] :
identifier[skip] = keyword[True]
keyword[break]
keyword[if] keyword[not] identifier[skip] :
identifier[filtered_signatures] [ identifier[signature] ]= identifier[signature_value]
keyword[return] identifier[PerformanceSignatureCollection] ( identifier[filtered_signatures] ) | def filter(self, *args):
"""
Returns a filtered subset of this collection of signatures, based on
a set of key/value tuples
This is useful when you only want a subset of the signatures in a project.
Example usage:
::
pc = PerfherderClient()
signatures = pc.get_signatures('mozilla-central')
signatures = signatures.filter(('suite', 'tp5o'), ('machine_platform', 'windowsxp'))
"""
filtered_signatures = {}
for (signature, signature_value) in self.items():
skip = False
for (key, val) in args:
if signature_value.get(key) != val:
skip = True
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
if not skip:
filtered_signatures[signature] = signature_value # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
return PerformanceSignatureCollection(filtered_signatures) |
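The same key/value filtering reads naturally as a dict comprehension over plain dicts:

# Equivalent filtering over a plain dict of signature dicts.
signatures = {
    "abc": {"suite": "tp5o", "machine_platform": "windowsxp"},
    "def": {"suite": "tp5o", "machine_platform": "linux64"},
}
wanted = [("suite", "tp5o"), ("machine_platform", "windowsxp")]
kept = {sig: val for sig, val in signatures.items()
        if all(val.get(k) == v for k, v in wanted)}
print(kept)  # only "abc" survives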
def get_shard_iterator(self, *, stream_arn, shard_id, iterator_type, sequence_number=None):
"""Wraps :func:`boto3.DynamoDBStreams.Client.get_shard_iterator`.
:param str stream_arn: Stream arn. Usually :data:`Shard.stream_arn <bloop.stream.shard.Shard.stream_arn>`.
:param str shard_id: Shard identifier. Usually :data:`Shard.shard_id <bloop.stream.shard.Shard.shard_id>`.
:param str iterator_type: "sequence_at", "sequence_after", "trim_horizon", or "latest"
:param sequence_number:
:return: Iterator id, valid for 15 minutes.
:rtype: str
:raises bloop.exceptions.RecordsExpired: Tried to get an iterator beyond the Trim Horizon.
"""
real_iterator_type = validate_stream_iterator_type(iterator_type)
request = {
"StreamArn": stream_arn,
"ShardId": shard_id,
"ShardIteratorType": real_iterator_type,
"SequenceNumber": sequence_number
}
# boto3 isn't down with literal Nones.
if sequence_number is None:
request.pop("SequenceNumber")
try:
return self.stream_client.get_shard_iterator(**request)["ShardIterator"]
except botocore.exceptions.ClientError as error:
if error.response["Error"]["Code"] == "TrimmedDataAccessException":
raise RecordsExpired from error
raise BloopException("Unexpected error while creating shard iterator") from error | def function[get_shard_iterator, parameter[self]]:
constant[Wraps :func:`boto3.DynamoDBStreams.Client.get_shard_iterator`.
:param str stream_arn: Stream arn. Usually :data:`Shard.stream_arn <bloop.stream.shard.Shard.stream_arn>`.
:param str shard_id: Shard identifier. Usually :data:`Shard.shard_id <bloop.stream.shard.Shard.shard_id>`.
:param str iterator_type: "sequence_at", "sequence_after", "trim_horizon", or "latest"
:param sequence_number:
:return: Iterator id, valid for 15 minutes.
:rtype: str
:raises bloop.exceptions.RecordsExpired: Tried to get an iterator beyond the Trim Horizon.
]
variable[real_iterator_type] assign[=] call[name[validate_stream_iterator_type], parameter[name[iterator_type]]]
variable[request] assign[=] dictionary[[<ast.Constant object at 0x7da1b0f9b340>, <ast.Constant object at 0x7da1b0f9b0a0>, <ast.Constant object at 0x7da1b0f9b400>, <ast.Constant object at 0x7da1b0f98850>], [<ast.Name object at 0x7da1b0f9b4f0>, <ast.Name object at 0x7da1b0f9a5c0>, <ast.Name object at 0x7da1b0f9a560>, <ast.Name object at 0x7da1b0f9a5f0>]]
if compare[name[sequence_number] is constant[None]] begin[:]
call[name[request].pop, parameter[constant[SequenceNumber]]]
<ast.Try object at 0x7da1b0f9a830> | keyword[def] identifier[get_shard_iterator] ( identifier[self] ,*, identifier[stream_arn] , identifier[shard_id] , identifier[iterator_type] , identifier[sequence_number] = keyword[None] ):
literal[string]
identifier[real_iterator_type] = identifier[validate_stream_iterator_type] ( identifier[iterator_type] )
identifier[request] ={
literal[string] : identifier[stream_arn] ,
literal[string] : identifier[shard_id] ,
literal[string] : identifier[real_iterator_type] ,
literal[string] : identifier[sequence_number]
}
keyword[if] identifier[sequence_number] keyword[is] keyword[None] :
identifier[request] . identifier[pop] ( literal[string] )
keyword[try] :
keyword[return] identifier[self] . identifier[stream_client] . identifier[get_shard_iterator] (** identifier[request] )[ literal[string] ]
keyword[except] identifier[botocore] . identifier[exceptions] . identifier[ClientError] keyword[as] identifier[error] :
keyword[if] identifier[error] . identifier[response] [ literal[string] ][ literal[string] ]== literal[string] :
keyword[raise] identifier[RecordsExpired] keyword[from] identifier[error]
keyword[raise] identifier[BloopException] ( literal[string] ) keyword[from] identifier[error] | def get_shard_iterator(self, *, stream_arn, shard_id, iterator_type, sequence_number=None):
"""Wraps :func:`boto3.DynamoDBStreams.Client.get_shard_iterator`.
:param str stream_arn: Stream arn. Usually :data:`Shard.stream_arn <bloop.stream.shard.Shard.stream_arn>`.
:param str shard_id: Shard identifier. Usually :data:`Shard.shard_id <bloop.stream.shard.Shard.shard_id>`.
:param str iterator_type: "sequence_at", "sequence_after", "trim_horizon", or "latest"
:param sequence_number:
:return: Iterator id, valid for 15 minutes.
:rtype: str
:raises bloop.exceptions.RecordsExpired: Tried to get an iterator beyond the Trim Horizon.
"""
real_iterator_type = validate_stream_iterator_type(iterator_type)
request = {'StreamArn': stream_arn, 'ShardId': shard_id, 'ShardIteratorType': real_iterator_type, 'SequenceNumber': sequence_number}
# boto3 isn't down with literal Nones.
if sequence_number is None:
request.pop('SequenceNumber') # depends on [control=['if'], data=[]]
try:
return self.stream_client.get_shard_iterator(**request)['ShardIterator'] # depends on [control=['try'], data=[]]
except botocore.exceptions.ClientError as error:
if error.response['Error']['Code'] == 'TrimmedDataAccessException':
raise RecordsExpired from error # depends on [control=['if'], data=[]]
raise BloopException('Unexpected error while creating shard iterator') from error # depends on [control=['except'], data=['error']] |
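Under the wrapper this is a plain DynamoDB Streams call. A rough boto3 sketch follows; the ARN and shard id are placeholders, and valid AWS credentials are needed to actually run it.

# Direct boto3 equivalent of the wrapped get_shard_iterator call.
import boto3

streams = boto3.client("dynamodbstreams")
iterator_id = streams.get_shard_iterator(
    StreamArn="arn:aws:dynamodb:us-east-1:111122223333:table/Example/stream/2024-01-01T00:00:00.000",
    ShardId="shardId-00000000000000000000-00000000",
    ShardIteratorType="TRIM_HORIZON",
)["ShardIterator"]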
def count_lines_in_file(src_file):
"""
    Count the lines in src_file and return a short status string.
"""
tot = 0
res = ''
try:
with open(src_file, 'r') as f:
for line in f:
tot += 1
res = str(tot) + ' recs read'
except:
        res = "ERROR - couldn't open file"
return res | def function[count_lines_in_file, parameter[src_file]]:
constant[
    Count the lines in src_file and return a short status string.
]
variable[tot] assign[=] constant[0]
variable[res] assign[=] constant[]
<ast.Try object at 0x7da18f00fe80>
return[name[res]] | keyword[def] identifier[count_lines_in_file] ( identifier[src_file] ):
literal[string]
identifier[tot] = literal[int]
identifier[res] = literal[string]
keyword[try] :
keyword[with] identifier[open] ( identifier[src_file] , literal[string] ) keyword[as] identifier[f] :
keyword[for] identifier[line] keyword[in] identifier[f] :
identifier[tot] += literal[int]
identifier[res] = identifier[str] ( identifier[tot] )+ literal[string]
keyword[except] :
identifier[res] = literal[string]
keyword[return] identifier[res] | def count_lines_in_file(src_file):
"""
    Count the lines in src_file and return a short status string.
"""
tot = 0
res = ''
try:
with open(src_file, 'r') as f:
for line in f:
tot += 1 # depends on [control=['for'], data=[]]
res = str(tot) + ' recs read' # depends on [control=['with'], data=['f']] # depends on [control=['try'], data=[]]
except:
        res = "ERROR - couldn't open file" # depends on [control=['except'], data=[]]
return res |
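For comparison, the same count written more idiomatically, with a narrow except instead of the bare one:

# sum() over the file iterator counts lines without a manual counter.
def count_lines(src_file):
    try:
        with open(src_file, "r") as f:
            return "%d recs read" % sum(1 for _ in f)
    except OSError:
        return "ERROR - couldn't open file"

print(count_lines(__file__))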
def touch_member(config, dcs):
''' Rip-off of the ha.touch_member without inter-class dependencies '''
p = Postgresql(config['postgresql'])
p.set_state('running')
p.set_role('master')
def restapi_connection_string(config):
protocol = 'https' if config.get('certfile') else 'http'
connect_address = config.get('connect_address')
listen = config['listen']
return '{0}://{1}/patroni'.format(protocol, connect_address or listen)
data = {
'conn_url': p.connection_string,
'api_url': restapi_connection_string(config['restapi']),
'state': p.state,
'role': p.role
}
return dcs.touch_member(data, permanent=True) | def function[touch_member, parameter[config, dcs]]:
constant[ Rip-off of the ha.touch_member without inter-class dependencies ]
variable[p] assign[=] call[name[Postgresql], parameter[call[name[config]][constant[postgresql]]]]
call[name[p].set_state, parameter[constant[running]]]
call[name[p].set_role, parameter[constant[master]]]
def function[restapi_connection_string, parameter[config]]:
variable[protocol] assign[=] <ast.IfExp object at 0x7da1b2185fc0>
variable[connect_address] assign[=] call[name[config].get, parameter[constant[connect_address]]]
variable[listen] assign[=] call[name[config]][constant[listen]]
return[call[constant[{0}://{1}/patroni].format, parameter[name[protocol], <ast.BoolOp object at 0x7da1b2184250>]]]
variable[data] assign[=] dictionary[[<ast.Constant object at 0x7da1b2184100>, <ast.Constant object at 0x7da1b21840d0>, <ast.Constant object at 0x7da1b21845b0>, <ast.Constant object at 0x7da1b21845e0>], [<ast.Attribute object at 0x7da1b2184610>, <ast.Call object at 0x7da1b2184670>, <ast.Attribute object at 0x7da1b2184850>, <ast.Attribute object at 0x7da1b21848b0>]]
return[call[name[dcs].touch_member, parameter[name[data]]]] | keyword[def] identifier[touch_member] ( identifier[config] , identifier[dcs] ):
literal[string]
identifier[p] = identifier[Postgresql] ( identifier[config] [ literal[string] ])
identifier[p] . identifier[set_state] ( literal[string] )
identifier[p] . identifier[set_role] ( literal[string] )
keyword[def] identifier[restapi_connection_string] ( identifier[config] ):
identifier[protocol] = literal[string] keyword[if] identifier[config] . identifier[get] ( literal[string] ) keyword[else] literal[string]
identifier[connect_address] = identifier[config] . identifier[get] ( literal[string] )
identifier[listen] = identifier[config] [ literal[string] ]
keyword[return] literal[string] . identifier[format] ( identifier[protocol] , identifier[connect_address] keyword[or] identifier[listen] )
identifier[data] ={
literal[string] : identifier[p] . identifier[connection_string] ,
literal[string] : identifier[restapi_connection_string] ( identifier[config] [ literal[string] ]),
literal[string] : identifier[p] . identifier[state] ,
literal[string] : identifier[p] . identifier[role]
}
keyword[return] identifier[dcs] . identifier[touch_member] ( identifier[data] , identifier[permanent] = keyword[True] ) | def touch_member(config, dcs):
""" Rip-off of the ha.touch_member without inter-class dependencies """
p = Postgresql(config['postgresql'])
p.set_state('running')
p.set_role('master')
def restapi_connection_string(config):
protocol = 'https' if config.get('certfile') else 'http'
connect_address = config.get('connect_address')
listen = config['listen']
return '{0}://{1}/patroni'.format(protocol, connect_address or listen)
data = {'conn_url': p.connection_string, 'api_url': restapi_connection_string(config['restapi']), 'state': p.state, 'role': p.role}
return dcs.touch_member(data, permanent=True) |
def sharing_agreements(self):
"""
| Comment: The ids of the sharing agreements used for this ticket
"""
if self.api and self.sharing_agreement_ids:
return self.api._get_sharing_agreements(self.sharing_agreement_ids) | def function[sharing_agreements, parameter[self]]:
constant[
| Comment: The ids of the sharing agreements used for this ticket
]
if <ast.BoolOp object at 0x7da204345630> begin[:]
return[call[name[self].api._get_sharing_agreements, parameter[name[self].sharing_agreement_ids]]] | keyword[def] identifier[sharing_agreements] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[api] keyword[and] identifier[self] . identifier[sharing_agreement_ids] :
keyword[return] identifier[self] . identifier[api] . identifier[_get_sharing_agreements] ( identifier[self] . identifier[sharing_agreement_ids] ) | def sharing_agreements(self):
"""
| Comment: The ids of the sharing agreements used for this ticket
"""
if self.api and self.sharing_agreement_ids:
return self.api._get_sharing_agreements(self.sharing_agreement_ids) # depends on [control=['if'], data=[]] |
def empty_mets():
"""
Create an empty METS file from bundled template.
"""
tpl = METS_XML_EMPTY.decode('utf-8')
tpl = tpl.replace('{{ VERSION }}', VERSION)
tpl = tpl.replace('{{ NOW }}', '%s' % datetime.now())
return OcrdMets(content=tpl.encode('utf-8')) | def function[empty_mets, parameter[]]:
constant[
Create an empty METS file from bundled template.
]
variable[tpl] assign[=] call[name[METS_XML_EMPTY].decode, parameter[constant[utf-8]]]
variable[tpl] assign[=] call[name[tpl].replace, parameter[constant[{{ VERSION }}], name[VERSION]]]
variable[tpl] assign[=] call[name[tpl].replace, parameter[constant[{{ NOW }}], binary_operation[constant[%s] <ast.Mod object at 0x7da2590d6920> call[name[datetime].now, parameter[]]]]]
return[call[name[OcrdMets], parameter[]]] | keyword[def] identifier[empty_mets] ():
literal[string]
identifier[tpl] = identifier[METS_XML_EMPTY] . identifier[decode] ( literal[string] )
identifier[tpl] = identifier[tpl] . identifier[replace] ( literal[string] , identifier[VERSION] )
identifier[tpl] = identifier[tpl] . identifier[replace] ( literal[string] , literal[string] % identifier[datetime] . identifier[now] ())
keyword[return] identifier[OcrdMets] ( identifier[content] = identifier[tpl] . identifier[encode] ( literal[string] )) | def empty_mets():
"""
Create an empty METS file from bundled template.
"""
tpl = METS_XML_EMPTY.decode('utf-8')
tpl = tpl.replace('{{ VERSION }}', VERSION)
tpl = tpl.replace('{{ NOW }}', '%s' % datetime.now())
return OcrdMets(content=tpl.encode('utf-8')) |
def change_result(self, old_result_name, new_result_name, new_er_data=None,
new_pmag_data=None, spec_names=None, samp_names=None,
site_names=None, loc_names=None, replace_data=False):
"""
Find actual data object for result with old_result_name.
Then call Result class change method to update result name and data.
"""
result = self.find_by_name(old_result_name, self.results)
if not result:
msg = '-W- {} is not a currently existing result, so it cannot be updated.'.format(old_result_name)
print(msg)
return False
else:
specimens, samples, sites, locations = None, None, None, None
if spec_names:
specimens = [self.find_or_create_by_name(spec, self.specimens, 'specimen') for spec in spec_names]
if samp_names:
samples = [self.find_or_create_by_name(samp, self.samples, 'sample') for samp in samp_names]
if site_names:
sites = [self.find_or_create_by_name(site, self.sites, 'site') for site in site_names]
if loc_names:
locations = [self.find_or_create_by_name(loc, self.locations, 'location') for loc in loc_names]
result.change_result(new_result_name, new_pmag_data, specimens, samples,
sites, locations, replace_data)
return result | def function[change_result, parameter[self, old_result_name, new_result_name, new_er_data, new_pmag_data, spec_names, samp_names, site_names, loc_names, replace_data]]:
constant[
Find actual data object for result with old_result_name.
Then call Result class change method to update result name and data.
]
variable[result] assign[=] call[name[self].find_by_name, parameter[name[old_result_name], name[self].results]]
if <ast.UnaryOp object at 0x7da1b04a4940> begin[:]
variable[msg] assign[=] call[constant[-W- {} is not a currently existing result, so it cannot be updated.].format, parameter[name[old_result_name]]]
call[name[print], parameter[name[msg]]]
return[constant[False]] | keyword[def] identifier[change_result] ( identifier[self] , identifier[old_result_name] , identifier[new_result_name] , identifier[new_er_data] = keyword[None] ,
identifier[new_pmag_data] = keyword[None] , identifier[spec_names] = keyword[None] , identifier[samp_names] = keyword[None] ,
identifier[site_names] = keyword[None] , identifier[loc_names] = keyword[None] , identifier[replace_data] = keyword[False] ):
literal[string]
identifier[result] = identifier[self] . identifier[find_by_name] ( identifier[old_result_name] , identifier[self] . identifier[results] )
keyword[if] keyword[not] identifier[result] :
identifier[msg] = literal[string] . identifier[format] ( identifier[old_result_name] )
identifier[print] ( identifier[msg] )
keyword[return] keyword[False]
keyword[else] :
identifier[specimens] , identifier[samples] , identifier[sites] , identifier[locations] = keyword[None] , keyword[None] , keyword[None] , keyword[None]
keyword[if] identifier[spec_names] :
identifier[specimens] =[ identifier[self] . identifier[find_or_create_by_name] ( identifier[spec] , identifier[self] . identifier[specimens] , literal[string] ) keyword[for] identifier[spec] keyword[in] identifier[spec_names] ]
keyword[if] identifier[samp_names] :
identifier[samples] =[ identifier[self] . identifier[find_or_create_by_name] ( identifier[samp] , identifier[self] . identifier[samples] , literal[string] ) keyword[for] identifier[samp] keyword[in] identifier[samp_names] ]
keyword[if] identifier[site_names] :
identifier[sites] =[ identifier[self] . identifier[find_or_create_by_name] ( identifier[site] , identifier[self] . identifier[sites] , literal[string] ) keyword[for] identifier[site] keyword[in] identifier[site_names] ]
keyword[if] identifier[loc_names] :
identifier[locations] =[ identifier[self] . identifier[find_or_create_by_name] ( identifier[loc] , identifier[self] . identifier[locations] , literal[string] ) keyword[for] identifier[loc] keyword[in] identifier[loc_names] ]
identifier[result] . identifier[change_result] ( identifier[new_result_name] , identifier[new_pmag_data] , identifier[specimens] , identifier[samples] ,
identifier[sites] , identifier[locations] , identifier[replace_data] )
keyword[return] identifier[result] | def change_result(self, old_result_name, new_result_name, new_er_data=None, new_pmag_data=None, spec_names=None, samp_names=None, site_names=None, loc_names=None, replace_data=False):
"""
Find actual data object for result with old_result_name.
Then call Result class change method to update result name and data.
"""
result = self.find_by_name(old_result_name, self.results)
if not result:
msg = '-W- {} is not a currently existing result, so it cannot be updated.'.format(old_result_name)
print(msg)
return False # depends on [control=['if'], data=[]]
else:
(specimens, samples, sites, locations) = (None, None, None, None)
if spec_names:
specimens = [self.find_or_create_by_name(spec, self.specimens, 'specimen') for spec in spec_names] # depends on [control=['if'], data=[]]
if samp_names:
samples = [self.find_or_create_by_name(samp, self.samples, 'sample') for samp in samp_names] # depends on [control=['if'], data=[]]
if site_names:
sites = [self.find_or_create_by_name(site, self.sites, 'site') for site in site_names] # depends on [control=['if'], data=[]]
if loc_names:
locations = [self.find_or_create_by_name(loc, self.locations, 'location') for loc in loc_names] # depends on [control=['if'], data=[]]
result.change_result(new_result_name, new_pmag_data, specimens, samples, sites, locations, replace_data)
return result |
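# Usage sketch for the find-then-change pattern above, reduced to a
# self-contained example. `Result` and the `results` list are hypothetical
# stand-ins for the pmagpy data objects, not the original API.
class Result(object):
    def __init__(self, name):
        self.name = name

    def change_result(self, new_name, new_pmag_data=None):
        self.name = new_name

results = [Result('old_name')]
target = next((r for r in results if r.name == 'old_name'), None)
if not target:
    print('-W- old_name is not a currently existing result, so it cannot be updated.')
else:
    target.change_result('new_name')
    print(target.name)  # -> new_name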
def register_dimension(self, name, dim_data, **kwargs):
"""
Registers a dimension on this cube.
.. code-block:: python
cube.register_dimension('ntime', 10000,
                description="Number of Timesteps",
lower_extent=100, upper_extent=200)
Parameters
----------
dim_data : int or :class:`~hypercube.dims.Dimension`
if an integer, this will be used to
define the global_size of the dimension
and possibly other attributes if they are
not present in kwargs.
If a Dimension, it will be updated with
any appropriate keyword arguments
description : str
The description for this dimension.
e.g. 'Number of timesteps'.
lower_extent : int
The lower extent of this dimension
within the global space
upper_extent : int
The upper extent of this dimension
within the global space
name : Dimension name
Returns
-------
:class:`~hypercube.dims.Dimension`
A hypercube Dimension
"""
if name in self._dims:
raise AttributeError((
"Attempted to register dimension '{n}'' "
"as an attribute of the cube, but "
"it already exists. Please choose "
"a different name!").format(n=name))
# Create the dimension dictionary
D = self._dims[name] = create_dimension(name,
dim_data, **kwargs)
return D | def function[register_dimension, parameter[self, name, dim_data]]:
constant[
Registers a dimension on this cube.
.. code-block:: python
cube.register_dimension('ntime', 10000,
            description="Number of Timesteps",
lower_extent=100, upper_extent=200)
Parameters
----------
dim_data : int or :class:`~hypercube.dims.Dimension`
if an integer, this will be used to
define the global_size of the dimension
and possibly other attributes if they are
not present in kwargs.
If a Dimension, it will be updated with
any appropriate keyword arguments
description : str
The description for this dimension.
e.g. 'Number of timesteps'.
lower_extent : int
The lower extent of this dimension
within the global space
upper_extent : int
The upper extent of this dimension
within the global space
name : Dimension name
Returns
-------
:class:`~hypercube.dims.Dimension`
A hypercube Dimension
]
if compare[name[name] in name[self]._dims] begin[:]
<ast.Raise object at 0x7da20e9b2c50>
variable[D] assign[=] call[name[create_dimension], parameter[name[name], name[dim_data]]]
return[name[D]] | keyword[def] identifier[register_dimension] ( identifier[self] , identifier[name] , identifier[dim_data] ,** identifier[kwargs] ):
literal[string]
keyword[if] identifier[name] keyword[in] identifier[self] . identifier[_dims] :
keyword[raise] identifier[AttributeError] ((
literal[string]
literal[string]
literal[string]
literal[string] ). identifier[format] ( identifier[n] = identifier[name] ))
identifier[D] = identifier[self] . identifier[_dims] [ identifier[name] ]= identifier[create_dimension] ( identifier[name] ,
identifier[dim_data] ,** identifier[kwargs] )
keyword[return] identifier[D] | def register_dimension(self, name, dim_data, **kwargs):
"""
Registers a dimension on this cube.
.. code-block:: python
cube.register_dimension('ntime', 10000,
                description="Number of Timesteps",
lower_extent=100, upper_extent=200)
Parameters
----------
dim_data : int or :class:`~hypercube.dims.Dimension`
if an integer, this will be used to
define the global_size of the dimension
and possibly other attributes if they are
not present in kwargs.
If a Dimension, it will be updated with
any appropriate keyword arguments
description : str
The description for this dimension.
e.g. 'Number of timesteps'.
lower_extent : int
The lower extent of this dimension
within the global space
upper_extent : int
The upper extent of this dimension
within the global space
name : Dimension name
Returns
-------
:class:`~hypercube.dims.Dimension`
A hypercube Dimension
"""
if name in self._dims:
raise AttributeError("Attempted to register dimension '{n}'' as an attribute of the cube, but it already exists. Please choose a different name!".format(n=name)) # depends on [control=['if'], data=['name']]
# Create the dimension dictionary
D = self._dims[name] = create_dimension(name, dim_data, **kwargs)
return D |
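# Minimal sketch of the duplicate-guard registration pattern used above, with a
# plain dict standing in for the cube's `_dims` registry and a hypothetical
# Dimension class in place of hypercube.dims.Dimension.
class Dimension(object):
    def __init__(self, name, global_size, **kwargs):
        self.name = name
        self.global_size = global_size
        self.__dict__.update(kwargs)

_dims = {}

def register_dimension(name, dim_data, **kwargs):
    if name in _dims:
        raise AttributeError("Dimension '{n}' already exists".format(n=name))
    D = _dims[name] = Dimension(name, dim_data, **kwargs)
    return D

ntime = register_dimension('ntime', 10000, lower_extent=100, upper_extent=200)
print(ntime.global_size, ntime.lower_extent)  # 10000 100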
def multiply(self, other):
"""Return the QuantumChannel self + other.
Args:
other (complex): a complex number.
Returns:
Kraus: the scalar multiplication other * self as a Kraus object.
Raises:
QiskitError: if other is not a valid scalar.
"""
if not isinstance(other, Number):
raise QiskitError("other is not a number")
# If the number is complex we need to convert to general
# kraus channel so we multiply via Choi representation
if isinstance(other, complex) or other < 0:
# Convert to Choi-matrix
return Kraus(Choi(self).multiply(other))
# If the number is real we can update the Kraus operators
# directly
val = np.sqrt(other)
kraus_r = None
kraus_l = [val * k for k in self._data[0]]
if self._data[1] is not None:
kraus_r = [val * k for k in self._data[1]]
return Kraus((kraus_l, kraus_r), self._input_dim, self._output_dim) | def function[multiply, parameter[self, other]]:
    constant[Return the QuantumChannel other * self.
Args:
other (complex): a complex number.
Returns:
Kraus: the scalar multiplication other * self as a Kraus object.
Raises:
QiskitError: if other is not a valid scalar.
]
if <ast.UnaryOp object at 0x7da1b0512c20> begin[:]
<ast.Raise object at 0x7da1b0513730>
if <ast.BoolOp object at 0x7da1b05137f0> begin[:]
return[call[name[Kraus], parameter[call[call[name[Choi], parameter[name[self]]].multiply, parameter[name[other]]]]]]
variable[val] assign[=] call[name[np].sqrt, parameter[name[other]]]
variable[kraus_r] assign[=] constant[None]
variable[kraus_l] assign[=] <ast.ListComp object at 0x7da20c6c59f0>
if compare[call[name[self]._data][constant[1]] is_not constant[None]] begin[:]
variable[kraus_r] assign[=] <ast.ListComp object at 0x7da1b055dfc0>
return[call[name[Kraus], parameter[tuple[[<ast.Name object at 0x7da1b055e710>, <ast.Name object at 0x7da1b055e1d0>]], name[self]._input_dim, name[self]._output_dim]]] | keyword[def] identifier[multiply] ( identifier[self] , identifier[other] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[other] , identifier[Number] ):
keyword[raise] identifier[QiskitError] ( literal[string] )
keyword[if] identifier[isinstance] ( identifier[other] , identifier[complex] ) keyword[or] identifier[other] < literal[int] :
keyword[return] identifier[Kraus] ( identifier[Choi] ( identifier[self] ). identifier[multiply] ( identifier[other] ))
identifier[val] = identifier[np] . identifier[sqrt] ( identifier[other] )
identifier[kraus_r] = keyword[None]
identifier[kraus_l] =[ identifier[val] * identifier[k] keyword[for] identifier[k] keyword[in] identifier[self] . identifier[_data] [ literal[int] ]]
keyword[if] identifier[self] . identifier[_data] [ literal[int] ] keyword[is] keyword[not] keyword[None] :
identifier[kraus_r] =[ identifier[val] * identifier[k] keyword[for] identifier[k] keyword[in] identifier[self] . identifier[_data] [ literal[int] ]]
keyword[return] identifier[Kraus] (( identifier[kraus_l] , identifier[kraus_r] ), identifier[self] . identifier[_input_dim] , identifier[self] . identifier[_output_dim] ) | def multiply(self, other):
"""Return the QuantumChannel self + other.
Args:
other (complex): a complex number.
Returns:
Kraus: the scalar multiplication other * self as a Kraus object.
Raises:
QiskitError: if other is not a valid scalar.
"""
if not isinstance(other, Number):
raise QiskitError('other is not a number') # depends on [control=['if'], data=[]]
# If the number is complex we need to convert to general
# kraus channel so we multiply via Choi representation
if isinstance(other, complex) or other < 0:
# Convert to Choi-matrix
return Kraus(Choi(self).multiply(other)) # depends on [control=['if'], data=[]]
# If the number is real we can update the Kraus operators
# directly
val = np.sqrt(other)
kraus_r = None
kraus_l = [val * k for k in self._data[0]]
if self._data[1] is not None:
kraus_r = [val * k for k in self._data[1]] # depends on [control=['if'], data=[]]
return Kraus((kraus_l, kraus_r), self._input_dim, self._output_dim) |
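# Why the method above scales by a square root: a Kraus channel maps
# rho -> sum_i K_i rho K_i^dagger, so multiplying every K_i by sqrt(c)
# multiplies the channel's output by c. A numpy check, independent of Qiskit:
import numpy as np

kraus = [np.array([[1.0, 0.0], [0.0, 0.0]]),
         np.array([[0.0, 0.0], [0.0, 1.0]])]
rho = np.array([[0.5, 0.5], [0.5, 0.5]])

def apply_channel(ops, state):
    return sum(k.dot(state).dot(k.conj().T) for k in ops)

c = 0.25
scaled = [np.sqrt(c) * k for k in kraus]
assert np.allclose(apply_channel(scaled, rho), c * apply_channel(kraus, rho))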
def execute(self, example_groups):
"""Runs the specs. Returns a tuple indicating the
        number of (successes, errors, skipped).
"""
total_successes, total_errors, total_skipped = 0, 0, 0
for group in example_groups:
runner = ExampleRunner(group, self.formatter)
successes, errors, skips = runner.run()
total_successes += successes
total_errors += errors
total_skipped += skips
return total_successes, total_errors, total_skipped | def function[execute, parameter[self, example_groups]]:
constant[Runs the specs. Returns a tuple indicating the
    number of (successes, errors, skipped).
]
<ast.Tuple object at 0x7da1b1ff0fd0> assign[=] tuple[[<ast.Constant object at 0x7da1b1ff03d0>, <ast.Constant object at 0x7da1b1ff05e0>, <ast.Constant object at 0x7da1b1ff26b0>]]
for taget[name[group]] in starred[name[example_groups]] begin[:]
variable[runner] assign[=] call[name[ExampleRunner], parameter[name[group], name[self].formatter]]
<ast.Tuple object at 0x7da1b1ff1e40> assign[=] call[name[runner].run, parameter[]]
<ast.AugAssign object at 0x7da1b1ff16c0>
<ast.AugAssign object at 0x7da1b1ff2a10>
<ast.AugAssign object at 0x7da1b1ff0bb0>
return[tuple[[<ast.Name object at 0x7da1b1ff25c0>, <ast.Name object at 0x7da1b1ff0b20>, <ast.Name object at 0x7da1b1ff28c0>]]] | keyword[def] identifier[execute] ( identifier[self] , identifier[example_groups] ):
literal[string]
identifier[total_successes] , identifier[total_errors] , identifier[total_skipped] = literal[int] , literal[int] , literal[int]
keyword[for] identifier[group] keyword[in] identifier[example_groups] :
identifier[runner] = identifier[ExampleRunner] ( identifier[group] , identifier[self] . identifier[formatter] )
identifier[successes] , identifier[errors] , identifier[skips] = identifier[runner] . identifier[run] ()
identifier[total_successes] += identifier[successes]
identifier[total_errors] += identifier[errors]
identifier[total_skipped] += identifier[skips]
keyword[return] identifier[total_successes] , identifier[total_errors] , identifier[total_skipped] | def execute(self, example_groups):
"""Runs the specs. Returns a tuple indicating the
    number of (successes, errors, skipped).
"""
(total_successes, total_errors, total_skipped) = (0, 0, 0)
for group in example_groups:
runner = ExampleRunner(group, self.formatter)
(successes, errors, skips) = runner.run()
total_successes += successes
total_errors += errors
total_skipped += skips # depends on [control=['for'], data=['group']]
return (total_successes, total_errors, total_skipped) |
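# The runner above is a plain fold over per-group tallies. A stub version with
# the (successes, errors, skips) triples supplied directly, since ExampleRunner
# and the formatter are not shown here:
def execute(results):
    totals = [0, 0, 0]
    for successes, errors, skips in results:
        totals[0] += successes
        totals[1] += errors
        totals[2] += skips
    return tuple(totals)

print(execute([(3, 1, 0), (2, 0, 2)]))  # (5, 1, 2)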
def bisine_wave(frequency):
"""Emit two sine waves, in stereo at different octaves."""
#
    # We can first use our existing sine generator to generate two different
# waves.
f_hi = frequency
f_lo = frequency / 2.0
with tf.name_scope('hi'):
sine_hi = sine_wave(f_hi)
with tf.name_scope('lo'):
sine_lo = sine_wave(f_lo)
#
# Now, we have two tensors of shape [1, _samples(), 1]. By concatenating
# them along axis 2, we get a tensor of shape [1, _samples(), 2]---a
# stereo waveform.
return tf.concat([sine_lo, sine_hi], axis=2) | def function[bisine_wave, parameter[frequency]]:
constant[Emit two sine waves, in stereo at different octaves.]
variable[f_hi] assign[=] name[frequency]
variable[f_lo] assign[=] binary_operation[name[frequency] / constant[2.0]]
with call[name[tf].name_scope, parameter[constant[hi]]] begin[:]
variable[sine_hi] assign[=] call[name[sine_wave], parameter[name[f_hi]]]
with call[name[tf].name_scope, parameter[constant[lo]]] begin[:]
variable[sine_lo] assign[=] call[name[sine_wave], parameter[name[f_lo]]]
return[call[name[tf].concat, parameter[list[[<ast.Name object at 0x7da18dc05540>, <ast.Name object at 0x7da18dc07970>]]]]] | keyword[def] identifier[bisine_wave] ( identifier[frequency] ):
literal[string]
identifier[f_hi] = identifier[frequency]
identifier[f_lo] = identifier[frequency] / literal[int]
keyword[with] identifier[tf] . identifier[name_scope] ( literal[string] ):
identifier[sine_hi] = identifier[sine_wave] ( identifier[f_hi] )
keyword[with] identifier[tf] . identifier[name_scope] ( literal[string] ):
identifier[sine_lo] = identifier[sine_wave] ( identifier[f_lo] )
keyword[return] identifier[tf] . identifier[concat] ([ identifier[sine_lo] , identifier[sine_hi] ], identifier[axis] = literal[int] ) | def bisine_wave(frequency):
"""Emit two sine waves, in stereo at different octaves."""
#
# We can first our existing sine generator to generate two different
# waves.
f_hi = frequency
f_lo = frequency / 2.0
with tf.name_scope('hi'):
sine_hi = sine_wave(f_hi) # depends on [control=['with'], data=[]]
with tf.name_scope('lo'):
sine_lo = sine_wave(f_lo) # depends on [control=['with'], data=[]]
#
# Now, we have two tensors of shape [1, _samples(), 1]. By concatenating
# them along axis 2, we get a tensor of shape [1, _samples(), 2]---a
# stereo waveform.
return tf.concat([sine_lo, sine_hi], axis=2) |
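# The same stereo layout sketched with numpy instead of TensorFlow, assuming a
# 44.1 kHz sample rate and one-second buffers; two waves an octave apart become
# the two channels of a [1, samples, 2] array, matching the tf.concat above.
import numpy as np

def np_sine_wave(frequency, sample_rate=44100, seconds=1.0):
    t = np.arange(int(sample_rate * seconds)) / float(sample_rate)
    return np.sin(2.0 * np.pi * frequency * t).reshape(1, -1, 1)

f_hi = 440.0
stereo = np.concatenate([np_sine_wave(f_hi / 2.0), np_sine_wave(f_hi)], axis=2)
print(stereo.shape)  # (1, 44100, 2)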
def fetch(self):
"""
Fetch a FeedbackInstance
:returns: Fetched FeedbackInstance
:rtype: twilio.rest.api.v2010.account.call.feedback.FeedbackInstance
"""
params = values.of({})
payload = self._version.fetch(
'GET',
self._uri,
params=params,
)
return FeedbackInstance(
self._version,
payload,
account_sid=self._solution['account_sid'],
call_sid=self._solution['call_sid'],
) | def function[fetch, parameter[self]]:
constant[
Fetch a FeedbackInstance
:returns: Fetched FeedbackInstance
:rtype: twilio.rest.api.v2010.account.call.feedback.FeedbackInstance
]
variable[params] assign[=] call[name[values].of, parameter[dictionary[[], []]]]
variable[payload] assign[=] call[name[self]._version.fetch, parameter[constant[GET], name[self]._uri]]
return[call[name[FeedbackInstance], parameter[name[self]._version, name[payload]]]] | keyword[def] identifier[fetch] ( identifier[self] ):
literal[string]
identifier[params] = identifier[values] . identifier[of] ({})
identifier[payload] = identifier[self] . identifier[_version] . identifier[fetch] (
literal[string] ,
identifier[self] . identifier[_uri] ,
identifier[params] = identifier[params] ,
)
keyword[return] identifier[FeedbackInstance] (
identifier[self] . identifier[_version] ,
identifier[payload] ,
identifier[account_sid] = identifier[self] . identifier[_solution] [ literal[string] ],
identifier[call_sid] = identifier[self] . identifier[_solution] [ literal[string] ],
) | def fetch(self):
"""
Fetch a FeedbackInstance
:returns: Fetched FeedbackInstance
:rtype: twilio.rest.api.v2010.account.call.feedback.FeedbackInstance
"""
params = values.of({})
payload = self._version.fetch('GET', self._uri, params=params)
return FeedbackInstance(self._version, payload, account_sid=self._solution['account_sid'], call_sid=self._solution['call_sid']) |
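# Hedged usage sketch: with the public Twilio helper library the same fetch is
# reached through the top-level client. The SIDs and token below are
# placeholders, and the call needs valid credentials plus network access.
from twilio.rest import Client

client = Client('ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX', 'your_auth_token')
feedback = client.calls('CAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX').feedback().fetch()
print(feedback.quality_score)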
def _clean_pending_exits(self):
"""
Remove those pending exits if:
a) they are the return exits of non-returning SimProcedures
b) they are the return exits of non-returning syscalls
c) they are the return exits of non-returning functions
:return: True if any pending exits are removed, False otherwise
:rtype: bool
"""
pending_exits_to_remove = [ ]
for block_id, pe in self._pending_jobs.items():
if pe.returning_source is None:
# The original call failed. This pending exit must be followed.
continue
func = self.kb.functions.function(pe.returning_source)
if func is None:
# Why does it happen?
l.warning("An expected function at %s is not found. Please report it to Fish.",
hex(pe.returning_source) if pe.returning_source is not None else 'None')
continue
if func.returning is False:
# Oops, it's not returning
# Remove this pending exit
pending_exits_to_remove.append(block_id)
# We want to mark that call as not returning in the current function
current_function_addr = self._block_id_current_func_addr(block_id)
if current_function_addr is not None:
current_function = self.kb.functions.function(current_function_addr)
if current_function is not None:
call_site_addr = self._block_id_addr(pe.src_block_id)
current_function._call_sites[call_site_addr] = (func.addr, None)
else:
l.warning('An expected function at %#x is not found. Please report it to Fish.',
current_function_addr
)
for block_id in pending_exits_to_remove:
l.debug('Removing a pending exit to %#x since the target function %#x does not return',
self._block_id_addr(block_id),
self._pending_jobs[block_id].returning_source,
)
to_remove = self._pending_jobs[block_id]
self._deregister_analysis_job(to_remove.caller_func_addr, to_remove)
del self._pending_jobs[block_id]
if pending_exits_to_remove:
return True
return False | def function[_clean_pending_exits, parameter[self]]:
constant[
Remove those pending exits if:
a) they are the return exits of non-returning SimProcedures
b) they are the return exits of non-returning syscalls
c) they are the return exits of non-returning functions
:return: True if any pending exits are removed, False otherwise
:rtype: bool
]
variable[pending_exits_to_remove] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da18bc72fe0>, <ast.Name object at 0x7da18bc72b90>]]] in starred[call[name[self]._pending_jobs.items, parameter[]]] begin[:]
if compare[name[pe].returning_source is constant[None]] begin[:]
continue
variable[func] assign[=] call[name[self].kb.functions.function, parameter[name[pe].returning_source]]
if compare[name[func] is constant[None]] begin[:]
call[name[l].warning, parameter[constant[An expected function at %s is not found. Please report it to Fish.], <ast.IfExp object at 0x7da18bc704f0>]]
continue
if compare[name[func].returning is constant[False]] begin[:]
call[name[pending_exits_to_remove].append, parameter[name[block_id]]]
variable[current_function_addr] assign[=] call[name[self]._block_id_current_func_addr, parameter[name[block_id]]]
if compare[name[current_function_addr] is_not constant[None]] begin[:]
variable[current_function] assign[=] call[name[self].kb.functions.function, parameter[name[current_function_addr]]]
if compare[name[current_function] is_not constant[None]] begin[:]
variable[call_site_addr] assign[=] call[name[self]._block_id_addr, parameter[name[pe].src_block_id]]
call[name[current_function]._call_sites][name[call_site_addr]] assign[=] tuple[[<ast.Attribute object at 0x7da18bc73880>, <ast.Constant object at 0x7da18bc71de0>]]
for taget[name[block_id]] in starred[name[pending_exits_to_remove]] begin[:]
call[name[l].debug, parameter[constant[Removing a pending exit to %#x since the target function %#x does not return], call[name[self]._block_id_addr, parameter[name[block_id]]], call[name[self]._pending_jobs][name[block_id]].returning_source]]
variable[to_remove] assign[=] call[name[self]._pending_jobs][name[block_id]]
call[name[self]._deregister_analysis_job, parameter[name[to_remove].caller_func_addr, name[to_remove]]]
<ast.Delete object at 0x7da1b1c33e80>
if name[pending_exits_to_remove] begin[:]
return[constant[True]]
return[constant[False]] | keyword[def] identifier[_clean_pending_exits] ( identifier[self] ):
literal[string]
identifier[pending_exits_to_remove] =[]
keyword[for] identifier[block_id] , identifier[pe] keyword[in] identifier[self] . identifier[_pending_jobs] . identifier[items] ():
keyword[if] identifier[pe] . identifier[returning_source] keyword[is] keyword[None] :
keyword[continue]
identifier[func] = identifier[self] . identifier[kb] . identifier[functions] . identifier[function] ( identifier[pe] . identifier[returning_source] )
keyword[if] identifier[func] keyword[is] keyword[None] :
identifier[l] . identifier[warning] ( literal[string] ,
identifier[hex] ( identifier[pe] . identifier[returning_source] ) keyword[if] identifier[pe] . identifier[returning_source] keyword[is] keyword[not] keyword[None] keyword[else] literal[string] )
keyword[continue]
keyword[if] identifier[func] . identifier[returning] keyword[is] keyword[False] :
identifier[pending_exits_to_remove] . identifier[append] ( identifier[block_id] )
identifier[current_function_addr] = identifier[self] . identifier[_block_id_current_func_addr] ( identifier[block_id] )
keyword[if] identifier[current_function_addr] keyword[is] keyword[not] keyword[None] :
identifier[current_function] = identifier[self] . identifier[kb] . identifier[functions] . identifier[function] ( identifier[current_function_addr] )
keyword[if] identifier[current_function] keyword[is] keyword[not] keyword[None] :
identifier[call_site_addr] = identifier[self] . identifier[_block_id_addr] ( identifier[pe] . identifier[src_block_id] )
identifier[current_function] . identifier[_call_sites] [ identifier[call_site_addr] ]=( identifier[func] . identifier[addr] , keyword[None] )
keyword[else] :
identifier[l] . identifier[warning] ( literal[string] ,
identifier[current_function_addr]
)
keyword[for] identifier[block_id] keyword[in] identifier[pending_exits_to_remove] :
identifier[l] . identifier[debug] ( literal[string] ,
identifier[self] . identifier[_block_id_addr] ( identifier[block_id] ),
identifier[self] . identifier[_pending_jobs] [ identifier[block_id] ]. identifier[returning_source] ,
)
identifier[to_remove] = identifier[self] . identifier[_pending_jobs] [ identifier[block_id] ]
identifier[self] . identifier[_deregister_analysis_job] ( identifier[to_remove] . identifier[caller_func_addr] , identifier[to_remove] )
keyword[del] identifier[self] . identifier[_pending_jobs] [ identifier[block_id] ]
keyword[if] identifier[pending_exits_to_remove] :
keyword[return] keyword[True]
keyword[return] keyword[False] | def _clean_pending_exits(self):
"""
Remove those pending exits if:
a) they are the return exits of non-returning SimProcedures
b) they are the return exits of non-returning syscalls
c) they are the return exits of non-returning functions
:return: True if any pending exits are removed, False otherwise
:rtype: bool
"""
pending_exits_to_remove = []
for (block_id, pe) in self._pending_jobs.items():
if pe.returning_source is None:
# The original call failed. This pending exit must be followed.
continue # depends on [control=['if'], data=[]]
func = self.kb.functions.function(pe.returning_source)
if func is None:
# Why does it happen?
l.warning('An expected function at %s is not found. Please report it to Fish.', hex(pe.returning_source) if pe.returning_source is not None else 'None')
continue # depends on [control=['if'], data=[]]
if func.returning is False:
# Oops, it's not returning
# Remove this pending exit
pending_exits_to_remove.append(block_id)
# We want to mark that call as not returning in the current function
current_function_addr = self._block_id_current_func_addr(block_id)
if current_function_addr is not None:
current_function = self.kb.functions.function(current_function_addr)
if current_function is not None:
call_site_addr = self._block_id_addr(pe.src_block_id)
current_function._call_sites[call_site_addr] = (func.addr, None) # depends on [control=['if'], data=['current_function']]
else:
l.warning('An expected function at %#x is not found. Please report it to Fish.', current_function_addr) # depends on [control=['if'], data=['current_function_addr']] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
for block_id in pending_exits_to_remove:
l.debug('Removing a pending exit to %#x since the target function %#x does not return', self._block_id_addr(block_id), self._pending_jobs[block_id].returning_source)
to_remove = self._pending_jobs[block_id]
self._deregister_analysis_job(to_remove.caller_func_addr, to_remove)
del self._pending_jobs[block_id] # depends on [control=['for'], data=['block_id']]
if pending_exits_to_remove:
return True # depends on [control=['if'], data=[]]
return False |
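# The pruning logic above, reduced to a standalone sketch: pending jobs whose
# call target is known not to return are dropped from the worklist. PendingJob
# and the returning map are simplified stand-ins for the angr structures.
class PendingJob(object):
    def __init__(self, returning_source):
        self.returning_source = returning_source

returning = {0x400100: True, 0x400200: False}  # function addr -> does it return?
pending = {'job_a': PendingJob(0x400100), 'job_b': PendingJob(0x400200)}

to_remove = [job_id for job_id, pe in pending.items()
             if pe.returning_source is not None
             and returning.get(pe.returning_source) is False]
for job_id in to_remove:
    del pending[job_id]
print(sorted(pending))  # ['job_a']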
def update_record(self, name, recordid, content, username, password):
''' Update record '''
# headers = {'key': username, 'secret': password}
req = requests.put(self.api_server + '/api/' + name + '/' +
str(recordid), data=json.dumps(content),
auth=(username, password))
return req | def function[update_record, parameter[self, name, recordid, content, username, password]]:
constant[ Update record ]
variable[req] assign[=] call[name[requests].put, parameter[binary_operation[binary_operation[binary_operation[binary_operation[name[self].api_server + constant[/api/]] + name[name]] + constant[/]] + call[name[str], parameter[name[recordid]]]]]]
return[name[req]] | keyword[def] identifier[update_record] ( identifier[self] , identifier[name] , identifier[recordid] , identifier[content] , identifier[username] , identifier[password] ):
literal[string]
identifier[req] = identifier[requests] . identifier[put] ( identifier[self] . identifier[api_server] + literal[string] + identifier[name] + literal[string] +
identifier[str] ( identifier[recordid] ), identifier[data] = identifier[json] . identifier[dumps] ( identifier[content] ),
identifier[auth] =( identifier[username] , identifier[password] ))
keyword[return] identifier[req] | def update_record(self, name, recordid, content, username, password):
""" Update record """
# headers = {'key': username, 'secret': password}
req = requests.put(self.api_server + '/api/' + name + '/' + str(recordid), data=json.dumps(content), auth=(username, password))
return req |
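# Hedged sketch of the same PUT, pointed at httpbin.org as a stand-in endpoint
# (requires network access). Passing json=content instead of data=json.dumps(...)
# would also set the Content-Type header automatically.
import json
import requests

content = {'content': '10.1.1.1', 'type': 'A'}
resp = requests.put('https://httpbin.org/put',
                    data=json.dumps(content),
                    auth=('username', 'password'))
print(resp.status_code)  # 200 when the endpoint is reachable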
def _generateChildrenR(self, target=None):
"""Generator which recursively yields all AXChildren of the object."""
if target is None:
target = self
try:
children = target.AXChildren
except _a11y.Error:
return
if children:
for child in children:
yield child
for c in self._generateChildrenR(child):
yield c | def function[_generateChildrenR, parameter[self, target]]:
constant[Generator which recursively yields all AXChildren of the object.]
if compare[name[target] is constant[None]] begin[:]
variable[target] assign[=] name[self]
<ast.Try object at 0x7da18f09e440>
if name[children] begin[:]
for taget[name[child]] in starred[name[children]] begin[:]
<ast.Yield object at 0x7da18f09cd90>
for taget[name[c]] in starred[call[name[self]._generateChildrenR, parameter[name[child]]]] begin[:]
<ast.Yield object at 0x7da18f09cd60> | keyword[def] identifier[_generateChildrenR] ( identifier[self] , identifier[target] = keyword[None] ):
literal[string]
keyword[if] identifier[target] keyword[is] keyword[None] :
identifier[target] = identifier[self]
keyword[try] :
identifier[children] = identifier[target] . identifier[AXChildren]
keyword[except] identifier[_a11y] . identifier[Error] :
keyword[return]
keyword[if] identifier[children] :
keyword[for] identifier[child] keyword[in] identifier[children] :
keyword[yield] identifier[child]
keyword[for] identifier[c] keyword[in] identifier[self] . identifier[_generateChildrenR] ( identifier[child] ):
keyword[yield] identifier[c] | def _generateChildrenR(self, target=None):
"""Generator which recursively yields all AXChildren of the object."""
if target is None:
target = self # depends on [control=['if'], data=['target']]
try:
children = target.AXChildren # depends on [control=['try'], data=[]]
except _a11y.Error:
return # depends on [control=['except'], data=[]]
if children:
for child in children:
yield child
for c in self._generateChildrenR(child):
yield c # depends on [control=['for'], data=['c']] # depends on [control=['for'], data=['child']] # depends on [control=['if'], data=[]] |
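# The same depth-first recursive generator over a plain tree, self-contained;
# `children` stands in for the AXChildren accessibility attribute.
class Node(object):
    def __init__(self, name, children=()):
        self.name = name
        self.children = list(children)

def generate_children_r(target):
    for child in target.children:
        yield child
        for c in generate_children_r(child):
            yield c

root = Node('root', [Node('a', [Node('a1')]), Node('b')])
print([n.name for n in generate_children_r(root)])  # ['a', 'a1', 'b']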
def executeBatch(cursor, sql,
regex=r"(?mx) ([^';]* (?:'[^']*'[^';]*)*)",
comment_regex=r"(?mx) (?:^\s*$)|(?:--.*$)"):
"""
    Takes a SQL file and executes it as a series of separate statements.
TODO: replace regexes with something easier to grok and extend.
"""
# First, strip comments
sql = "\n".join([x.strip().replace("%", "%%") for x in re.split(comment_regex, sql) if x.strip()])
# Stored procedures don't work with the above regex because many of them are
    # made up of multiple sql statements, each delimited with a single ;
# where the regexes assume each statement delimited by a ; is a complete
# statement to send to mysql and execute.
#
    # Here I'm simply checking for the delimiter statements (which seem to be
# mysql-only) and then using them as markers to start accumulating statements.
# So the first delimiter is the signal to start accumulating
# and the second delimiter is the signal to combine them into
# single sql compound statement and send it to mysql.
in_proc = False
statements = []
for st in re.split(regex, sql)[1:][::2]:
if st.strip().lower().startswith("delimiter"):
in_proc = not in_proc
if statements and not in_proc:
procedure = ";".join(statements)
statements = []
cursor.execute(procedure)
# skip the delimiter line
continue
if in_proc:
statements.append(st)
else:
cursor.execute(st) | def function[executeBatch, parameter[cursor, sql, regex, comment_regex]]:
constant[
    Takes a SQL file and executes it as a series of separate statements.
TODO: replace regexes with something easier to grok and extend.
]
variable[sql] assign[=] call[constant[
].join, parameter[<ast.ListComp object at 0x7da1b164bd90>]]
variable[in_proc] assign[=] constant[False]
variable[statements] assign[=] list[[]]
for taget[name[st]] in starred[call[call[call[name[re].split, parameter[name[regex], name[sql]]]][<ast.Slice object at 0x7da1b1649120>]][<ast.Slice object at 0x7da1b164a7d0>]] begin[:]
if call[call[call[name[st].strip, parameter[]].lower, parameter[]].startswith, parameter[constant[delimiter]]] begin[:]
variable[in_proc] assign[=] <ast.UnaryOp object at 0x7da1b1455b40>
if <ast.BoolOp object at 0x7da1b14391b0> begin[:]
variable[procedure] assign[=] call[constant[;].join, parameter[name[statements]]]
variable[statements] assign[=] list[[]]
call[name[cursor].execute, parameter[name[procedure]]]
continue
if name[in_proc] begin[:]
call[name[statements].append, parameter[name[st]]] | keyword[def] identifier[executeBatch] ( identifier[cursor] , identifier[sql] ,
identifier[regex] = literal[string] ,
identifier[comment_regex] = literal[string] ):
literal[string]
identifier[sql] = literal[string] . identifier[join] ([ identifier[x] . identifier[strip] (). identifier[replace] ( literal[string] , literal[string] ) keyword[for] identifier[x] keyword[in] identifier[re] . identifier[split] ( identifier[comment_regex] , identifier[sql] ) keyword[if] identifier[x] . identifier[strip] ()])
identifier[in_proc] = keyword[False]
identifier[statements] =[]
keyword[for] identifier[st] keyword[in] identifier[re] . identifier[split] ( identifier[regex] , identifier[sql] )[ literal[int] :][:: literal[int] ]:
keyword[if] identifier[st] . identifier[strip] (). identifier[lower] (). identifier[startswith] ( literal[string] ):
identifier[in_proc] = keyword[not] identifier[in_proc]
keyword[if] identifier[statements] keyword[and] keyword[not] identifier[in_proc] :
identifier[procedure] = literal[string] . identifier[join] ( identifier[statements] )
identifier[statements] =[]
identifier[cursor] . identifier[execute] ( identifier[procedure] )
keyword[continue]
keyword[if] identifier[in_proc] :
identifier[statements] . identifier[append] ( identifier[st] )
keyword[else] :
identifier[cursor] . identifier[execute] ( identifier[st] ) | def executeBatch(cursor, sql, regex="(?mx) ([^';]* (?:'[^']*'[^';]*)*)", comment_regex='(?mx) (?:^\\s*$)|(?:--.*$)'):
"""
    Takes a SQL file and executes it as a series of separate statements.
TODO: replace regexes with something easier to grok and extend.
"""
# First, strip comments
sql = '\n'.join([x.strip().replace('%', '%%') for x in re.split(comment_regex, sql) if x.strip()])
# Stored procedures don't work with the above regex because many of them are
    # made up of multiple sql statements, each delimited with a single ;
# where the regexes assume each statement delimited by a ; is a complete
# statement to send to mysql and execute.
#
    # Here I'm simply checking for the delimiter statements (which seem to be
# mysql-only) and then using them as markers to start accumulating statements.
# So the first delimiter is the signal to start accumulating
# and the second delimiter is the signal to combine them into
# single sql compound statement and send it to mysql.
in_proc = False
statements = []
for st in re.split(regex, sql)[1:][::2]:
if st.strip().lower().startswith('delimiter'):
in_proc = not in_proc
if statements and (not in_proc):
procedure = ';'.join(statements)
statements = []
cursor.execute(procedure) # depends on [control=['if'], data=[]]
# skip the delimiter line
continue # depends on [control=['if'], data=[]]
if in_proc:
statements.append(st) # depends on [control=['if'], data=[]]
else:
cursor.execute(st) # depends on [control=['for'], data=['st']] |
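# A reduced, runnable sketch of the split-and-execute idea against an in-memory
# sqlite3 database. The MySQL DELIMITER handling above has no sqlite analogue,
# and splitting on ';' is naive (it would break on quoted semicolons), so this
# only exercises the comment stripping and plain statement splitting.
import re
import sqlite3

sql = """
-- schema
CREATE TABLE t (id INTEGER);
INSERT INTO t VALUES (1);
INSERT INTO t VALUES (2);
"""
conn = sqlite3.connect(':memory:')
cursor = conn.cursor()
for st in sql.split(';'):
    st = re.sub(r'(?m)--.*$', '', st).strip()
    if st:
        cursor.execute(st)
print(cursor.execute('SELECT COUNT(*) FROM t').fetchone())  # (2,)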
def get_active_length(self):
"""
Return the maximum active length (i.e., without trailing silence) among
the pianorolls of all tracks. The unit is time step.
Returns
-------
active_length : int
The maximum active length (i.e., without trailing silence) among the
pianorolls of all tracks. The unit is time step.
"""
active_length = 0
for track in self.tracks:
now_length = track.get_active_length()
if active_length < track.get_active_length():
active_length = now_length
return active_length | def function[get_active_length, parameter[self]]:
constant[
Return the maximum active length (i.e., without trailing silence) among
the pianorolls of all tracks. The unit is time step.
Returns
-------
active_length : int
The maximum active length (i.e., without trailing silence) among the
pianorolls of all tracks. The unit is time step.
]
variable[active_length] assign[=] constant[0]
for taget[name[track]] in starred[name[self].tracks] begin[:]
variable[now_length] assign[=] call[name[track].get_active_length, parameter[]]
if compare[name[active_length] less[<] call[name[track].get_active_length, parameter[]]] begin[:]
variable[active_length] assign[=] name[now_length]
return[name[active_length]] | keyword[def] identifier[get_active_length] ( identifier[self] ):
literal[string]
identifier[active_length] = literal[int]
keyword[for] identifier[track] keyword[in] identifier[self] . identifier[tracks] :
identifier[now_length] = identifier[track] . identifier[get_active_length] ()
keyword[if] identifier[active_length] < identifier[track] . identifier[get_active_length] ():
identifier[active_length] = identifier[now_length]
keyword[return] identifier[active_length] | def get_active_length(self):
"""
Return the maximum active length (i.e., without trailing silence) among
the pianorolls of all tracks. The unit is time step.
Returns
-------
active_length : int
The maximum active length (i.e., without trailing silence) among the
pianorolls of all tracks. The unit is time step.
"""
active_length = 0
for track in self.tracks:
now_length = track.get_active_length()
if active_length < track.get_active_length():
active_length = now_length # depends on [control=['if'], data=['active_length']] # depends on [control=['for'], data=['track']]
return active_length |
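# The loop above is a running maximum; the same result with a stub track class
# and the built-in max (default=0 covers the no-tracks case, Python 3.4+):
class Track(object):
    def __init__(self, length):
        self._length = length

    def get_active_length(self):
        return self._length

tracks = [Track(32), Track(96), Track(64)]
print(max((t.get_active_length() for t in tracks), default=0))  # 96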
def on_start_scene(self, event: StartScene, signal: Callable[[Any], None]):
"""
Start a new scene. The current scene pauses.
"""
self.pause_scene()
self.start_scene(event.new_scene, event.kwargs) | def function[on_start_scene, parameter[self, event, signal]]:
constant[
Start a new scene. The current scene pauses.
]
call[name[self].pause_scene, parameter[]]
call[name[self].start_scene, parameter[name[event].new_scene, name[event].kwargs]] | keyword[def] identifier[on_start_scene] ( identifier[self] , identifier[event] : identifier[StartScene] , identifier[signal] : identifier[Callable] [[ identifier[Any] ], keyword[None] ]):
literal[string]
identifier[self] . identifier[pause_scene] ()
identifier[self] . identifier[start_scene] ( identifier[event] . identifier[new_scene] , identifier[event] . identifier[kwargs] ) | def on_start_scene(self, event: StartScene, signal: Callable[[Any], None]):
"""
Start a new scene. The current scene pauses.
"""
self.pause_scene()
self.start_scene(event.new_scene, event.kwargs) |
def create_assignment( # pylint: disable=too-many-arguments
self,
name,
short_name,
weight,
max_points,
due_date_str,
gradebook_id='',
**kwargs
):
"""Create a new assignment.
Create a new assignment. By default, assignments are created
under the `Uncategorized` category.
Args:
name (str): descriptive assignment name,
i.e. ``new NUMERIC SIMPLE ASSIGNMENT``
short_name (str): short name of assignment, one word of
no more than 5 characters, i.e. ``SAnew``
weight (str): floating point value for weight, i.e. ``1.0``
max_points (str): floating point value for maximum point
total, i.e. ``100.0``
due_date_str (str): due date as string in ``mm-dd-yyyy``
format, i.e. ``08-21-2011``
gradebook_id (str): unique identifier for gradebook, i.e. ``2314``
kwargs (dict): dictionary containing additional parameters,
i.e. ``graderVisible``, ``totalAverage``, and ``categoryId``.
For example:
.. code-block:: python
{
u'graderVisible': True,
u'totalAverage': None
u'categoryId': 1007964,
}
Raises:
requests.RequestException: Exception connection error
ValueError: Unable to decode response content
Returns:
dict: dictionary containing ``data``, ``status`` and ``message``
for example:
.. code-block:: python
{
u'data':
{
u'assignmentId': 18490492,
u'categoryId': 1293820,
u'description': u'',
u'dueDate': 1312171200000,
u'dueDateString': u'08-01-2011',
u'gradebookId': 1293808,
u'graderVisible': False,
u'gradingSchemeId': 18490493,
u'gradingSchemeType': u'NUMERIC',
u'isComposite': False,
u'isHomework': False,
u'maxPointsTotal': 100.0,
u'name': u'new NUMERIC SIMPLE ASSIGNMENT',
u'numStudentGradesToBeApproved': 0,
u'numStudentsToBeGraded': 614,
u'shortName': u'SAnew',
u'userDeleted': False,
u'weight': 1.0
},
u'message': u'assignment is created successfully',
u'status': 1
}
"""
data = {
'name': name,
'shortName': short_name,
'weight': weight,
'graderVisible': False,
'gradingSchemeType': 'NUMERIC',
'gradebookId': gradebook_id or self.gradebook_id,
'maxPointsTotal': max_points,
'dueDateString': due_date_str
}
data.update(kwargs)
log.info("Creating assignment %s", name)
response = self.post('assignment', data)
log.debug('Received response data: %s', response)
return response | def function[create_assignment, parameter[self, name, short_name, weight, max_points, due_date_str, gradebook_id]]:
constant[Create a new assignment.
Create a new assignment. By default, assignments are created
under the `Uncategorized` category.
Args:
name (str): descriptive assignment name,
i.e. ``new NUMERIC SIMPLE ASSIGNMENT``
short_name (str): short name of assignment, one word of
no more than 5 characters, i.e. ``SAnew``
weight (str): floating point value for weight, i.e. ``1.0``
max_points (str): floating point value for maximum point
total, i.e. ``100.0``
due_date_str (str): due date as string in ``mm-dd-yyyy``
format, i.e. ``08-21-2011``
gradebook_id (str): unique identifier for gradebook, i.e. ``2314``
kwargs (dict): dictionary containing additional parameters,
i.e. ``graderVisible``, ``totalAverage``, and ``categoryId``.
For example:
.. code-block:: python
{
u'graderVisible': True,
u'totalAverage': None
u'categoryId': 1007964,
}
Raises:
requests.RequestException: Exception connection error
ValueError: Unable to decode response content
Returns:
dict: dictionary containing ``data``, ``status`` and ``message``
for example:
.. code-block:: python
{
u'data':
{
u'assignmentId': 18490492,
u'categoryId': 1293820,
u'description': u'',
u'dueDate': 1312171200000,
u'dueDateString': u'08-01-2011',
u'gradebookId': 1293808,
u'graderVisible': False,
u'gradingSchemeId': 18490493,
u'gradingSchemeType': u'NUMERIC',
u'isComposite': False,
u'isHomework': False,
u'maxPointsTotal': 100.0,
u'name': u'new NUMERIC SIMPLE ASSIGNMENT',
u'numStudentGradesToBeApproved': 0,
u'numStudentsToBeGraded': 614,
u'shortName': u'SAnew',
u'userDeleted': False,
u'weight': 1.0
},
u'message': u'assignment is created successfully',
u'status': 1
}
]
variable[data] assign[=] dictionary[[<ast.Constant object at 0x7da1b05daa70>, <ast.Constant object at 0x7da1b05da1d0>, <ast.Constant object at 0x7da1b05d9a50>, <ast.Constant object at 0x7da1b05d8310>, <ast.Constant object at 0x7da1b05da770>, <ast.Constant object at 0x7da1b05dafb0>, <ast.Constant object at 0x7da1b05da260>, <ast.Constant object at 0x7da1b05d8fd0>], [<ast.Name object at 0x7da1b05dae00>, <ast.Name object at 0x7da1b05db9d0>, <ast.Name object at 0x7da1b05d9630>, <ast.Constant object at 0x7da1b05dae90>, <ast.Constant object at 0x7da1b05dab30>, <ast.BoolOp object at 0x7da1b05d8820>, <ast.Name object at 0x7da1b05d8070>, <ast.Name object at 0x7da1b05d8ca0>]]
call[name[data].update, parameter[name[kwargs]]]
call[name[log].info, parameter[constant[Creating assignment %s], name[name]]]
variable[response] assign[=] call[name[self].post, parameter[constant[assignment], name[data]]]
call[name[log].debug, parameter[constant[Received response data: %s], name[response]]]
return[name[response]] | keyword[def] identifier[create_assignment] (
identifier[self] ,
identifier[name] ,
identifier[short_name] ,
identifier[weight] ,
identifier[max_points] ,
identifier[due_date_str] ,
identifier[gradebook_id] = literal[string] ,
** identifier[kwargs]
):
literal[string]
identifier[data] ={
literal[string] : identifier[name] ,
literal[string] : identifier[short_name] ,
literal[string] : identifier[weight] ,
literal[string] : keyword[False] ,
literal[string] : literal[string] ,
literal[string] : identifier[gradebook_id] keyword[or] identifier[self] . identifier[gradebook_id] ,
literal[string] : identifier[max_points] ,
literal[string] : identifier[due_date_str]
}
identifier[data] . identifier[update] ( identifier[kwargs] )
identifier[log] . identifier[info] ( literal[string] , identifier[name] )
identifier[response] = identifier[self] . identifier[post] ( literal[string] , identifier[data] )
identifier[log] . identifier[debug] ( literal[string] , identifier[response] )
keyword[return] identifier[response] | def create_assignment(self, name, short_name, weight, max_points, due_date_str, gradebook_id='', **kwargs): # pylint: disable=too-many-arguments
"Create a new assignment.\n\n Create a new assignment. By default, assignments are created\n under the `Uncategorized` category.\n\n Args:\n name (str): descriptive assignment name,\n i.e. ``new NUMERIC SIMPLE ASSIGNMENT``\n short_name (str): short name of assignment, one word of\n no more than 5 characters, i.e. ``SAnew``\n weight (str): floating point value for weight, i.e. ``1.0``\n max_points (str): floating point value for maximum point\n total, i.e. ``100.0``\n due_date_str (str): due date as string in ``mm-dd-yyyy``\n format, i.e. ``08-21-2011``\n gradebook_id (str): unique identifier for gradebook, i.e. ``2314``\n kwargs (dict): dictionary containing additional parameters,\n i.e. ``graderVisible``, ``totalAverage``, and ``categoryId``.\n\n For example:\n\n .. code-block:: python\n\n {\n u'graderVisible': True,\n u'totalAverage': None\n u'categoryId': 1007964,\n }\n\n Raises:\n requests.RequestException: Exception connection error\n ValueError: Unable to decode response content\n\n Returns:\n dict: dictionary containing ``data``, ``status`` and ``message``\n for example:\n\n .. code-block:: python\n\n {\n u'data':\n {\n u'assignmentId': 18490492,\n u'categoryId': 1293820,\n u'description': u'',\n u'dueDate': 1312171200000,\n u'dueDateString': u'08-01-2011',\n u'gradebookId': 1293808,\n u'graderVisible': False,\n u'gradingSchemeId': 18490493,\n u'gradingSchemeType': u'NUMERIC',\n u'isComposite': False,\n u'isHomework': False,\n u'maxPointsTotal': 100.0,\n u'name': u'new NUMERIC SIMPLE ASSIGNMENT',\n u'numStudentGradesToBeApproved': 0,\n u'numStudentsToBeGraded': 614,\n u'shortName': u'SAnew',\n u'userDeleted': False,\n u'weight': 1.0\n },\n u'message': u'assignment is created successfully',\n u'status': 1\n }\n\n "
data = {'name': name, 'shortName': short_name, 'weight': weight, 'graderVisible': False, 'gradingSchemeType': 'NUMERIC', 'gradebookId': gradebook_id or self.gradebook_id, 'maxPointsTotal': max_points, 'dueDateString': due_date_str}
data.update(kwargs)
log.info('Creating assignment %s', name)
response = self.post('assignment', data)
log.debug('Received response data: %s', response)
return response |
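# The payload assembly above is a defaults-then-overrides pattern: named
# arguments seed a base dict and **kwargs may extend or replace entries.
# A self-contained sketch with hypothetical field names:
def build_payload(name, weight, **kwargs):
    data = {'name': name, 'weight': weight, 'graderVisible': False}
    data.update(kwargs)
    return data

print(build_payload('HW1', 1.0, graderVisible=True, categoryId=1007964))
# {'name': 'HW1', 'weight': 1.0, 'graderVisible': True, 'categoryId': 1007964}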
def get_output_dict(stack):
"""Returns a dict of key/values for the outputs for a given CF stack.
Args:
stack (dict): The stack object to get
outputs from.
Returns:
dict: A dictionary with key/values for each output on the stack.
"""
outputs = {}
if 'Outputs' not in stack:
return outputs
for output in stack['Outputs']:
logger.debug(" %s %s: %s", stack['StackName'], output['OutputKey'],
output['OutputValue'])
outputs[output['OutputKey']] = output['OutputValue']
return outputs | def function[get_output_dict, parameter[stack]]:
constant[Returns a dict of key/values for the outputs for a given CF stack.
Args:
stack (dict): The stack object to get
outputs from.
Returns:
dict: A dictionary with key/values for each output on the stack.
]
variable[outputs] assign[=] dictionary[[], []]
if compare[constant[Outputs] <ast.NotIn object at 0x7da2590d7190> name[stack]] begin[:]
return[name[outputs]]
for taget[name[output]] in starred[call[name[stack]][constant[Outputs]]] begin[:]
call[name[logger].debug, parameter[constant[ %s %s: %s], call[name[stack]][constant[StackName]], call[name[output]][constant[OutputKey]], call[name[output]][constant[OutputValue]]]]
call[name[outputs]][call[name[output]][constant[OutputKey]]] assign[=] call[name[output]][constant[OutputValue]]
return[name[outputs]] | keyword[def] identifier[get_output_dict] ( identifier[stack] ):
literal[string]
identifier[outputs] ={}
keyword[if] literal[string] keyword[not] keyword[in] identifier[stack] :
keyword[return] identifier[outputs]
keyword[for] identifier[output] keyword[in] identifier[stack] [ literal[string] ]:
identifier[logger] . identifier[debug] ( literal[string] , identifier[stack] [ literal[string] ], identifier[output] [ literal[string] ],
identifier[output] [ literal[string] ])
identifier[outputs] [ identifier[output] [ literal[string] ]]= identifier[output] [ literal[string] ]
keyword[return] identifier[outputs] | def get_output_dict(stack):
"""Returns a dict of key/values for the outputs for a given CF stack.
Args:
stack (dict): The stack object to get
outputs from.
Returns:
dict: A dictionary with key/values for each output on the stack.
"""
outputs = {}
if 'Outputs' not in stack:
return outputs # depends on [control=['if'], data=[]]
for output in stack['Outputs']:
logger.debug(' %s %s: %s', stack['StackName'], output['OutputKey'], output['OutputValue'])
outputs[output['OutputKey']] = output['OutputValue'] # depends on [control=['for'], data=['output']]
return outputs |
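# Runnable example with a stack dict shaped like a boto3 describe_stacks entry;
# a dict comprehension gives the same key/value mapping as the loop above.
stack = {
    'StackName': 'demo',
    'Outputs': [
        {'OutputKey': 'VpcId', 'OutputValue': 'vpc-123456'},
        {'OutputKey': 'SubnetId', 'OutputValue': 'subnet-abcdef'},
    ],
}
outputs = {o['OutputKey']: o['OutputValue'] for o in stack.get('Outputs', [])}
print(outputs['VpcId'])  # vpc-123456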
def restore_snapshot(self, context, snapshot_name):
"""
Restores virtual machine from a snapshot
:param context: resource context of the vCenterShell
:type context: models.QualiDriverModels.ResourceCommandContext
        :param snapshot_name: name of the snapshot to restore from
:type snapshot_name: str
:return:
"""
resource_details = self._parse_remote_model(context)
self.command_wrapper.execute_command_with_connection(context,
self.snapshot_restorer.restore_snapshot,
resource_details.vm_uuid,
resource_details.fullname,
snapshot_name) | def function[restore_snapshot, parameter[self, context, snapshot_name]]:
constant[
Restores virtual machine from a snapshot
:param context: resource context of the vCenterShell
:type context: models.QualiDriverModels.ResourceCommandContext
:param snapshot_name: snapshot name to save to
:type snapshot_name: str
:return:
]
variable[resource_details] assign[=] call[name[self]._parse_remote_model, parameter[name[context]]]
call[name[self].command_wrapper.execute_command_with_connection, parameter[name[context], name[self].snapshot_restorer.restore_snapshot, name[resource_details].vm_uuid, name[resource_details].fullname, name[snapshot_name]]] | keyword[def] identifier[restore_snapshot] ( identifier[self] , identifier[context] , identifier[snapshot_name] ):
literal[string]
identifier[resource_details] = identifier[self] . identifier[_parse_remote_model] ( identifier[context] )
identifier[self] . identifier[command_wrapper] . identifier[execute_command_with_connection] ( identifier[context] ,
identifier[self] . identifier[snapshot_restorer] . identifier[restore_snapshot] ,
identifier[resource_details] . identifier[vm_uuid] ,
identifier[resource_details] . identifier[fullname] ,
identifier[snapshot_name] ) | def restore_snapshot(self, context, snapshot_name):
"""
Restores virtual machine from a snapshot
:param context: resource context of the vCenterShell
:type context: models.QualiDriverModels.ResourceCommandContext
        :param snapshot_name: name of the snapshot to restore from
:type snapshot_name: str
:return:
"""
resource_details = self._parse_remote_model(context)
self.command_wrapper.execute_command_with_connection(context, self.snapshot_restorer.restore_snapshot, resource_details.vm_uuid, resource_details.fullname, snapshot_name) |
def process_der(self, data, name):
"""
DER processing
:param data:
:param name:
:return:
"""
from cryptography.x509.base import load_der_x509_certificate
try:
x509 = load_der_x509_certificate(data, self.get_backend())
self.num_der_certs += 1
return self.process_x509(x509, name=name, pem=False, source='der-cert')
except Exception as e:
logger.debug('DER processing failed: %s : %s' % (name, e))
self.trace_logger.log(e) | def function[process_der, parameter[self, data, name]]:
constant[
DER processing
:param data:
:param name:
:return:
]
from relative_module[cryptography.x509.base] import module[load_der_x509_certificate]
<ast.Try object at 0x7da20c6c5cc0> | keyword[def] identifier[process_der] ( identifier[self] , identifier[data] , identifier[name] ):
literal[string]
keyword[from] identifier[cryptography] . identifier[x509] . identifier[base] keyword[import] identifier[load_der_x509_certificate]
keyword[try] :
identifier[x509] = identifier[load_der_x509_certificate] ( identifier[data] , identifier[self] . identifier[get_backend] ())
identifier[self] . identifier[num_der_certs] += literal[int]
keyword[return] identifier[self] . identifier[process_x509] ( identifier[x509] , identifier[name] = identifier[name] , identifier[pem] = keyword[False] , identifier[source] = literal[string] )
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[logger] . identifier[debug] ( literal[string] %( identifier[name] , identifier[e] ))
identifier[self] . identifier[trace_logger] . identifier[log] ( identifier[e] ) | def process_der(self, data, name):
"""
DER processing
:param data:
:param name:
:return:
"""
from cryptography.x509.base import load_der_x509_certificate
try:
x509 = load_der_x509_certificate(data, self.get_backend())
self.num_der_certs += 1
return self.process_x509(x509, name=name, pem=False, source='der-cert') # depends on [control=['try'], data=[]]
except Exception as e:
logger.debug('DER processing failed: %s : %s' % (name, e))
self.trace_logger.log(e) # depends on [control=['except'], data=['e']] |
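# Hedged sketch of loading a DER certificate with the cryptography package;
# 'cert.der' is a placeholder path, and recent cryptography releases accept
# the bytes without an explicit backend argument, which is assumed here.
from cryptography.x509 import load_der_x509_certificate

with open('cert.der', 'rb') as f:
    cert = load_der_x509_certificate(f.read())
print(cert.subject)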
def can_update_topics_to_sticky_topics(self, forum, user):
""" Given a forum, checks whether the user can change its topic types to sticky topics. """
return (
self._perform_basic_permission_check(forum, user, 'can_edit_posts') and
self._perform_basic_permission_check(forum, user, 'can_post_stickies')
) | def function[can_update_topics_to_sticky_topics, parameter[self, forum, user]]:
constant[ Given a forum, checks whether the user can change its topic types to sticky topics. ]
return[<ast.BoolOp object at 0x7da207f9ad70>] | keyword[def] identifier[can_update_topics_to_sticky_topics] ( identifier[self] , identifier[forum] , identifier[user] ):
literal[string]
keyword[return] (
identifier[self] . identifier[_perform_basic_permission_check] ( identifier[forum] , identifier[user] , literal[string] ) keyword[and]
identifier[self] . identifier[_perform_basic_permission_check] ( identifier[forum] , identifier[user] , literal[string] )
) | def can_update_topics_to_sticky_topics(self, forum, user):
""" Given a forum, checks whether the user can change its topic types to sticky topics. """
return self._perform_basic_permission_check(forum, user, 'can_edit_posts') and self._perform_basic_permission_check(forum, user, 'can_post_stickies') |
def replicate_with_dst_resource_provisioning(self, max_time_out_of_sync,
dst_pool_id,
dst_lun_name=None,
remote_system=None,
replication_name=None,
dst_size=None, dst_sp=None,
is_dst_thin=None,
dst_tiering_policy=None,
is_dst_compression=None):
"""
Creates a replication session with destination lun provisioning.
:param max_time_out_of_sync: maximum time to wait before syncing the
source and destination. Value `-1` means the automatic sync is not
performed. `0` means it is a sync replication.
:param dst_pool_id: id of pool to allocate destination lun.
:param dst_lun_name: destination lun name.
:param remote_system: `UnityRemoteSystem` object. The remote system to
which the replication is being configured. When not specified, it
defaults to local system.
:param replication_name: replication name.
:param dst_size: destination lun size.
:param dst_sp: `NodeEnum` value. Default storage processor of
destination lun.
:param is_dst_thin: indicates whether destination lun is thin or not.
:param dst_tiering_policy: `TieringPolicyEnum` value. Tiering policy of
destination lun.
:param is_dst_compression: indicates whether destination lun is
compression enabled or not.
:return: created replication session.
"""
dst_size = self.size_total if dst_size is None else dst_size
dst_resource = UnityResourceConfig.to_embedded(
name=dst_lun_name, pool_id=dst_pool_id,
size=dst_size, default_sp=dst_sp,
tiering_policy=dst_tiering_policy, is_thin_enabled=is_dst_thin,
is_compression_enabled=is_dst_compression)
return UnityReplicationSession.create_with_dst_resource_provisioning(
self._cli, self.get_id(), dst_resource, max_time_out_of_sync,
remote_system=remote_system, name=replication_name) | def function[replicate_with_dst_resource_provisioning, parameter[self, max_time_out_of_sync, dst_pool_id, dst_lun_name, remote_system, replication_name, dst_size, dst_sp, is_dst_thin, dst_tiering_policy, is_dst_compression]]:
constant[
Creates a replication session with destination lun provisioning.
:param max_time_out_of_sync: maximum time to wait before syncing the
source and destination. Value `-1` means the automatic sync is not
performed. `0` means it is a sync replication.
:param dst_pool_id: id of pool to allocate destination lun.
:param dst_lun_name: destination lun name.
:param remote_system: `UnityRemoteSystem` object. The remote system to
which the replication is being configured. When not specified, it
defaults to local system.
:param replication_name: replication name.
:param dst_size: destination lun size.
:param dst_sp: `NodeEnum` value. Default storage processor of
destination lun.
:param is_dst_thin: indicates whether destination lun is thin or not.
:param dst_tiering_policy: `TieringPolicyEnum` value. Tiering policy of
destination lun.
:param is_dst_compression: indicates whether destination lun is
compression enabled or not.
:return: created replication session.
]
variable[dst_size] assign[=] <ast.IfExp object at 0x7da1b10c4790>
variable[dst_resource] assign[=] call[name[UnityResourceConfig].to_embedded, parameter[]]
return[call[name[UnityReplicationSession].create_with_dst_resource_provisioning, parameter[name[self]._cli, call[name[self].get_id, parameter[]], name[dst_resource], name[max_time_out_of_sync]]]] | keyword[def] identifier[replicate_with_dst_resource_provisioning] ( identifier[self] , identifier[max_time_out_of_sync] ,
identifier[dst_pool_id] ,
identifier[dst_lun_name] = keyword[None] ,
identifier[remote_system] = keyword[None] ,
identifier[replication_name] = keyword[None] ,
identifier[dst_size] = keyword[None] , identifier[dst_sp] = keyword[None] ,
identifier[is_dst_thin] = keyword[None] ,
identifier[dst_tiering_policy] = keyword[None] ,
identifier[is_dst_compression] = keyword[None] ):
literal[string]
identifier[dst_size] = identifier[self] . identifier[size_total] keyword[if] identifier[dst_size] keyword[is] keyword[None] keyword[else] identifier[dst_size]
identifier[dst_resource] = identifier[UnityResourceConfig] . identifier[to_embedded] (
identifier[name] = identifier[dst_lun_name] , identifier[pool_id] = identifier[dst_pool_id] ,
identifier[size] = identifier[dst_size] , identifier[default_sp] = identifier[dst_sp] ,
identifier[tiering_policy] = identifier[dst_tiering_policy] , identifier[is_thin_enabled] = identifier[is_dst_thin] ,
identifier[is_compression_enabled] = identifier[is_dst_compression] )
keyword[return] identifier[UnityReplicationSession] . identifier[create_with_dst_resource_provisioning] (
identifier[self] . identifier[_cli] , identifier[self] . identifier[get_id] (), identifier[dst_resource] , identifier[max_time_out_of_sync] ,
identifier[remote_system] = identifier[remote_system] , identifier[name] = identifier[replication_name] ) | def replicate_with_dst_resource_provisioning(self, max_time_out_of_sync, dst_pool_id, dst_lun_name=None, remote_system=None, replication_name=None, dst_size=None, dst_sp=None, is_dst_thin=None, dst_tiering_policy=None, is_dst_compression=None):
"""
Creates a replication session with destination lun provisioning.
:param max_time_out_of_sync: maximum time to wait before syncing the
source and destination. Value `-1` means the automatic sync is not
performed. `0` means it is a sync replication.
:param dst_pool_id: id of pool to allocate destination lun.
:param dst_lun_name: destination lun name.
:param remote_system: `UnityRemoteSystem` object. The remote system to
which the replication is being configured. When not specified, it
defaults to local system.
:param replication_name: replication name.
:param dst_size: destination lun size.
:param dst_sp: `NodeEnum` value. Default storage processor of
destination lun.
:param is_dst_thin: indicates whether destination lun is thin or not.
:param dst_tiering_policy: `TieringPolicyEnum` value. Tiering policy of
destination lun.
:param is_dst_compression: indicates whether destination lun is
compression enabled or not.
:return: created replication session.
"""
dst_size = self.size_total if dst_size is None else dst_size
dst_resource = UnityResourceConfig.to_embedded(name=dst_lun_name, pool_id=dst_pool_id, size=dst_size, default_sp=dst_sp, tiering_policy=dst_tiering_policy, is_thin_enabled=is_dst_thin, is_compression_enabled=is_dst_compression)
return UnityReplicationSession.create_with_dst_resource_provisioning(self._cli, self.get_id(), dst_resource, max_time_out_of_sync, remote_system=remote_system, name=replication_name) |
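A hedged usage sketch for the method above; `lun` is a hypothetical, already-looked-up UnityLun, and the pool id and LUN name are placeholders rather than values from the source.
# Hypothetical call: the keyword names come straight from the signature above.
session = lun.replicate_with_dst_resource_provisioning(
    max_time_out_of_sync=60,   # periodic sync; -1 disables auto sync, 0 means synchronous
    dst_pool_id='pool_1',      # placeholder pool id
    dst_lun_name='lun_replica')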
def get_string(self, **kwargs):
"""Return string representation of table in current state.
Arguments:
title - optional table title
start - index of first data row to include in output
end - index of last data row to include in output PLUS ONE (list slice style)
fields - names of fields (columns) to include
header - print a header showing field names (True or False)
border - print a border around the table (True or False)
hrules - controls printing of horizontal rules after rows. Allowed values: ALL, FRAME, HEADER, NONE
vrules - controls printing of vertical rules between columns. Allowed values: FRAME, ALL, NONE
int_format - controls formatting of integer data
float_format - controls formatting of floating point data
padding_width - number of spaces on either side of column data (only used if left and right paddings are None)
left_padding_width - number of spaces on left hand side of column data
right_padding_width - number of spaces on right hand side of column data
vertical_char - single character string used to draw vertical lines
horizontal_char - single character string used to draw horizontal lines
junction_char - single character string used to draw line junctions
sortby - name of field to sort rows by
sort_key - sorting key function, applied to data points before sorting
reversesort - True or False to sort in descending or ascending order
print_empty - if True, stringify just the header for an empty table, if False return an empty string """
options = self._get_options(kwargs)
lines = []
# Don't think too hard about an empty table
# Is this the desired behaviour? Maybe we should still print the
# header?
if self.rowcount == 0 and (not options["print_empty"] or not options["border"]):
return ""
# Get the rows we need to print, taking into account slicing, sorting,
# etc.
rows = self._get_rows(options)
# Turn all data in all rows into Unicode, formatted as desired
formatted_rows = self._format_rows(rows, options)
# Compute column widths
self._compute_widths(formatted_rows, options)
self._hrule = self._stringify_hrule(options)
# Add title
title = options["title"] or self._title
if title:
lines.append(self._stringify_title(title, options))
# Add header or top of border
if options["header"]:
lines.append(self._stringify_header(options))
elif options["border"] and options["hrules"] in (ALL, FRAME):
lines.append(self._hrule)
# Add rows
for row in formatted_rows:
lines.append(self._stringify_row(row, options))
# Add bottom of border
if options["border"] and options["hrules"] == FRAME:
lines.append(self._hrule)
return self._unicode("\n").join(lines) | def function[get_string, parameter[self]]:
constant[Return string representation of table in current state.
Arguments:
title - optional table title
start - index of first data row to include in output
end - index of last data row to include in output PLUS ONE (list slice style)
fields - names of fields (columns) to include
header - print a header showing field names (True or False)
border - print a border around the table (True or False)
hrules - controls printing of horizontal rules after rows. Allowed values: ALL, FRAME, HEADER, NONE
vrules - controls printing of vertical rules between columns. Allowed values: FRAME, ALL, NONE
int_format - controls formatting of integer data
float_format - controls formatting of floating point data
padding_width - number of spaces on either side of column data (only used if left and right paddings are None)
left_padding_width - number of spaces on left hand side of column data
right_padding_width - number of spaces on right hand side of column data
vertical_char - single character string used to draw vertical lines
horizontal_char - single character string used to draw horizontal lines
junction_char - single character string used to draw line junctions
sortby - name of field to sort rows by
sort_key - sorting key function, applied to data points before sorting
reversesort - True or False to sort in descending or ascending order
print_empty - if True, stringify just the header for an empty table, if False return an empty string ]
variable[options] assign[=] call[name[self]._get_options, parameter[name[kwargs]]]
variable[lines] assign[=] list[[]]
if <ast.BoolOp object at 0x7da18eb561d0> begin[:]
return[constant[]]
variable[rows] assign[=] call[name[self]._get_rows, parameter[name[options]]]
variable[formatted_rows] assign[=] call[name[self]._format_rows, parameter[name[rows], name[options]]]
call[name[self]._compute_widths, parameter[name[formatted_rows], name[options]]]
name[self]._hrule assign[=] call[name[self]._stringify_hrule, parameter[name[options]]]
variable[title] assign[=] <ast.BoolOp object at 0x7da18eb54e80>
if name[title] begin[:]
call[name[lines].append, parameter[call[name[self]._stringify_title, parameter[name[title], name[options]]]]]
if call[name[options]][constant[header]] begin[:]
call[name[lines].append, parameter[call[name[self]._stringify_header, parameter[name[options]]]]]
for taget[name[row]] in starred[name[formatted_rows]] begin[:]
call[name[lines].append, parameter[call[name[self]._stringify_row, parameter[name[row], name[options]]]]]
if <ast.BoolOp object at 0x7da18eb57160> begin[:]
call[name[lines].append, parameter[name[self]._hrule]]
return[call[call[name[self]._unicode, parameter[constant[
]]].join, parameter[name[lines]]]] | keyword[def] identifier[get_string] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
identifier[options] = identifier[self] . identifier[_get_options] ( identifier[kwargs] )
identifier[lines] =[]
keyword[if] identifier[self] . identifier[rowcount] == literal[int] keyword[and] ( keyword[not] identifier[options] [ literal[string] ] keyword[or] keyword[not] identifier[options] [ literal[string] ]):
keyword[return] literal[string]
identifier[rows] = identifier[self] . identifier[_get_rows] ( identifier[options] )
identifier[formatted_rows] = identifier[self] . identifier[_format_rows] ( identifier[rows] , identifier[options] )
identifier[self] . identifier[_compute_widths] ( identifier[formatted_rows] , identifier[options] )
identifier[self] . identifier[_hrule] = identifier[self] . identifier[_stringify_hrule] ( identifier[options] )
identifier[title] = identifier[options] [ literal[string] ] keyword[or] identifier[self] . identifier[_title]
keyword[if] identifier[title] :
identifier[lines] . identifier[append] ( identifier[self] . identifier[_stringify_title] ( identifier[title] , identifier[options] ))
keyword[if] identifier[options] [ literal[string] ]:
identifier[lines] . identifier[append] ( identifier[self] . identifier[_stringify_header] ( identifier[options] ))
keyword[elif] identifier[options] [ literal[string] ] keyword[and] identifier[options] [ literal[string] ] keyword[in] ( identifier[ALL] , identifier[FRAME] ):
identifier[lines] . identifier[append] ( identifier[self] . identifier[_hrule] )
keyword[for] identifier[row] keyword[in] identifier[formatted_rows] :
identifier[lines] . identifier[append] ( identifier[self] . identifier[_stringify_row] ( identifier[row] , identifier[options] ))
keyword[if] identifier[options] [ literal[string] ] keyword[and] identifier[options] [ literal[string] ]== identifier[FRAME] :
identifier[lines] . identifier[append] ( identifier[self] . identifier[_hrule] )
keyword[return] identifier[self] . identifier[_unicode] ( literal[string] ). identifier[join] ( identifier[lines] ) | def get_string(self, **kwargs):
"""Return string representation of table in current state.
Arguments:
title - optional table title
start - index of first data row to include in output
end - index of last data row to include in output PLUS ONE (list slice style)
fields - names of fields (columns) to include
header - print a header showing field names (True or False)
border - print a border around the table (True or False)
hrules - controls printing of horizontal rules after rows. Allowed values: ALL, FRAME, HEADER, NONE
vrules - controls printing of vertical rules between columns. Allowed values: FRAME, ALL, NONE
int_format - controls formatting of integer data
float_format - controls formatting of floating point data
padding_width - number of spaces on either side of column data (only used if left and right paddings are None)
left_padding_width - number of spaces on left hand side of column data
right_padding_width - number of spaces on right hand side of column data
vertical_char - single character string used to draw vertical lines
horizontal_char - single character string used to draw horizontal lines
junction_char - single character string used to draw line junctions
sortby - name of field to sort rows by
sort_key - sorting key function, applied to data points before sorting
reversesort - True or False to sort in descending or ascending order
print_empty - if True, stringify just the header for an empty table, if False return an empty string """
options = self._get_options(kwargs)
lines = []
# Don't think too hard about an empty table
# Is this the desired behaviour? Maybe we should still print the
# header?
if self.rowcount == 0 and (not options['print_empty'] or not options['border']):
return '' # depends on [control=['if'], data=[]]
# Get the rows we need to print, taking into account slicing, sorting,
# etc.
rows = self._get_rows(options)
# Turn all data in all rows into Unicode, formatted as desired
formatted_rows = self._format_rows(rows, options)
# Compute column widths
self._compute_widths(formatted_rows, options)
self._hrule = self._stringify_hrule(options)
# Add title
title = options['title'] or self._title
if title:
lines.append(self._stringify_title(title, options)) # depends on [control=['if'], data=[]]
# Add header or top of border
if options['header']:
lines.append(self._stringify_header(options)) # depends on [control=['if'], data=[]]
elif options['border'] and options['hrules'] in (ALL, FRAME):
lines.append(self._hrule) # depends on [control=['if'], data=[]]
# Add rows
for row in formatted_rows:
lines.append(self._stringify_row(row, options)) # depends on [control=['for'], data=['row']]
# Add bottom of border
if options['border'] and options['hrules'] == FRAME:
lines.append(self._hrule) # depends on [control=['if'], data=[]]
return self._unicode('\n').join(lines) |
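This `get_string` matches the prettytable API, so a short usage sketch (assuming the surrounding class is PrettyTable) illustrates the keyword arguments documented above.
from prettytable import PrettyTable

table = PrettyTable()
table.field_names = ['City', 'Population']
table.add_row(['Tokyo', 13929286])
table.add_row(['Oslo', 693494])
# Sort descending by the Population column, keeping the border and header.
print(table.get_string(sortby='Population', reversesort=True, border=True, header=True))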
def deserialize_tag(stream, header, verifier=None):
"""Deserialize the Tag value from a non-framed stream.
:param stream: Source data stream
:type stream: io.BytesIO
:param header: Deserialized header
:type header: aws_encryption_sdk.structures.MessageHeader
:param verifier: Signature verifier object (optional)
:type verifier: aws_encryption_sdk.internal.crypto.Verifier
:returns: Tag value for body
:rtype: bytes
"""
(data_tag,) = unpack_values(
format_string=">{auth_len}s".format(auth_len=header.algorithm.auth_len), stream=stream, verifier=verifier
)
return data_tag | def function[deserialize_tag, parameter[stream, header, verifier]]:
constant[Deserialize the Tag value from a non-framed stream.
:param stream: Source data stream
:type stream: io.BytesIO
:param header: Deserialized header
:type header: aws_encryption_sdk.structures.MessageHeader
:param verifier: Signature verifier object (optional)
:type verifier: aws_encryption_sdk.internal.crypto.Verifier
:returns: Tag value for body
:rtype: bytes
]
<ast.Tuple object at 0x7da18f810be0> assign[=] call[name[unpack_values], parameter[]]
return[name[data_tag]] | keyword[def] identifier[deserialize_tag] ( identifier[stream] , identifier[header] , identifier[verifier] = keyword[None] ):
literal[string]
( identifier[data_tag] ,)= identifier[unpack_values] (
identifier[format_string] = literal[string] . identifier[format] ( identifier[auth_len] = identifier[header] . identifier[algorithm] . identifier[auth_len] ), identifier[stream] = identifier[stream] , identifier[verifier] = identifier[verifier]
)
keyword[return] identifier[data_tag] | def deserialize_tag(stream, header, verifier=None):
"""Deserialize the Tag value from a non-framed stream.
:param stream: Source data stream
:type stream: io.BytesIO
:param header: Deserialized header
:type header: aws_encryption_sdk.structures.MessageHeader
:param verifier: Signature verifier object (optional)
:type verifier: aws_encryption_sdk.internal.crypto.Verifier
:returns: Tag value for body
:rtype: bytes
"""
(data_tag,) = unpack_values(format_string='>{auth_len}s'.format(auth_len=header.algorithm.auth_len), stream=stream, verifier=verifier)
return data_tag |
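`unpack_values` is an aws_encryption_sdk internal; the sketch below is a plausible reconstruction (an assumption, not the library's actual code) of what the call above relies on: read exactly the bytes the struct format describes, feed them to the verifier, and unpack.
import struct

def unpack_values_sketch(format_string, stream, verifier=None):
    # Read the fixed-size region, update the signature verifier if one is
    # attached, then unpack into a tuple (here: the auth tag bytes).
    raw = stream.read(struct.calcsize(format_string))
    if verifier is not None:
        verifier.update(raw)
    return struct.unpack(format_string, raw)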
def _BytesForNonRepeatedElement(value, field_number, field_type):
"""Returns the number of bytes needed to serialize a non-repeated element.
The returned byte count includes space for tag information and any
other additional space associated with serializing value.
Args:
value: Value we're serializing.
field_number: Field number of this value. (Since the field number
is stored as part of a varint-encoded tag, this has an impact
on the total bytes required to serialize the value).
field_type: The type of the field. One of the TYPE_* constants
within FieldDescriptor.
"""
try:
fn = type_checkers.TYPE_TO_BYTE_SIZE_FN[field_type]
return fn(field_number, value)
except KeyError:
raise message_mod.EncodeError('Unrecognized field type: %d' % field_type) | def function[_BytesForNonRepeatedElement, parameter[value, field_number, field_type]]:
constant[Returns the number of bytes needed to serialize a non-repeated element.
The returned byte count includes space for tag information and any
other additional space associated with serializing value.
Args:
value: Value we're serializing.
field_number: Field number of this value. (Since the field number
is stored as part of a varint-encoded tag, this has an impact
on the total bytes required to serialize the value).
field_type: The type of the field. One of the TYPE_* constants
within FieldDescriptor.
]
<ast.Try object at 0x7da1b1f0a440> | keyword[def] identifier[_BytesForNonRepeatedElement] ( identifier[value] , identifier[field_number] , identifier[field_type] ):
literal[string]
keyword[try] :
identifier[fn] = identifier[type_checkers] . identifier[TYPE_TO_BYTE_SIZE_FN] [ identifier[field_type] ]
keyword[return] identifier[fn] ( identifier[field_number] , identifier[value] )
keyword[except] identifier[KeyError] :
keyword[raise] identifier[message_mod] . identifier[EncodeError] ( literal[string] % identifier[field_type] ) | def _BytesForNonRepeatedElement(value, field_number, field_type):
"""Returns the number of bytes needed to serialize a non-repeated element.
The returned byte count includes space for tag information and any
other additional space associated with serializing value.
Args:
value: Value we're serializing.
field_number: Field number of this value. (Since the field number
is stored as part of a varint-encoded tag, this has an impact
on the total bytes required to serialize the value).
field_type: The type of the field. One of the TYPE_* constants
within FieldDescriptor.
"""
try:
fn = type_checkers.TYPE_TO_BYTE_SIZE_FN[field_type]
return fn(field_number, value) # depends on [control=['try'], data=[]]
except KeyError:
raise message_mod.EncodeError('Unrecognized field type: %d' % field_type) # depends on [control=['except'], data=[]] |
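Each TYPE_TO_BYTE_SIZE_FN entry combines the varint-encoded tag size with the payload size. Below is a minimal sketch of one such entry for a varint-typed int32 field; the function names are illustrative, not protobuf's own.
def _varint_size(value):
    # Byte count of a varint: 7 payload bits per byte.
    if value == 0:
        return 1
    size = 0
    while value:
        size += 1
        value >>= 7
    return size

def int32_byte_size_sketch(field_number, value):
    # Tag is (field_number << 3) | wire_type; wire type 0 is varint.
    # Negative int32 values are sign-extended to 64 bits on the wire.
    payload = value if value >= 0 else value + (1 << 64)
    return _varint_size((field_number << 3) | 0) + _varint_size(payload)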
def init_device(self):
"""
Initializes the device with the proper keymaps and name
"""
try:
product_id = int(self._send_command('_d2', 1))
except ValueError:
product_id = self._send_command('_d2', 1)
if product_id == 0:
self._impl = ResponseDevice(
self.con,
'Cedrus Lumina LP-400 Response Pad System',
lumina_keymap)
elif product_id == 1:
self._impl = ResponseDevice(
self.con,
'Cedrus SV-1 Voice Key',
None,
'Voice Response')
elif product_id == 2:
model_id = int(self._send_command('_d3', 1))
if model_id == 1:
self._impl = ResponseDevice(
self.con,
'Cedrus RB-530',
rb_530_keymap)
elif model_id == 2:
self._impl = ResponseDevice(
self.con,
'Cedrus RB-730',
rb_730_keymap)
elif model_id == 3:
self._impl = ResponseDevice(
self.con,
'Cedrus RB-830',
rb_830_keymap)
elif model_id == 4:
self._impl = ResponseDevice(
self.con,
'Cedrus RB-834',
rb_834_keymap)
else:
raise XidError('Unknown RB Device')
elif product_id == 4:
self._impl = StimTracker(
self.con,
'Cedrus C-POD')
elif product_id == b'S':
self._impl = StimTracker(
self.con,
'Cedrus StimTracker')
elif product_id == -99:
raise XidError('Invalid XID device') | def function[init_device, parameter[self]]:
constant[
Initializes the device with the proper keymaps and name
]
<ast.Try object at 0x7da18bc72350>
if compare[name[product_id] equal[==] constant[0]] begin[:]
name[self]._impl assign[=] call[name[ResponseDevice], parameter[name[self].con, constant[Cedrus Lumina LP-400 Response Pad System], name[lumina_keymap]]] | keyword[def] identifier[init_device] ( identifier[self] ):
literal[string]
keyword[try] :
identifier[product_id] = identifier[int] ( identifier[self] . identifier[_send_command] ( literal[string] , literal[int] ))
keyword[except] identifier[ValueError] :
identifier[product_id] = identifier[self] . identifier[_send_command] ( literal[string] , literal[int] )
keyword[if] identifier[product_id] == literal[int] :
identifier[self] . identifier[_impl] = identifier[ResponseDevice] (
identifier[self] . identifier[con] ,
literal[string] ,
identifier[lumina_keymap] )
keyword[elif] identifier[product_id] == literal[int] :
identifier[self] . identifier[_impl] = identifier[ResponseDevice] (
identifier[self] . identifier[con] ,
literal[string] ,
keyword[None] ,
literal[string] )
keyword[elif] identifier[product_id] == literal[int] :
identifier[model_id] = identifier[int] ( identifier[self] . identifier[_send_command] ( literal[string] , literal[int] ))
keyword[if] identifier[model_id] == literal[int] :
identifier[self] . identifier[_impl] = identifier[ResponseDevice] (
identifier[self] . identifier[con] ,
literal[string] ,
identifier[rb_530_keymap] )
keyword[elif] identifier[model_id] == literal[int] :
identifier[self] . identifier[_impl] = identifier[ResponseDevice] (
identifier[self] . identifier[con] ,
literal[string] ,
identifier[rb_730_keymap] )
keyword[elif] identifier[model_id] == literal[int] :
identifier[self] . identifier[_impl] = identifier[ResponseDevice] (
identifier[self] . identifier[con] ,
literal[string] ,
identifier[rb_830_keymap] )
keyword[elif] identifier[model_id] == literal[int] :
identifier[self] . identifier[_impl] = identifier[ResponseDevice] (
identifier[self] . identifier[con] ,
literal[string] ,
identifier[rb_834_keymap] )
keyword[else] :
keyword[raise] identifier[XidError] ( literal[string] )
keyword[elif] identifier[product_id] == literal[int] :
identifier[self] . identifier[_impl] = identifier[StimTracker] (
identifier[self] . identifier[con] ,
literal[string] )
keyword[elif] identifier[product_id] == literal[string] :
identifier[self] . identifier[_impl] = identifier[StimTracker] (
identifier[self] . identifier[con] ,
literal[string] )
keyword[elif] identifier[product_id] ==- literal[int] :
keyword[raise] identifier[XidError] ( literal[string] ) | def init_device(self):
"""
Initializes the device with the proper keymaps and name
"""
try:
product_id = int(self._send_command('_d2', 1)) # depends on [control=['try'], data=[]]
except ValueError:
product_id = self._send_command('_d2', 1) # depends on [control=['except'], data=[]]
if product_id == 0:
self._impl = ResponseDevice(self.con, 'Cedrus Lumina LP-400 Response Pad System', lumina_keymap) # depends on [control=['if'], data=[]]
elif product_id == 1:
self._impl = ResponseDevice(self.con, 'Cedrus SV-1 Voice Key', None, 'Voice Response') # depends on [control=['if'], data=[]]
elif product_id == 2:
model_id = int(self._send_command('_d3', 1))
if model_id == 1:
self._impl = ResponseDevice(self.con, 'Cedrus RB-530', rb_530_keymap) # depends on [control=['if'], data=[]]
elif model_id == 2:
self._impl = ResponseDevice(self.con, 'Cedrus RB-730', rb_730_keymap) # depends on [control=['if'], data=[]]
elif model_id == 3:
self._impl = ResponseDevice(self.con, 'Cedrus RB-830', rb_830_keymap) # depends on [control=['if'], data=[]]
elif model_id == 4:
self._impl = ResponseDevice(self.con, 'Cedrus RB-834', rb_834_keymap) # depends on [control=['if'], data=[]]
else:
raise XidError('Unknown RB Device') # depends on [control=['if'], data=[]]
elif product_id == 4:
self._impl = StimTracker(self.con, 'Cedrus C-POD') # depends on [control=['if'], data=[]]
elif product_id == b'S':
self._impl = StimTracker(self.con, 'Cedrus StimTracker') # depends on [control=['if'], data=[]]
elif product_id == -99:
raise XidError('Invalid XID device') # depends on [control=['if'], data=[]] |
def init_limit(self, key, lower=None, upper=None, limit=False):
""" check if data is within limits. reset if violates"""
above = agtb(self.__dict__[key], upper)
for idx, item in enumerate(above):
if item == 0.:
continue
maxval = upper[idx]
self.log(
'{0} <{1}.{2}> above its maximum of {3}.'.format(
self.name[idx], self._name, key, maxval), ERROR)
if limit:
self.__dict__[key][idx] = maxval
below = altb(self.__dict__[key], lower)
for idx, item in enumerate(below):
if item == 0.:
continue
minval = lower[idx]
self.log(
'{0} <{1}.{2}> below its minimum of {3}.'.format(
self.name[idx], self._name, key, minval), ERROR)
if limit:
self.__dict__[key][idx] = minval | def function[init_limit, parameter[self, key, lower, upper, limit]]:
constant[ Check if data is within limits; reset values that violate them.]
variable[above] assign[=] call[name[agtb], parameter[call[name[self].__dict__][name[key]], name[upper]]]
for taget[tuple[[<ast.Name object at 0x7da18fe91c30>, <ast.Name object at 0x7da18fe912a0>]]] in starred[call[name[enumerate], parameter[name[above]]]] begin[:]
if compare[name[item] equal[==] constant[0.0]] begin[:]
continue
variable[maxval] assign[=] call[name[upper]][name[idx]]
call[name[self].log, parameter[call[constant[{0} <{1}.{2}> above its maximum of {3}.].format, parameter[call[name[self].name][name[idx]], name[self]._name, name[key], name[maxval]]], name[ERROR]]]
if name[limit] begin[:]
call[call[name[self].__dict__][name[key]]][name[idx]] assign[=] name[maxval]
variable[below] assign[=] call[name[altb], parameter[call[name[self].__dict__][name[key]], name[lower]]]
for taget[tuple[[<ast.Name object at 0x7da18fe92140>, <ast.Name object at 0x7da18fe938e0>]]] in starred[call[name[enumerate], parameter[name[below]]]] begin[:]
if compare[name[item] equal[==] constant[0.0]] begin[:]
continue
variable[minval] assign[=] call[name[lower]][name[idx]]
call[name[self].log, parameter[call[constant[{0} <{1}.{2}> below its minimum of {3}.].format, parameter[call[name[self].name][name[idx]], name[self]._name, name[key], name[minval]]], name[ERROR]]]
if name[limit] begin[:]
call[call[name[self].__dict__][name[key]]][name[idx]] assign[=] name[minval] | keyword[def] identifier[init_limit] ( identifier[self] , identifier[key] , identifier[lower] = keyword[None] , identifier[upper] = keyword[None] , identifier[limit] = keyword[False] ):
literal[string]
identifier[above] = identifier[agtb] ( identifier[self] . identifier[__dict__] [ identifier[key] ], identifier[upper] )
keyword[for] identifier[idx] , identifier[item] keyword[in] identifier[enumerate] ( identifier[above] ):
keyword[if] identifier[item] == literal[int] :
keyword[continue]
identifier[maxval] = identifier[upper] [ identifier[idx] ]
identifier[self] . identifier[log] (
literal[string] . identifier[format] (
identifier[self] . identifier[name] [ identifier[idx] ], identifier[self] . identifier[_name] , identifier[key] , identifier[maxval] ), identifier[ERROR] )
keyword[if] identifier[limit] :
identifier[self] . identifier[__dict__] [ identifier[key] ][ identifier[idx] ]= identifier[maxval]
identifier[below] = identifier[altb] ( identifier[self] . identifier[__dict__] [ identifier[key] ], identifier[lower] )
keyword[for] identifier[idx] , identifier[item] keyword[in] identifier[enumerate] ( identifier[below] ):
keyword[if] identifier[item] == literal[int] :
keyword[continue]
identifier[minval] = identifier[lower] [ identifier[idx] ]
identifier[self] . identifier[log] (
literal[string] . identifier[format] (
identifier[self] . identifier[name] [ identifier[idx] ], identifier[self] . identifier[_name] , identifier[key] , identifier[minval] ), identifier[ERROR] )
keyword[if] identifier[limit] :
identifier[self] . identifier[__dict__] [ identifier[key] ][ identifier[idx] ]= identifier[minval] | def init_limit(self, key, lower=None, upper=None, limit=False):
""" check if data is within limits. reset if violates"""
above = agtb(self.__dict__[key], upper)
for (idx, item) in enumerate(above):
if item == 0.0:
continue # depends on [control=['if'], data=[]]
maxval = upper[idx]
self.log('{0} <{1}.{2}> above its maximum of {3}.'.format(self.name[idx], self._name, key, maxval), ERROR)
if limit:
self.__dict__[key][idx] = maxval # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
below = altb(self.__dict__[key], lower)
for (idx, item) in enumerate(below):
if item == 0.0:
continue # depends on [control=['if'], data=[]]
minval = lower[idx]
self.log('{0} <{1}.{2}> below its minimum of {3}.'.format(self.name[idx], self._name, key, minval), ERROR)
if limit:
self.__dict__[key][idx] = minval # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] |
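`agtb` and `altb` are not defined in this snippet; the sketch below states the semantics the loop assumes (elementwise "above bound" / "below bound" flags as floats), which is an inference rather than the project's actual helpers.
import numpy as np

def agtb(a, b):
    # 1.0 where a exceeds its upper bound b, else 0.0 (assumed semantics).
    return np.greater(a, b).astype(float)

def altb(a, b):
    # 1.0 where a falls below its lower bound b, else 0.0 (assumed semantics).
    return np.less(a, b).astype(float)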
def all_nodes_that_receive(service, service_configuration=None, run_only=False, deploy_to_only=False):
"""If run_only, returns only the services that are in the runs_on list.
If deploy_to_only, returns only the services in the deployed_to list.
If neither, both are returned, duplicates stripped.
Results are always sorted.
"""
assert not (run_only and deploy_to_only)
if service_configuration is None:
service_configuration = read_services_configuration()
runs_on = service_configuration[service]['runs_on']
deployed_to = service_configuration[service].get('deployed_to')
if deployed_to is None:
deployed_to = []
if run_only:
result = runs_on
elif deploy_to_only:
result = deployed_to
else:
result = set(runs_on) | set(deployed_to)
return list(sorted(result)) | def function[all_nodes_that_receive, parameter[service, service_configuration, run_only, deploy_to_only]]:
constant[If run_only, returns only the services that are in the runs_on list.
If deploy_to_only, returns only the services in the deployed_to list.
If neither, both are returned, duplicates stripped.
Results are always sorted.
]
assert[<ast.UnaryOp object at 0x7da1b19c9a50>]
if compare[name[service_configuration] is constant[None]] begin[:]
variable[service_configuration] assign[=] call[name[read_services_configuration], parameter[]]
variable[runs_on] assign[=] call[call[name[service_configuration]][name[service]]][constant[runs_on]]
variable[deployed_to] assign[=] call[call[name[service_configuration]][name[service]].get, parameter[constant[deployed_to]]]
if compare[name[deployed_to] is constant[None]] begin[:]
variable[deployed_to] assign[=] list[[]]
if name[run_only] begin[:]
variable[result] assign[=] name[runs_on]
return[call[name[list], parameter[call[name[sorted], parameter[name[result]]]]]] | keyword[def] identifier[all_nodes_that_receive] ( identifier[service] , identifier[service_configuration] = keyword[None] , identifier[run_only] = keyword[False] , identifier[deploy_to_only] = keyword[False] ):
literal[string]
keyword[assert] keyword[not] ( identifier[run_only] keyword[and] identifier[deploy_to_only] )
keyword[if] identifier[service_configuration] keyword[is] keyword[None] :
identifier[service_configuration] = identifier[read_services_configuration] ()
identifier[runs_on] = identifier[service_configuration] [ identifier[service] ][ literal[string] ]
identifier[deployed_to] = identifier[service_configuration] [ identifier[service] ]. identifier[get] ( literal[string] )
keyword[if] identifier[deployed_to] keyword[is] keyword[None] :
identifier[deployed_to] =[]
keyword[if] identifier[run_only] :
identifier[result] = identifier[runs_on]
keyword[elif] identifier[deploy_to_only] :
identifier[result] = identifier[deployed_to]
keyword[else] :
identifier[result] = identifier[set] ( identifier[runs_on] )| identifier[set] ( identifier[deployed_to] )
keyword[return] identifier[list] ( identifier[sorted] ( identifier[result] )) | def all_nodes_that_receive(service, service_configuration=None, run_only=False, deploy_to_only=False):
"""If run_only, returns only the services that are in the runs_on list.
If deploy_to_only, returns only the services in the deployed_to list.
If neither, both are returned, duplicates stripped.
Results are always sorted.
"""
assert not (run_only and deploy_to_only)
if service_configuration is None:
service_configuration = read_services_configuration() # depends on [control=['if'], data=['service_configuration']]
runs_on = service_configuration[service]['runs_on']
deployed_to = service_configuration[service].get('deployed_to')
if deployed_to is None:
deployed_to = [] # depends on [control=['if'], data=['deployed_to']]
if run_only:
result = runs_on # depends on [control=['if'], data=[]]
elif deploy_to_only:
result = deployed_to # depends on [control=['if'], data=[]]
else:
result = set(runs_on) | set(deployed_to)
return list(sorted(result)) |
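A worked example with an inline configuration dict, so no configuration file needs to be read:
service_configuration = {
    'web': {'runs_on': ['host2', 'host1'], 'deployed_to': ['host3', 'host1']},
}
all_nodes_that_receive('web', service_configuration)                 # ['host1', 'host2', 'host3']
all_nodes_that_receive('web', service_configuration, run_only=True)  # ['host1', 'host2']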
def load_hypergraph_adjacency(hdf5_file_name):
"""
Parameters
----------
hdf5_file_name : file handle or string
Returns
-------
hypergraph_adjacency : compressed sparse row matrix
"""
with tables.open_file(hdf5_file_name, 'r+') as fileh:
pars = []
for par in ('data', 'indices', 'indptr', 'shape'):
pars.append(getattr(fileh.root.consensus_group, par).read())
hypergraph_adjacency = scipy.sparse.csr_matrix(tuple(pars[:3]), shape = pars[3])
return hypergraph_adjacency | def function[load_hypergraph_adjacency, parameter[hdf5_file_name]]:
constant[
Parameters
----------
hdf5_file_name : file handle or string
Returns
-------
hypergraph_adjacency : compressed sparse row matrix
]
with call[name[tables].open_file, parameter[name[hdf5_file_name], constant[r+]]] begin[:]
variable[pars] assign[=] list[[]]
for taget[name[par]] in starred[tuple[[<ast.Constant object at 0x7da1b26afbe0>, <ast.Constant object at 0x7da1b26af070>, <ast.Constant object at 0x7da1b26ae6e0>, <ast.Constant object at 0x7da1b26ad960>]]] begin[:]
call[name[pars].append, parameter[call[call[name[getattr], parameter[name[fileh].root.consensus_group, name[par]]].read, parameter[]]]]
variable[hypergraph_adjacency] assign[=] call[name[scipy].sparse.csr_matrix, parameter[call[name[tuple], parameter[call[name[pars]][<ast.Slice object at 0x7da1b26adf00>]]]]]
return[name[hypergraph_adjacency]] | keyword[def] identifier[load_hypergraph_adjacency] ( identifier[hdf5_file_name] ):
literal[string]
keyword[with] identifier[tables] . identifier[open_file] ( identifier[hdf5_file_name] , literal[string] ) keyword[as] identifier[fileh] :
identifier[pars] =[]
keyword[for] identifier[par] keyword[in] ( literal[string] , literal[string] , literal[string] , literal[string] ):
identifier[pars] . identifier[append] ( identifier[getattr] ( identifier[fileh] . identifier[root] . identifier[consensus_group] , identifier[par] ). identifier[read] ())
identifier[hypergraph_adjacency] = identifier[scipy] . identifier[sparse] . identifier[csr_matrix] ( identifier[tuple] ( identifier[pars] [: literal[int] ]), identifier[shape] = identifier[pars] [ literal[int] ])
keyword[return] identifier[hypergraph_adjacency] | def load_hypergraph_adjacency(hdf5_file_name):
"""
Parameters
----------
hdf5_file_name : file handle or string
Returns
-------
hypergraph_adjacency : compressed sparse row matrix
"""
with tables.open_file(hdf5_file_name, 'r+') as fileh:
pars = []
for par in ('data', 'indices', 'indptr', 'shape'):
pars.append(getattr(fileh.root.consensus_group, par).read()) # depends on [control=['for'], data=['par']] # depends on [control=['with'], data=['fileh']]
hypergraph_adjacency = scipy.sparse.csr_matrix(tuple(pars[:3]), shape=pars[3])
return hypergraph_adjacency |
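A hypothetical writer counterpart (not from the source) that persists the four CSR members in the layout the loader expects:
import numpy as np
import tables

def save_hypergraph_adjacency(hypergraph_adjacency, hdf5_file_name):
    # Store data/indices/indptr/shape under /consensus_group so
    # load_hypergraph_adjacency can read them back in the same order.
    with tables.open_file(hdf5_file_name, 'w') as fileh:
        group = fileh.create_group(fileh.root, 'consensus_group')
        for par in ('data', 'indices', 'indptr'):
            fileh.create_array(group, par, getattr(hypergraph_adjacency, par))
        fileh.create_array(group, 'shape', np.asarray(hypergraph_adjacency.shape))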
def next(self):
"""Return the next match; raises Exception if no next match available"""
# Check the state and find the next match as a side-effect if necessary.
if not self.has_next():
raise StopIteration("No next match")
# Don't retain that memory any longer than necessary.
result = self._last_match
self._last_match = None
self._state = PhoneNumberMatcher._NOT_READY
return result | def function[next, parameter[self]]:
constant[Return the next match; raises StopIteration if no next match is available]
if <ast.UnaryOp object at 0x7da1b18a1e70> begin[:]
<ast.Raise object at 0x7da1b18a0a60>
variable[result] assign[=] name[self]._last_match
name[self]._last_match assign[=] constant[None]
name[self]._state assign[=] name[PhoneNumberMatcher]._NOT_READY
return[name[result]] | keyword[def] identifier[next] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[has_next] ():
keyword[raise] identifier[StopIteration] ( literal[string] )
identifier[result] = identifier[self] . identifier[_last_match]
identifier[self] . identifier[_last_match] = keyword[None]
identifier[self] . identifier[_state] = identifier[PhoneNumberMatcher] . identifier[_NOT_READY]
keyword[return] identifier[result] | def next(self):
"""Return the next match; raises Exception if no next match available"""
# Check the state and find the next match as a side-effect if necessary.
if not self.has_next():
raise StopIteration('No next match') # depends on [control=['if'], data=[]]
# Don't retain that memory any longer than necessary.
result = self._last_match
self._last_match = None
self._state = PhoneNumberMatcher._NOT_READY
return result |
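A driver loop for the has_next()/next() protocol above; the constructor call follows the python-phonenumbers API and is shown here as a hedged usage sketch.
from phonenumbers import PhoneNumberMatcher

matcher = PhoneNumberMatcher("Call 650-253-0000 tomorrow", "US")
while matcher.has_next():
    match = matcher.next()   # raises StopIteration once exhausted
    print(match.raw_string)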
def get(self, key, lang=None):
""" Returns triple related to this node. Can filter on lang
:param key: Predicate of the triple
:param lang: Language of the triple if applicable
:rtype: Literal or BNode or URIRef
"""
if lang is not None:
for o in self.graph.objects(self.asNode(), key):
if o.language == lang:
yield o
else:
for o in self.graph.objects(self.asNode(), key):
yield o | def function[get, parameter[self, key, lang]]:
constant[ Returns triple related to this node. Can filter on lang
:param key: Predicate of the triple
:param lang: Language of the triple if applicable
:rtype: Literal or BNode or URIRef
]
if compare[name[lang] is_not constant[None]] begin[:]
for taget[name[o]] in starred[call[name[self].graph.objects, parameter[call[name[self].asNode, parameter[]], name[key]]]] begin[:]
if compare[name[o].language equal[==] name[lang]] begin[:]
<ast.Yield object at 0x7da18bc72200> | keyword[def] identifier[get] ( identifier[self] , identifier[key] , identifier[lang] = keyword[None] ):
literal[string]
keyword[if] identifier[lang] keyword[is] keyword[not] keyword[None] :
keyword[for] identifier[o] keyword[in] identifier[self] . identifier[graph] . identifier[objects] ( identifier[self] . identifier[asNode] (), identifier[key] ):
keyword[if] identifier[o] . identifier[language] == identifier[lang] :
keyword[yield] identifier[o]
keyword[else] :
keyword[for] identifier[o] keyword[in] identifier[self] . identifier[graph] . identifier[objects] ( identifier[self] . identifier[asNode] (), identifier[key] ):
keyword[yield] identifier[o] | def get(self, key, lang=None):
""" Returns triple related to this node. Can filter on lang
:param key: Predicate of the triple
:param lang: Language of the triple if applicable
:rtype: Literal or BNode or URIRef
"""
if lang is not None:
for o in self.graph.objects(self.asNode(), key):
if o.language == lang:
yield o # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['o']] # depends on [control=['if'], data=['lang']]
else:
for o in self.graph.objects(self.asNode(), key):
yield o # depends on [control=['for'], data=['o']] |
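Assuming `node` is an instance of the wrapper above backed by an rdflib graph, the lang filter picks out a single language's literals; DC.title is just an example predicate.
from rdflib.namespace import DC

for title in node.get(DC.title, lang='fr'):
    print(title)   # only French-tagged title literals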
def totp(key, format='dec6', period=30, t=None, hash=hashlib.sha1):
'''
Compute a TOTP value as prescribed by OATH specifications.
:param key:
the TOTP key given as an hexadecimal string
:param format:
the output format, can be:
- hex, for a variable length hexadecimal format,
- hex-notrunc, for a 40 characters hexadecimal non-truncated format,
- dec4, for a 4 characters decimal format,
- dec6,
- dec7, or
- dec8
it defaults to dec6.
:param period:
a positive integer giving the period between changes of the OTP
value, as seconds, it defaults to 30.
:param t:
a positive integer giving the current time as seconds since EPOCH
(1st January 1970 at 00:00 GMT), if None we use time.time(); it
defaults to None;
:param hash:
the hash module (usually from the hashlib package) to use,
it defaults to hashlib.sha1.
:returns:
a string representation of the OTP value (as instructed by the format parameter).
:type: str
'''
if t is None:
t = int(time.time())
else:
if isinstance(t, datetime.datetime):
t = calendar.timegm(t.utctimetuple())
else:
t = int(t)
T = int(t/period)
return hotp(key, T, format=format, hash=hash) | def function[totp, parameter[key, format, period, t, hash]]:
constant[
Compute a TOTP value as prescribed by OATH specifications.
:param key:
the TOTP key given as a hexadecimal string
:param format:
the output format, can be:
- hex, for a variable length hexadecimal format,
- hex-notrunc, for a 40 characters hexadecimal non-truncated format,
- dec4, for a 4 characters decimal format,
- dec6,
- dec7, or
- dec8
it defaults to dec6.
:param period:
a positive integer giving the period between changes of the OTP
value, as seconds, it defaults to 30.
:param t:
a positive integer giving the current time as seconds since EPOCH
(1st January 1970 at 00:00 GMT), if None we use time.time(); it
defaults to None;
:param hash:
the hash module (usually from the hashlib package) to use,
it defaults to hashlib.sha1.
:returns:
a string representation of the OTP value (as instructed by the format parameter).
:type: str
]
if compare[name[t] is constant[None]] begin[:]
variable[t] assign[=] call[name[int], parameter[call[name[time].time, parameter[]]]]
variable[T] assign[=] call[name[int], parameter[binary_operation[name[t] / name[period]]]]
return[call[name[hotp], parameter[name[key], name[T]]]] | keyword[def] identifier[totp] ( identifier[key] , identifier[format] = literal[string] , identifier[period] = literal[int] , identifier[t] = keyword[None] , identifier[hash] = identifier[hashlib] . identifier[sha1] ):
literal[string]
keyword[if] identifier[t] keyword[is] keyword[None] :
identifier[t] = identifier[int] ( identifier[time] . identifier[time] ())
keyword[else] :
keyword[if] identifier[isinstance] ( identifier[t] , identifier[datetime] . identifier[datetime] ):
identifier[t] = identifier[calendar] . identifier[timegm] ( identifier[t] . identifier[utctimetuple] ())
keyword[else] :
identifier[t] = identifier[int] ( identifier[t] )
identifier[T] = identifier[int] ( identifier[t] / identifier[period] )
keyword[return] identifier[hotp] ( identifier[key] , identifier[T] , identifier[format] = identifier[format] , identifier[hash] = identifier[hash] ) | def totp(key, format='dec6', period=30, t=None, hash=hashlib.sha1):
"""
Compute a TOTP value as prescribed by OATH specifications.
:param key:
the TOTP key given as a hexadecimal string
:param format:
the output format, can be:
- hex, for a variable length hexadecimal format,
- hex-notrunc, for a 40 characters hexadecimal non-truncated format,
- dec4, for a 4 characters decimal format,
- dec6,
- dec7, or
- dec8
it defaults to dec6.
:param period:
a positive integer giving the period between changes of the OTP
value, as seconds, it defaults to 30.
:param t:
a positive integer giving the current time as seconds since EPOCH
(1st January 1970 at 00:00 GMT), if None we use time.time(); it
defaults to None;
:param hash:
the hash module (usually from the hashlib package) to use,
it defaults to hashlib.sha1.
:returns:
a string representation of the OTP value (as instructed by the format parameter).
:type: str
"""
if t is None:
t = int(time.time()) # depends on [control=['if'], data=['t']]
elif isinstance(t, datetime.datetime):
t = calendar.timegm(t.utctimetuple()) # depends on [control=['if'], data=[]]
else:
t = int(t)
T = int(t / period)
return hotp(key, T, format=format, hash=hash) |
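`hotp` comes from the same package; as a sanity check, here is a minimal self-contained RFC 4226/6238 sketch covering only the default dec6/SHA-1 path, verified against the RFC 6238 test vector.
import hashlib
import hmac
import struct
import time

def totp_dec6_sketch(hex_key, period=30, t=None):
    key = bytes.fromhex(hex_key)
    counter = int((time.time() if t is None else t) / period)
    digest = hmac.new(key, struct.pack('>Q', counter), hashlib.sha1).digest()
    offset = digest[-1] & 0x0F                                  # dynamic truncation
    code = struct.unpack('>I', digest[offset:offset + 4])[0] & 0x7FFFFFFF
    return '%06d' % (code % 10 ** 6)

# RFC 6238 test vector: SHA-1 key '3132...3930' at t=59 yields '287082'.
assert totp_dec6_sketch('3132333435363738393031323334353637383930', t=59) == '287082'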
def list_metrics(self, project, page_size=None, page_token=None):
"""List metrics for the project associated with this client.
See
https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.metrics/list
:type project: str
:param project: ID of the project whose metrics are to be listed.
:type page_size: int
:param page_size: maximum number of metrics to return. If not passed,
defaults to a value set by the API.
:type page_token: str
:param page_token: opaque marker for the next "page" of metrics. If not
passed, the API will return the first page of
metrics.
:rtype: :class:`~google.api_core.page_iterator.Iterator`
:returns: Iterator of
:class:`~google.cloud.logging.metric.Metric`
accessible to the current API.
"""
extra_params = {}
if page_size is not None:
extra_params["pageSize"] = page_size
path = "/projects/%s/metrics" % (project,)
return page_iterator.HTTPIterator(
client=self._client,
api_request=self._client._connection.api_request,
path=path,
item_to_value=_item_to_metric,
items_key="metrics",
page_token=page_token,
extra_params=extra_params,
) | def function[list_metrics, parameter[self, project, page_size, page_token]]:
constant[List metrics for the project associated with this client.
See
https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.metrics/list
:type project: str
:param project: ID of the project whose metrics are to be listed.
:type page_size: int
:param page_size: maximum number of metrics to return. If not passed,
defaults to a value set by the API.
:type page_token: str
:param page_token: opaque marker for the next "page" of metrics. If not
passed, the API will return the first page of
metrics.
:rtype: :class:`~google.api_core.page_iterator.Iterator`
:returns: Iterator of
:class:`~google.cloud.logging.metric.Metric`
accessible to the current API.
]
variable[extra_params] assign[=] dictionary[[], []]
if compare[name[page_size] is_not constant[None]] begin[:]
call[name[extra_params]][constant[pageSize]] assign[=] name[page_size]
variable[path] assign[=] binary_operation[constant[/projects/%s/metrics] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da18bccb220>]]]
return[call[name[page_iterator].HTTPIterator, parameter[]]] | keyword[def] identifier[list_metrics] ( identifier[self] , identifier[project] , identifier[page_size] = keyword[None] , identifier[page_token] = keyword[None] ):
literal[string]
identifier[extra_params] ={}
keyword[if] identifier[page_size] keyword[is] keyword[not] keyword[None] :
identifier[extra_params] [ literal[string] ]= identifier[page_size]
identifier[path] = literal[string] %( identifier[project] ,)
keyword[return] identifier[page_iterator] . identifier[HTTPIterator] (
identifier[client] = identifier[self] . identifier[_client] ,
identifier[api_request] = identifier[self] . identifier[_client] . identifier[_connection] . identifier[api_request] ,
identifier[path] = identifier[path] ,
identifier[item_to_value] = identifier[_item_to_metric] ,
identifier[items_key] = literal[string] ,
identifier[page_token] = identifier[page_token] ,
identifier[extra_params] = identifier[extra_params] ,
) | def list_metrics(self, project, page_size=None, page_token=None):
"""List metrics for the project associated with this client.
See
https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.metrics/list
:type project: str
:param project: ID of the project whose metrics are to be listed.
:type page_size: int
:param page_size: maximum number of metrics to return. If not passed,
defaults to a value set by the API.
:type page_token: str
:param page_token: opaque marker for the next "page" of metrics. If not
passed, the API will return the first page of
metrics.
:rtype: :class:`~google.api_core.page_iterator.Iterator`
:returns: Iterator of
:class:`~google.cloud.logging.metric.Metric`
accessible to the current API.
"""
extra_params = {}
if page_size is not None:
extra_params['pageSize'] = page_size # depends on [control=['if'], data=['page_size']]
path = '/projects/%s/metrics' % (project,)
return page_iterator.HTTPIterator(client=self._client, api_request=self._client._connection.api_request, path=path, item_to_value=_item_to_metric, items_key='metrics', page_token=page_token, extra_params=extra_params) |
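A hedged pagination sketch; `logging_client` stands in for whatever client object owns this method, and only the parameters defined above are used.
for metric in logging_client.list_metrics('my-project', page_size=50):
    print(metric.name)   # the iterator yields google.cloud.logging Metric objects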
def get_filename(self, checksum):
"""
:param checksum: checksum
:return: filename without the storage base part
"""
filename = None
for _filename, metadata in self._log.items():
if metadata['checksum'] == checksum:
filename = _filename
break
return filename | def function[get_filename, parameter[self, checksum]]:
constant[
:param checksum: checksum
:return: filename without the storage base part
]
variable[filename] assign[=] constant[None]
for taget[tuple[[<ast.Name object at 0x7da1b1176260>, <ast.Name object at 0x7da1b11764d0>]]] in starred[call[name[self]._log.items, parameter[]]] begin[:]
if compare[call[name[metadata]][constant[checksum]] equal[==] name[checksum]] begin[:]
variable[filename] assign[=] name[_filename]
break
return[name[filename]] | keyword[def] identifier[get_filename] ( identifier[self] , identifier[checksum] ):
literal[string]
identifier[filename] = keyword[None]
keyword[for] identifier[_filename] , identifier[metadata] keyword[in] identifier[self] . identifier[_log] . identifier[items] ():
keyword[if] identifier[metadata] [ literal[string] ]== identifier[checksum] :
identifier[filename] = identifier[_filename]
keyword[break]
keyword[return] identifier[filename] | def get_filename(self, checksum):
"""
:param checksum: checksum
:return: filename without the storage base part
"""
filename = None
for (_filename, metadata) in self._log.items():
if metadata['checksum'] == checksum:
filename = _filename
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
return filename |
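A worked example of the reverse lookup; `store` is any object exposing the `_log` mapping the loop above iterates (the mapping structure is inferred from the code, not documented in the source).
store._log = {
    'a/b/report.pdf': {'checksum': 'deadbeef'},
    'a/b/data.csv': {'checksum': 'cafebabe'},
}
store.get_filename('cafebabe')   # -> 'a/b/data.csv'
store.get_filename('00000000')   # -> None (no match)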
def _p2_unicode_reader(unicode_csv_data, dialect=csv.excel, **kwargs):
""" Encode Unicode as UTF-8 and parse as CSV.
This is needed since Python 2's `csv` doesn't do Unicode.
Kudos: https://docs.python.org/2/library/csv.html#examples
:param unicode_csv_data: The Unicode stream to parse.
:param dialect: The CSV dialect to use.
:param kwargs: Any other parameters to pass to csv.reader.
:returns: An iterator
"""
# Encode temporarily as UTF-8:
utf8_csv_data = _utf_8_encoder(unicode_csv_data)
# Now we can parse!
csv_reader = csv.reader(utf8_csv_data, dialect=dialect, **kwargs)
# Decode UTF-8 back to Unicode, cell by cell:
return ([unicode(cell, 'utf-8') for cell in row] for row in csv_reader) | def function[_p2_unicode_reader, parameter[unicode_csv_data, dialect]]:
constant[ Encode Unicode as UTF-8 and parse as CSV.
This is needed since Python 2's `csv` doesn't do Unicode.
Kudos: https://docs.python.org/2/library/csv.html#examples
:param unicode_csv_data: The Unicode stream to parse.
:param dialect: The CSV dialect to use.
:param kwargs: Any other parameters to pass to csv.reader.
:returns: An iterator
]
variable[utf8_csv_data] assign[=] call[name[_utf_8_encoder], parameter[name[unicode_csv_data]]]
variable[csv_reader] assign[=] call[name[csv].reader, parameter[name[utf8_csv_data]]]
return[<ast.GeneratorExp object at 0x7da2041d87f0>] | keyword[def] identifier[_p2_unicode_reader] ( identifier[unicode_csv_data] , identifier[dialect] = identifier[csv] . identifier[excel] ,** identifier[kwargs] ):
literal[string]
identifier[utf8_csv_data] = identifier[_utf_8_encoder] ( identifier[unicode_csv_data] )
identifier[csv_reader] = identifier[csv] . identifier[reader] ( identifier[utf8_csv_data] , identifier[dialect] = identifier[dialect] ,** identifier[kwargs] )
keyword[return] ([ identifier[unicode] ( identifier[cell] , literal[string] ) keyword[for] identifier[cell] keyword[in] identifier[row] ] keyword[for] identifier[row] keyword[in] identifier[csv_reader] ) | def _p2_unicode_reader(unicode_csv_data, dialect=csv.excel, **kwargs):
""" Encode Unicode as UTF-8 and parse as CSV.
This is needed since Python 2's `csv` doesn't do Unicode.
Kudos: https://docs.python.org/2/library/csv.html#examples
:param unicode_csv_data: The Unicode stream to parse.
:param dialect: The CSV dialect to use.
:param kwargs: Any other parameters to pass to csv.reader.
:returns: An iterator
"""
# Encode temporarily as UTF-8:
utf8_csv_data = _utf_8_encoder(unicode_csv_data)
# Now we can parse!
csv_reader = csv.reader(utf8_csv_data, dialect=dialect, **kwargs)
# Decode UTF-8 back to Unicode, cell by cell:
return ([unicode(cell, 'utf-8') for cell in row] for row in csv_reader) |
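The `_utf_8_encoder` helper is the other half of the same Python 2 csv-docs recipe; a sketch consistent with the comment above:
def _utf_8_encoder(unicode_csv_data):
    # Encode each incoming unicode line to UTF-8 bytes so that Python 2's
    # csv.reader can consume it.
    for line in unicode_csv_data:
        yield line.encode('utf-8')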
def set_table(self, schema, **kwargs):
"""
add the table to the db
schema -- Schema() -- contains all the information about the table
"""
with self.connection(**kwargs) as connection:
kwargs['connection'] = connection
if self.has_table(str(schema), **kwargs): return True
try:
with self.transaction(**kwargs):
self._set_table(schema, **kwargs)
for index_name, index in schema.indexes.items():
self.set_index(
schema,
name=index.name,
fields=index.fields,
connection=connection,
**index.options
)
except InterfaceError:
# check to see if this table now exists, it might have been created
# in another thread
if not self.has_table(schema, **kwargs):
raise | def function[set_table, parameter[self, schema]]:
constant[
add the table to the db
schema -- Schema() -- contains all the information about the table
]
with call[name[self].connection, parameter[]] begin[:]
call[name[kwargs]][constant[connection]] assign[=] name[connection]
if call[name[self].has_table, parameter[call[name[str], parameter[name[schema]]]]] begin[:]
return[constant[True]]
<ast.Try object at 0x7da18f09f280> | keyword[def] identifier[set_table] ( identifier[self] , identifier[schema] ,** identifier[kwargs] ):
literal[string]
keyword[with] identifier[self] . identifier[connection] (** identifier[kwargs] ) keyword[as] identifier[connection] :
identifier[kwargs] [ literal[string] ]= identifier[connection]
keyword[if] identifier[self] . identifier[has_table] ( identifier[str] ( identifier[schema] ),** identifier[kwargs] ): keyword[return] keyword[True]
keyword[try] :
keyword[with] identifier[self] . identifier[transaction] (** identifier[kwargs] ):
identifier[self] . identifier[_set_table] ( identifier[schema] ,** identifier[kwargs] )
keyword[for] identifier[index_name] , identifier[index] keyword[in] identifier[schema] . identifier[indexes] . identifier[items] ():
identifier[self] . identifier[set_index] (
identifier[schema] ,
identifier[name] = identifier[index] . identifier[name] ,
identifier[fields] = identifier[index] . identifier[fields] ,
identifier[connection] = identifier[connection] ,
** identifier[index] . identifier[options]
)
keyword[except] identifier[InterfaceError] :
keyword[if] keyword[not] identifier[self] . identifier[has_table] ( identifier[schema] ,** identifier[kwargs] ):
keyword[raise] | def set_table(self, schema, **kwargs):
"""
add the table to the db
schema -- Schema() -- contains all the information about the table
"""
with self.connection(**kwargs) as connection:
kwargs['connection'] = connection
if self.has_table(str(schema), **kwargs):
return True # depends on [control=['if'], data=[]]
try:
with self.transaction(**kwargs):
self._set_table(schema, **kwargs)
for (index_name, index) in schema.indexes.items():
self.set_index(schema, name=index.name, fields=index.fields, connection=connection, **index.options) # depends on [control=['for'], data=[]] # depends on [control=['with'], data=[]] # depends on [control=['try'], data=[]]
except InterfaceError:
# check to see if this table now exists, it might have been created
# in another thread
if not self.has_table(schema, **kwargs):
raise # depends on [control=['if'], data=[]] # depends on [control=['except'], data=[]] # depends on [control=['with'], data=['connection']] |
def isEdgeCollection(cls, name) :
"""return true or false wether 'name' is the name of an edge collection."""
try :
col = cls.getCollectionClass(name)
return issubclass(col, Edges)
except KeyError :
return False | def function[isEdgeCollection, parameter[cls, name]]:
constant[Return True if 'name' is the name of an edge collection, False otherwise.]
<ast.Try object at 0x7da1b0f5a3e0> | keyword[def] identifier[isEdgeCollection] ( identifier[cls] , identifier[name] ):
literal[string]
keyword[try] :
identifier[col] = identifier[cls] . identifier[getCollectionClass] ( identifier[name] )
keyword[return] identifier[issubclass] ( identifier[col] , identifier[Edges] )
keyword[except] identifier[KeyError] :
keyword[return] keyword[False] | def isEdgeCollection(cls, name):
"""return true or false wether 'name' is the name of an edge collection."""
try:
col = cls.getCollectionClass(name)
return issubclass(col, Edges) # depends on [control=['try'], data=[]]
except KeyError:
return False # depends on [control=['except'], data=[]] |