code | code_sememe | token_type | code_dependency
---|---|---|---
def encode_cf_variable(var, needs_copy=True, name=None):
"""
Converts a Variable into a Variable which follows some
of the CF conventions:
- NaNs are masked using _FillValue (or the deprecated missing_value)
- Rescaling via: scale_factor and add_offset
- datetimes are converted to the CF 'units since time' format
- dtype encodings are enforced.
Parameters
----------
var : xarray.Variable
A variable holding un-encoded data.
Returns
-------
out : xarray.Variable
A variable which has been encoded as described above.
"""
ensure_not_multiindex(var, name=name)
for coder in [times.CFDatetimeCoder(),
times.CFTimedeltaCoder(),
variables.CFScaleOffsetCoder(),
variables.CFMaskCoder(),
variables.UnsignedIntegerCoder()]:
var = coder.encode(var, name=name)
# TODO(shoyer): convert all of these to use coders, too:
var = maybe_encode_nonstring_dtype(var, name=name)
var = maybe_default_fill_value(var)
var = maybe_encode_bools(var)
var = ensure_dtype_not_object(var, name=name)
return var | def function[encode_cf_variable, parameter[var, needs_copy, name]]:
constant[
Converts a Variable into a Variable which follows some
of the CF conventions:
- NaNs are masked using _FillValue (or the deprecated missing_value)
- Rescaling via: scale_factor and add_offset
- datetimes are converted to the CF 'units since time' format
- dtype encodings are enforced.
Parameters
----------
var : xarray.Variable
A variable holding un-encoded data.
Returns
-------
out : xarray.Variable
A variable which has been encoded as described above.
]
call[name[ensure_not_multiindex], parameter[name[var]]]
for taget[name[coder]] in starred[list[[<ast.Call object at 0x7da20e956bf0>, <ast.Call object at 0x7da20e954e80>, <ast.Call object at 0x7da20e957d60>, <ast.Call object at 0x7da20e955690>, <ast.Call object at 0x7da20e9564d0>]]] begin[:]
variable[var] assign[=] call[name[coder].encode, parameter[name[var]]]
variable[var] assign[=] call[name[maybe_encode_nonstring_dtype], parameter[name[var]]]
variable[var] assign[=] call[name[maybe_default_fill_value], parameter[name[var]]]
variable[var] assign[=] call[name[maybe_encode_bools], parameter[name[var]]]
variable[var] assign[=] call[name[ensure_dtype_not_object], parameter[name[var]]]
return[name[var]] | keyword[def] identifier[encode_cf_variable] ( identifier[var] , identifier[needs_copy] = keyword[True] , identifier[name] = keyword[None] ):
literal[string]
identifier[ensure_not_multiindex] ( identifier[var] , identifier[name] = identifier[name] )
keyword[for] identifier[coder] keyword[in] [ identifier[times] . identifier[CFDatetimeCoder] (),
identifier[times] . identifier[CFTimedeltaCoder] (),
identifier[variables] . identifier[CFScaleOffsetCoder] (),
identifier[variables] . identifier[CFMaskCoder] (),
identifier[variables] . identifier[UnsignedIntegerCoder] ()]:
identifier[var] = identifier[coder] . identifier[encode] ( identifier[var] , identifier[name] = identifier[name] )
identifier[var] = identifier[maybe_encode_nonstring_dtype] ( identifier[var] , identifier[name] = identifier[name] )
identifier[var] = identifier[maybe_default_fill_value] ( identifier[var] )
identifier[var] = identifier[maybe_encode_bools] ( identifier[var] )
identifier[var] = identifier[ensure_dtype_not_object] ( identifier[var] , identifier[name] = identifier[name] )
keyword[return] identifier[var] | def encode_cf_variable(var, needs_copy=True, name=None):
"""
Converts a Variable into a Variable which follows some
of the CF conventions:
- NaNs are masked using _FillValue (or the deprecated missing_value)
- Rescaling via: scale_factor and add_offset
- datetimes are converted to the CF 'units since time' format
- dtype encodings are enforced.
Parameters
----------
var : xarray.Variable
A variable holding un-encoded data.
Returns
-------
out : xarray.Variable
A variable which has been encoded as described above.
"""
ensure_not_multiindex(var, name=name)
for coder in [times.CFDatetimeCoder(), times.CFTimedeltaCoder(), variables.CFScaleOffsetCoder(), variables.CFMaskCoder(), variables.UnsignedIntegerCoder()]:
var = coder.encode(var, name=name) # depends on [control=['for'], data=['coder']]
# TODO(shoyer): convert all of these to use coders, too:
var = maybe_encode_nonstring_dtype(var, name=name)
var = maybe_default_fill_value(var)
var = maybe_encode_bools(var)
var = ensure_dtype_not_object(var, name=name)
return var |
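
The row above shows xarray's CF encoding pipeline: a chain of coder objects, each taking the variable, transforming it, and handing it to the next. A minimal sketch of that pattern, with hypothetical stand-in coders rather than xarray's real ones:

```python
# Minimal sketch of the coder-pipeline pattern above; these coder
# classes are hypothetical stand-ins, not xarray's real CF coders.
class ScaleOffsetCoder:
    def __init__(self, scale=2.0, offset=1.0):
        self.scale, self.offset = scale, offset

    def encode(self, values, name=None):
        # Invert decoded = scale * encoded + offset; pass None through.
        return [None if v is None else (v - self.offset) / self.scale
                for v in values]


class MaskCoder:
    def __init__(self, fill_value=-999.0):
        self.fill_value = fill_value

    def encode(self, values, name=None):
        # Replace None (standing in for NaN) with the fill value.
        return [self.fill_value if v is None else v for v in values]


def encode(values, name=None):
    for coder in [ScaleOffsetCoder(), MaskCoder()]:
        values = coder.encode(values, name=name)
    return values


print(encode([3.0, None, 5.0]))  # [1.0, -999.0, 2.0]
```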
def matlab_formatter(level, vertices, codes=None):
"""`MATLAB`_ style contour formatter.
Contours are returned as a single Nx2, `MATLAB`_ style, contour array.
There are two types of rows in this format:
* Header: The first element of a header row is the level of the contour
(the lower level for filled contours) and the second element is the
number of vertices (to follow) belonging to this contour line.
* Vertex: x,y coordinate pairs of the vertex.
A header row is always followed by the corresponding number of vertices.
Another header row may follow if there are more contour lines.
For filled contours the direction of vertices matters:
* CCW (ACW): The vertices give the exterior of a contour polygon.
* CW: The vertices give a hole of a contour polygon. This hole will
always be inside the exterior of the last contour exterior.
For further explanation of this format see the `Mathworks documentation
<https://www.mathworks.com/help/matlab/ref/contour-properties.html#prop_ContourMatrix>`_
noting that the MATLAB format used in the `contours` package is the
transpose of that used by `MATLAB`_ (since `MATLAB`_ is column-major
and `NumPy`_ is row-major by default).
.. _NumPy: http://www.numpy.org
.. _MATLAB: https://www.mathworks.com/products/matlab.html
"""
vertices = numpy_formatter(level, vertices, codes)
if codes is not None:
level = level[0]
headers = np.vstack((
[v.shape[0] for v in vertices],
[level]*len(vertices))).T
vertices = np.vstack(
list(it.__next__() for it in
itertools.cycle((iter(headers), iter(vertices)))))
return vertices | def function[matlab_formatter, parameter[level, vertices, codes]]:
constant[`MATLAB`_ style contour formatter.
Contours are returned as a single Nx2, `MATLAB`_ style, contour array.
There are two types of rows in this format:
* Header: The first element of a header row is the level of the contour
(the lower level for filled contours) and the second element is the
number of vertices (to follow) belonging to this contour line.
* Vertex: x,y coordinate pairs of the vertex.
A header row is always followed by the corresponding number of vertices.
Another header row may follow if there are more contour lines.
For filled contours the direction of vertices matters:
* CCW (ACW): The vertices give the exterior of a contour polygon.
* CW: The vertices give a hole of a contour polygon. This hole will
always be inside the exterior of the last contour exterior.
For further explanation of this format see the `Mathworks documentation
<https://www.mathworks.com/help/matlab/ref/contour-properties.html#prop_ContourMatrix>`_
noting that the MATLAB format used in the `contours` package is the
transpose of that used by `MATLAB`_ (since `MATLAB`_ is column-major
and `NumPy`_ is row-major by default).
.. _NumPy: http://www.numpy.org
.. _MATLAB: https://www.mathworks.com/products/matlab.html
]
variable[vertices] assign[=] call[name[numpy_formatter], parameter[name[level], name[vertices], name[codes]]]
if compare[name[codes] is_not constant[None]] begin[:]
variable[level] assign[=] call[name[level]][constant[0]]
variable[headers] assign[=] call[name[np].vstack, parameter[tuple[[<ast.ListComp object at 0x7da1b2347f40>, <ast.BinOp object at 0x7da18ede69b0>]]]].T
variable[vertices] assign[=] call[name[np].vstack, parameter[call[name[list], parameter[<ast.GeneratorExp object at 0x7da18ede79d0>]]]]
return[name[vertices]] | keyword[def] identifier[matlab_formatter] ( identifier[level] , identifier[vertices] , identifier[codes] = keyword[None] ):
literal[string]
identifier[vertices] = identifier[numpy_formatter] ( identifier[level] , identifier[vertices] , identifier[codes] )
keyword[if] identifier[codes] keyword[is] keyword[not] keyword[None] :
identifier[level] = identifier[level] [ literal[int] ]
identifier[headers] = identifier[np] . identifier[vstack] ((
[ identifier[v] . identifier[shape] [ literal[int] ] keyword[for] identifier[v] keyword[in] identifier[vertices] ],
[ identifier[level] ]* identifier[len] ( identifier[vertices] ))). identifier[T]
identifier[vertices] = identifier[np] . identifier[vstack] (
identifier[list] ( identifier[it] . identifier[__next__] () keyword[for] identifier[it] keyword[in]
identifier[itertools] . identifier[cycle] (( identifier[iter] ( identifier[headers] ), identifier[iter] ( identifier[vertices] )))))
keyword[return] identifier[vertices] | def matlab_formatter(level, vertices, codes=None):
"""`MATLAB`_ style contour formatter.
Contours are returned as a single Nx2, `MATLAB`_ style, contour array.
There are two types of rows in this format:
* Header: The first element of a header row is the level of the contour
(the lower level for filled contours) and the second element is the
number of vertices (to follow) belonging to this contour line.
* Vertex: x,y coordinate pairs of the vertex.
A header row is always followed by the corresponding number of vertices.
Another header row may follow if there are more contour lines.
For filled contours the direction of vertices matters:
* CCW (ACW): The vertices give the exterior of a contour polygon.
* CW: The vertices give a hole of a contour polygon. This hole will
always be inside the exterior of the last contour exterior.
For further explanation of this format see the `Mathworks documentation
<https://www.mathworks.com/help/matlab/ref/contour-properties.html#prop_ContourMatrix>`_
noting that the MATLAB format used in the `contours` package is the
transpose of that used by `MATLAB`_ (since `MATLAB`_ is column-major
and `NumPy`_ is row-major by default).
.. _NumPy: http://www.numpy.org
.. _MATLAB: https://www.mathworks.com/products/matlab.html
"""
vertices = numpy_formatter(level, vertices, codes)
if codes is not None:
level = level[0] # depends on [control=['if'], data=[]]
headers = np.vstack(([v.shape[0] for v in vertices], [level] * len(vertices))).T
vertices = np.vstack(list((it.__next__() for it in itertools.cycle((iter(headers), iter(vertices))))))
return vertices |
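
To make the interleaved header/vertex layout concrete, here is a small assembled example with made-up coordinates. The sketch interleaves with zip rather than the itertools.cycle trick above, since pulling from an exhausted iterator inside a generator expression raises RuntimeError under PEP 479 (Python 3.7+):

```python
import numpy as np

# Two made-up contour lines at level 0.5.
vertices = [np.array([[0.0, 0.0], [1.0, 1.0]]),
            np.array([[2.0, 0.0], [2.0, 1.0], [3.0, 1.0]])]
level = 0.5

# One header row per contour line, pairing the vertex count with the
# level, exactly as the function above builds them.
headers = np.vstack(([v.shape[0] for v in vertices],
                     [level] * len(vertices))).T

# Interleave each header row with its block of vertices.
out = np.vstack([row for header, verts in zip(headers, vertices)
                 for row in (header, verts)])
print(out)
# [[2.   0.5]   <- header: 2 vertices follow
#  [0.   0. ]
#  [1.   1. ]
#  [3.   0.5]   <- header: 3 vertices follow
#  [2.   0. ]
#  [2.   1. ]
#  [3.   1. ]]
```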
def _get_dvs_infrastructure_traffic_resources(dvs_name,
dvs_infra_traffic_ress):
'''
Returns a list of dict representations of the DVS infrastructure traffic
resource
dvs_name
The name of the DVS
dvs_infra_traffic_ress
The DVS infrastructure traffic resources
'''
log.trace('Building the dicts of the DVS \'%s\' infrastructure '
'traffic resources', dvs_name)
res_dicts = []
for res in dvs_infra_traffic_ress:
res_dict = {'key': res.key,
'limit': res.allocationInfo.limit,
'reservation': res.allocationInfo.reservation}
if res.allocationInfo.shares:
res_dict.update({'num_shares': res.allocationInfo.shares.shares,
'share_level': res.allocationInfo.shares.level})
res_dicts.append(res_dict)
return res_dicts | def function[_get_dvs_infrastructure_traffic_resources, parameter[dvs_name, dvs_infra_traffic_ress]]:
constant[
Returns a list of dict representations of the DVS infrastructure traffic
resource
dvs_name
The name of the DVS
dvs_infra_traffic_ress
The DVS infrastructure traffic resources
]
call[name[log].trace, parameter[constant[Building the dicts of the DVS '%s' infrastructure traffic resources], name[dvs_name]]]
variable[res_dicts] assign[=] list[[]]
for taget[name[res]] in starred[name[dvs_infra_traffic_ress]] begin[:]
variable[res_dict] assign[=] dictionary[[<ast.Constant object at 0x7da2041d80d0>, <ast.Constant object at 0x7da2041da5c0>, <ast.Constant object at 0x7da2041d9f00>], [<ast.Attribute object at 0x7da2041da9b0>, <ast.Attribute object at 0x7da2041dacb0>, <ast.Attribute object at 0x7da2041da530>]]
if name[res].allocationInfo.shares begin[:]
call[name[res_dict].update, parameter[dictionary[[<ast.Constant object at 0x7da2041dba60>, <ast.Constant object at 0x7da2041da0b0>], [<ast.Attribute object at 0x7da2041da710>, <ast.Attribute object at 0x7da2041d9c30>]]]]
call[name[res_dicts].append, parameter[name[res_dict]]]
return[name[res_dicts]] | keyword[def] identifier[_get_dvs_infrastructure_traffic_resources] ( identifier[dvs_name] ,
identifier[dvs_infra_traffic_ress] ):
literal[string]
identifier[log] . identifier[trace] ( literal[string]
literal[string] , identifier[dvs_name] )
identifier[res_dicts] =[]
keyword[for] identifier[res] keyword[in] identifier[dvs_infra_traffic_ress] :
identifier[res_dict] ={ literal[string] : identifier[res] . identifier[key] ,
literal[string] : identifier[res] . identifier[allocationInfo] . identifier[limit] ,
literal[string] : identifier[res] . identifier[allocationInfo] . identifier[reservation] }
keyword[if] identifier[res] . identifier[allocationInfo] . identifier[shares] :
identifier[res_dict] . identifier[update] ({ literal[string] : identifier[res] . identifier[allocationInfo] . identifier[shares] . identifier[shares] ,
literal[string] : identifier[res] . identifier[allocationInfo] . identifier[shares] . identifier[level] })
identifier[res_dicts] . identifier[append] ( identifier[res_dict] )
keyword[return] identifier[res_dicts] | def _get_dvs_infrastructure_traffic_resources(dvs_name, dvs_infra_traffic_ress):
"""
Returns a list of dict representations of the DVS infrastructure traffic
resource
dvs_name
The name of the DVS
dvs_infra_traffic_ress
The DVS infrastructure traffic resources
"""
log.trace("Building the dicts of the DVS '%s' infrastructure traffic resources", dvs_name)
res_dicts = []
for res in dvs_infra_traffic_ress:
res_dict = {'key': res.key, 'limit': res.allocationInfo.limit, 'reservation': res.allocationInfo.reservation}
if res.allocationInfo.shares:
res_dict.update({'num_shares': res.allocationInfo.shares.shares, 'share_level': res.allocationInfo.shares.level}) # depends on [control=['if'], data=[]]
res_dicts.append(res_dict) # depends on [control=['for'], data=['res']]
return res_dicts |
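
A quick way to see the shape of the dicts this builds, using SimpleNamespace objects as hypothetical stand-ins for the real pyVmomi resource objects:

```python
from types import SimpleNamespace

# Hypothetical stand-in for a DVS infrastructure traffic resource.
res = SimpleNamespace(
    key='managementTraffic',
    allocationInfo=SimpleNamespace(
        limit=-1,
        reservation=0,
        shares=SimpleNamespace(shares=50, level='normal')))

res_dict = {'key': res.key,
            'limit': res.allocationInfo.limit,
            'reservation': res.allocationInfo.reservation}
if res.allocationInfo.shares:
    res_dict.update({'num_shares': res.allocationInfo.shares.shares,
                     'share_level': res.allocationInfo.shares.level})
print(res_dict)
# {'key': 'managementTraffic', 'limit': -1, 'reservation': 0,
#  'num_shares': 50, 'share_level': 'normal'}
```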
def BindVar(self, var_id, value):
"""Associates a value with given variable.
This can be called multiple times to associate multiple values.
Args:
var_id: A variable id to bind the values to.
value: A value to bind to the specified variable.
Raises:
KeyError: If given variable is not specified in the pattern.
"""
if var_id not in self._vars:
raise KeyError(var_id)
self._var_bindings[var_id].append(value) | def function[BindVar, parameter[self, var_id, value]]:
constant[Associates a value with the given variable.
This can be called multiple times to associate multiple values.
Args:
var_id: A variable id to bind the values to.
value: A value to bind to the specified variable.
Raises:
KeyError: If the given variable is not specified in the pattern.
]
if compare[name[var_id] <ast.NotIn object at 0x7da2590d7190> name[self]._vars] begin[:]
<ast.Raise object at 0x7da18dc06800>
call[call[name[self]._var_bindings][name[var_id]].append, parameter[name[value]]] | keyword[def] identifier[BindVar] ( identifier[self] , identifier[var_id] , identifier[value] ):
literal[string]
keyword[if] identifier[var_id] keyword[not] keyword[in] identifier[self] . identifier[_vars] :
keyword[raise] identifier[KeyError] ( identifier[var_id] )
identifier[self] . identifier[_var_bindings] [ identifier[var_id] ]. identifier[append] ( identifier[value] ) | def BindVar(self, var_id, value):
"""Associates a value with given variable.
This can be called multiple times to associate multiple values.
Args:
var_id: A variable id to bind the values to.
value: A value to bind to the specified variable.
Raises:
KeyError: If given variable is not specified in the pattern.
"""
if var_id not in self._vars:
raise KeyError(var_id) # depends on [control=['if'], data=['var_id']]
self._var_bindings[var_id].append(value) |
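
The multi-bind behaviour hinges on each known variable id mapping to a list of values; a self-contained sketch of that pattern (the Pattern class here is hypothetical):

```python
class Pattern:
    def __init__(self, var_ids):
        self._vars = set(var_ids)
        # One list of bound values per known variable.
        self._var_bindings = {var_id: [] for var_id in var_ids}

    def BindVar(self, var_id, value):
        if var_id not in self._vars:
            raise KeyError(var_id)
        self._var_bindings[var_id].append(value)


p = Pattern(['user'])
p.BindVar('user', 'alice')
p.BindVar('user', 'bob')          # multiple values accumulate
print(p._var_bindings['user'])    # ['alice', 'bob']
```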
def install_package(package,
wheels_path,
venv=None,
requirement_files=None,
upgrade=False,
install_args=None):
"""Install a Python package.
Can specify a specific version.
Can specify a prerelease.
Can specify a venv to install in.
Can specify a list of paths or urls to requirement txt files.
Can specify a local wheels_path to use for offline installation.
Can request an upgrade.
"""
requirement_files = requirement_files or []
logger.info('Installing %s...', package)
if venv and not os.path.isdir(venv):
raise WagonError('virtualenv {0} does not exist'.format(venv))
pip_command = _construct_pip_command(
package,
wheels_path,
venv,
requirement_files,
upgrade,
install_args)
if IS_VIRTUALENV and not venv:
logger.info('Installing within current virtualenv')
result = _run(pip_command)
if not result.returncode == 0:
raise WagonError('Could not install package: {0} ({1})'.format(
package, result.aggr_stderr)) | def function[install_package, parameter[package, wheels_path, venv, requirement_files, upgrade, install_args]]:
constant[Install a Python package.
Can specify a specific version.
Can specify a prerelease.
Can specify a venv to install in.
Can specify a list of paths or urls to requirement txt files.
Can specify a local wheels_path to use for offline installation.
Can request an upgrade.
]
variable[requirement_files] assign[=] <ast.BoolOp object at 0x7da1b0dbdc00>
call[name[logger].info, parameter[constant[Installing %s...], name[package]]]
if <ast.BoolOp object at 0x7da1b0dbc250> begin[:]
<ast.Raise object at 0x7da1b0dbf370>
variable[pip_command] assign[=] call[name[_construct_pip_command], parameter[name[package], name[wheels_path], name[venv], name[requirement_files], name[upgrade], name[install_args]]]
if <ast.BoolOp object at 0x7da1b0dbefe0> begin[:]
call[name[logger].info, parameter[constant[Installing within current virtualenv]]]
variable[result] assign[=] call[name[_run], parameter[name[pip_command]]]
if <ast.UnaryOp object at 0x7da1b0dbdc90> begin[:]
<ast.Raise object at 0x7da1b0dbf2b0> | keyword[def] identifier[install_package] ( identifier[package] ,
identifier[wheels_path] ,
identifier[venv] = keyword[None] ,
identifier[requirement_files] = keyword[None] ,
identifier[upgrade] = keyword[False] ,
identifier[install_args] = keyword[None] ):
literal[string]
identifier[requirement_files] = identifier[requirement_files] keyword[or] []
identifier[logger] . identifier[info] ( literal[string] , identifier[package] )
keyword[if] identifier[venv] keyword[and] keyword[not] identifier[os] . identifier[path] . identifier[isdir] ( identifier[venv] ):
keyword[raise] identifier[WagonError] ( literal[string] . identifier[format] ( identifier[venv] ))
identifier[pip_command] = identifier[_construct_pip_command] (
identifier[package] ,
identifier[wheels_path] ,
identifier[venv] ,
identifier[requirement_files] ,
identifier[upgrade] ,
identifier[install_args] )
keyword[if] identifier[IS_VIRTUALENV] keyword[and] keyword[not] identifier[venv] :
identifier[logger] . identifier[info] ( literal[string] )
identifier[result] = identifier[_run] ( identifier[pip_command] )
keyword[if] keyword[not] identifier[result] . identifier[returncode] == literal[int] :
keyword[raise] identifier[WagonError] ( literal[string] . identifier[format] (
identifier[package] , identifier[result] . identifier[aggr_stderr] )) | def install_package(package, wheels_path, venv=None, requirement_files=None, upgrade=False, install_args=None):
"""Install a Python package.
Can specify a specific version.
Can specify a prerelease.
Can specify a venv to install in.
Can specify a list of paths or urls to requirement txt files.
Can specify a local wheels_path to use for offline installation.
Can request an upgrade.
"""
requirement_files = requirement_files or []
logger.info('Installing %s...', package)
if venv and (not os.path.isdir(venv)):
raise WagonError('virtualenv {0} does not exist'.format(venv)) # depends on [control=['if'], data=[]]
pip_command = _construct_pip_command(package, wheels_path, venv, requirement_files, upgrade, install_args)
if IS_VIRTUALENV and (not venv):
logger.info('Installing within current virtualenv') # depends on [control=['if'], data=[]]
result = _run(pip_command)
if not result.returncode == 0:
raise WagonError('Could not install package: {0} ({1})'.format(package, result.aggr_stderr)) # depends on [control=['if'], data=[]] |
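
`_construct_pip_command` is internal to wagon, so the following is only an assumed equivalent showing how such a command could be assembled from these options, not the actual implementation:

```python
import os

def construct_pip_command(package, wheels_path, venv=None,
                          requirement_files=None, upgrade=False,
                          install_args=None):
    # Assumed layout: <venv>/bin/pip on POSIX, plain 'pip' otherwise.
    pip = os.path.join(venv, 'bin', 'pip') if venv else 'pip'
    # Offline install: resolve only against the local wheels directory.
    cmd = [pip, 'install', '--no-index', '--find-links', wheels_path]
    for req in requirement_files or []:
        cmd += ['-r', req]
    if upgrade:
        cmd.append('--upgrade')
    if install_args:
        cmd += install_args.split()
    cmd.append(package)
    return cmd

print(construct_pip_command('mypkg', '/tmp/wheels', upgrade=True))
# ['pip', 'install', '--no-index', '--find-links', '/tmp/wheels',
#  '--upgrade', 'mypkg']
```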
def enableSync(self, url, definition = None):
"""Enables Sync capability for an AGOL feature service.
Args:
url (str): The URL of the feature service.
definition (dict): A dictionary containing valid definition values. Defaults to ``None``.
Returns:
dict: The result from :py:func:`arcrest.hostedservice.service.AdminFeatureService.updateDefinition`.
"""
adminFS = AdminFeatureService(url=url, securityHandler=self._securityHandler)
cap = str(adminFS.capabilities)
existingDef = {}
enableResults = 'skipped'
if 'Sync' in cap:
return "Sync is already enabled"
else:
capItems = cap.split(',')
capItems.append('Sync')
existingDef['capabilities'] = ','.join(capItems)
enableResults = adminFS.updateDefinition(json_dict=existingDef)
if 'error' in enableResults:
return enableResults['error']
adminFS = None
del adminFS
return enableResults | def function[enableSync, parameter[self, url, definition]]:
constant[Enables Sync capability for an AGOL feature service.
Args:
url (str): The URL of the feature service.
definition (dict): A dictionary containing valid definition values. Defaults to ``None``.
Returns:
dict: The result from :py:func:`arcrest.hostedservice.service.AdminFeatureService.updateDefinition`.
]
variable[adminFS] assign[=] call[name[AdminFeatureService], parameter[]]
variable[cap] assign[=] call[name[str], parameter[name[adminFS].capabilities]]
variable[existingDef] assign[=] dictionary[[], []]
variable[enableResults] assign[=] constant[skipped]
if compare[constant[Sync] in name[cap]] begin[:]
return[constant[Sync is already enabled]]
variable[adminFS] assign[=] constant[None]
<ast.Delete object at 0x7da1b12f15d0>
return[name[enableResults]] | keyword[def] identifier[enableSync] ( identifier[self] , identifier[url] , identifier[definition] = keyword[None] ):
literal[string]
identifier[adminFS] = identifier[AdminFeatureService] ( identifier[url] = identifier[url] , identifier[securityHandler] = identifier[self] . identifier[_securityHandler] )
identifier[cap] = identifier[str] ( identifier[adminFS] . identifier[capabilities] )
identifier[existingDef] ={}
identifier[enableResults] = literal[string]
keyword[if] literal[string] keyword[in] identifier[cap] :
keyword[return] literal[string]
keyword[else] :
identifier[capItems] = identifier[cap] . identifier[split] ( literal[string] )
identifier[capItems] . identifier[append] ( literal[string] )
identifier[existingDef] [ literal[string] ]= literal[string] . identifier[join] ( identifier[capItems] )
identifier[enableResults] = identifier[adminFS] . identifier[updateDefinition] ( identifier[json_dict] = identifier[existingDef] )
keyword[if] literal[string] keyword[in] identifier[enableResults] :
keyword[return] identifier[enableResults] [ literal[string] ]
identifier[adminFS] = keyword[None]
keyword[del] identifier[adminFS]
keyword[return] identifier[enableResults] | def enableSync(self, url, definition=None):
"""Enables Sync capability for an AGOL feature service.
Args:
url (str): The URL of the feature service.
definition (dict): A dictionary containing valid definition values. Defaults to ``None``.
Returns:
dict: The result from :py:func:`arcrest.hostedservice.service.AdminFeatureService.updateDefinition`.
"""
adminFS = AdminFeatureService(url=url, securityHandler=self._securityHandler)
cap = str(adminFS.capabilities)
existingDef = {}
enableResults = 'skipped'
if 'Sync' in cap:
return 'Sync is already enabled' # depends on [control=['if'], data=[]]
else:
capItems = cap.split(',')
capItems.append('Sync')
existingDef['capabilities'] = ','.join(capItems)
enableResults = adminFS.updateDefinition(json_dict=existingDef)
if 'error' in enableResults:
return enableResults['error'] # depends on [control=['if'], data=['enableResults']]
adminFS = None
del adminFS
return enableResults |
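
The capability toggle itself is plain comma-separated string surgery, sketched here in isolation:

```python
def add_capability(capabilities, new_cap):
    # capabilities is a comma-separated string, e.g. 'Query,Editing'.
    items = capabilities.split(',')
    if new_cap in items:
        return capabilities        # already enabled, nothing to do
    items.append(new_cap)
    return ','.join(items)

print(add_capability('Query,Editing', 'Sync'))  # Query,Editing,Sync
print(add_capability('Query,Sync', 'Sync'))     # Query,Sync
```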
async def move_rel(self, mount: top_types.Mount, delta: top_types.Point,
speed: float = None):
""" Move the critical point of the specified mount by a specified
displacement in a specified direction, at the specified speed.
'speed' sets the speed of all axes to the given value. So, if multiple
axes are to be moved, they will do so at the same speed
"""
if not self._current_position:
raise MustHomeError
await self._cache_and_maybe_retract_mount(mount)
z_axis = Axis.by_mount(mount)
try:
target_position = OrderedDict(
((Axis.X,
self._current_position[Axis.X] + delta.x),
(Axis.Y,
self._current_position[Axis.Y] + delta.y),
(z_axis,
self._current_position[z_axis] + delta.z))
)
except KeyError:
raise MustHomeError
await self._move(target_position, speed=speed) | <ast.AsyncFunctionDef object at 0x7da204345540> | keyword[async] keyword[def] identifier[move_rel] ( identifier[self] , identifier[mount] : identifier[top_types] . identifier[Mount] , identifier[delta] : identifier[top_types] . identifier[Point] ,
identifier[speed] : identifier[float] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[_current_position] :
keyword[raise] identifier[MustHomeError]
keyword[await] identifier[self] . identifier[_cache_and_maybe_retract_mount] ( identifier[mount] )
identifier[z_axis] = identifier[Axis] . identifier[by_mount] ( identifier[mount] )
keyword[try] :
identifier[target_position] = identifier[OrderedDict] (
(( identifier[Axis] . identifier[X] ,
identifier[self] . identifier[_current_position] [ identifier[Axis] . identifier[X] ]+ identifier[delta] . identifier[x] ),
( identifier[Axis] . identifier[Y] ,
identifier[self] . identifier[_current_position] [ identifier[Axis] . identifier[Y] ]+ identifier[delta] . identifier[y] ),
( identifier[z_axis] ,
identifier[self] . identifier[_current_position] [ identifier[z_axis] ]+ identifier[delta] . identifier[z] ))
)
keyword[except] identifier[KeyError] :
keyword[raise] identifier[MustHomeError]
keyword[await] identifier[self] . identifier[_move] ( identifier[target_position] , identifier[speed] = identifier[speed] ) | async def move_rel(self, mount: top_types.Mount, delta: top_types.Point, speed: float=None):
""" Move the critical point of the specified mount by a specified
displacement in a specified direction, at the specified speed.
'speed' sets the speed of all axes to the given value. So, if multiple
axes are to be moved, they will do so at the same speed
"""
if not self._current_position:
raise MustHomeError # depends on [control=['if'], data=[]]
await self._cache_and_maybe_retract_mount(mount)
z_axis = Axis.by_mount(mount)
try:
target_position = OrderedDict(((Axis.X, self._current_position[Axis.X] + delta.x), (Axis.Y, self._current_position[Axis.Y] + delta.y), (z_axis, self._current_position[z_axis] + delta.z))) # depends on [control=['try'], data=[]]
except KeyError:
raise MustHomeError # depends on [control=['except'], data=[]]
await self._move(target_position, speed=speed) |
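
A relative move reduces to cached absolute position plus a per-axis delta, with the KeyError-to-MustHomeError guard covering axes that were never homed; a minimal synchronous sketch:

```python
class MustHomeError(Exception):
    pass

def target_from_delta(current_position, delta_xyz, axes=('X', 'Y', 'Z')):
    # current_position may lack axes that were never homed; surface
    # that as MustHomeError, as the method above does.
    try:
        return {ax: current_position[ax] + d
                for ax, d in zip(axes, delta_xyz)}
    except KeyError:
        raise MustHomeError

print(target_from_delta({'X': 10.0, 'Y': 20.0, 'Z': 100.0},
                        (1.5, -2.0, -5.0)))
# {'X': 11.5, 'Y': 18.0, 'Z': 95.0}
```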
def sort_depth(vals, reverse=False):
"""Sort bids or asks by price
"""
lst = [[float(price), quantity] for price, quantity in vals.items()]
lst = sorted(lst, key=itemgetter(0), reverse=reverse)
return lst | def function[sort_depth, parameter[vals, reverse]]:
constant[Sort bids or asks by price
]
variable[lst] assign[=] <ast.ListComp object at 0x7da2054a6320>
variable[lst] assign[=] call[name[sorted], parameter[name[lst]]]
return[name[lst]] | keyword[def] identifier[sort_depth] ( identifier[vals] , identifier[reverse] = keyword[False] ):
literal[string]
identifier[lst] =[[ identifier[float] ( identifier[price] ), identifier[quantity] ] keyword[for] identifier[price] , identifier[quantity] keyword[in] identifier[vals] . identifier[items] ()]
identifier[lst] = identifier[sorted] ( identifier[lst] , identifier[key] = identifier[itemgetter] ( literal[int] ), identifier[reverse] = identifier[reverse] )
keyword[return] identifier[lst] | def sort_depth(vals, reverse=False):
"""Sort bids or asks by price
"""
lst = [[float(price), quantity] for (price, quantity) in vals.items()]
lst = sorted(lst, key=itemgetter(0), reverse=reverse)
return lst |
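
Usage is straightforward: the input maps price to quantity (prices as strings, as order-book payloads typically deliver them), and `reverse=True` yields the usual highest-bid-first ordering:

```python
from operator import itemgetter

def sort_depth(vals, reverse=False):
    lst = [[float(price), quantity] for price, quantity in vals.items()]
    return sorted(lst, key=itemgetter(0), reverse=reverse)

bids = {'0.0024': '14.7', '0.0026': '3.6', '0.0022': '6.1'}
print(sort_depth(bids, reverse=True))
# [[0.0026, '3.6'], [0.0024, '14.7'], [0.0022, '6.1']]
```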
def name():
"""
Generates a random person's name which has the following structure
<optional prefix> <first name> <second name> <optional suffix>
:return: a random name.
"""
result = ""
if RandomBoolean.chance(3, 5):
result += random.choice(_name_prefixes) + " "
result += random.choice(_first_names) + " " + random.choice(_last_names)
if RandomBoolean.chance(5, 10):
result += " " + random.choice(_name_suffixes)
return result | def function[name, parameter[]]:
constant[
Generates a random person's name which has the following structure
<optional prefix> <first name> <second name> <optional suffix>
:return: a random name.
]
variable[result] assign[=] constant[]
if call[name[RandomBoolean].chance, parameter[constant[3], constant[5]]] begin[:]
<ast.AugAssign object at 0x7da18fe90a00>
<ast.AugAssign object at 0x7da18fe917e0>
if call[name[RandomBoolean].chance, parameter[constant[5], constant[10]]] begin[:]
<ast.AugAssign object at 0x7da18fe910c0>
return[name[result]] | keyword[def] identifier[name] ():
literal[string]
identifier[result] = literal[string]
keyword[if] identifier[RandomBoolean] . identifier[chance] ( literal[int] , literal[int] ):
identifier[result] += identifier[random] . identifier[choice] ( identifier[_name_prefixes] )+ literal[string]
identifier[result] += identifier[random] . identifier[choice] ( identifier[_first_names] )+ literal[string] + identifier[random] . identifier[choice] ( identifier[_last_names] )
keyword[if] identifier[RandomBoolean] . identifier[chance] ( literal[int] , literal[int] ):
identifier[result] += literal[string] + identifier[random] . identifier[choice] ( identifier[_name_suffixes] )
keyword[return] identifier[result] | def name():
"""
Generates a random person's name which has the following structure
<optional prefix> <first name> <second name> <optional suffix>
:return: a random name.
"""
result = ''
if RandomBoolean.chance(3, 5):
result += random.choice(_name_prefixes) + ' ' # depends on [control=['if'], data=[]]
result += random.choice(_first_names) + ' ' + random.choice(_last_names)
if RandomBoolean.chance(5, 10):
result += ' ' + random.choice(_name_suffixes) # depends on [control=['if'], data=[]]
return result |
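
`RandomBoolean.chance(3, 5)` reads as a 3-in-5 probability; a minimal sketch with an assumed `chance` implementation and tiny sample name lists:

```python
import random

def chance(hits, max_value):
    # Assumed semantics: True with probability hits / max_value.
    return random.randint(1, max_value) <= hits

_name_prefixes = ['Dr.', 'Mr.', 'Ms.']
_first_names = ['Alice', 'Bob']
_last_names = ['Stone', 'Rivera']
_name_suffixes = ['Jr.', 'Sr.']

def name():
    result = ''
    if chance(3, 5):
        result += random.choice(_name_prefixes) + ' '
    result += random.choice(_first_names) + ' ' + random.choice(_last_names)
    if chance(5, 10):
        result += ' ' + random.choice(_name_suffixes)
    return result

print(name())  # e.g. 'Dr. Alice Rivera Jr.'
```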
def fetch(self, is_dl_forced=False):
"""
Note there is an unpublished mydrug client that works like this:
from mydrug import MyDrugInfo
md = MyDrugInfo()
r = list(md.query('_exists_:aeolus', fetch_all=True))
:param is_dl_forced: boolean, force download
:return:
"""
dir_path = Path(self.rawdir)
aeolus_file = dir_path / self.files['aeolus']['file']
if self.checkIfRemoteIsNewer(aeolus_file):
aeolis_fh = aeolus_file.open('w')
aeolis_fh.write("[\n")
params = {
'q': '_exists_:aeolus',
'from': 0,
'rows': 10
}
result_count = params['rows']
while params['from'] < result_count:
solr_request = requests.get(self.MY_DRUG_API, params=params)
response = solr_request.json()
for index, doc in enumerate(response['hits']):
if params['from'] == 0 and index == 0:
aeolis_fh.write("{}".format(json.dumps(doc)))
else:
aeolis_fh.write(",\n{}".format(json.dumps(doc)))
if params['from'] % 500 == 0:
LOG.info("Fetched %s documents", params['from'])
result_count = response['total']
params['from'] += params['rows']
aeolis_fh.write("\n]")
aeolis_fh.close() | def function[fetch, parameter[self, is_dl_forced]]:
constant[
Note there is an unpublished mydrug client that works like this:
from mydrug import MyDrugInfo
md = MyDrugInfo()
r = list(md.query('_exists_:aeolus', fetch_all=True))
:param is_dl_forced: boolean, force download
:return:
]
variable[dir_path] assign[=] call[name[Path], parameter[name[self].rawdir]]
variable[aeolus_file] assign[=] binary_operation[name[dir_path] / call[call[name[self].files][constant[aeolus]]][constant[file]]]
if call[name[self].checkIfRemoteIsNewer, parameter[name[aeolus_file]]] begin[:]
variable[aeolis_fh] assign[=] call[name[aeolus_file].open, parameter[constant[w]]]
call[name[aeolis_fh].write, parameter[constant[[
]]]
variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da18eb57010>, <ast.Constant object at 0x7da18eb57310>, <ast.Constant object at 0x7da18eb57e80>], [<ast.Constant object at 0x7da18eb54be0>, <ast.Constant object at 0x7da18eb547f0>, <ast.Constant object at 0x7da18eb54dc0>]]
variable[result_count] assign[=] call[name[params]][constant[rows]]
while compare[call[name[params]][constant[from]] less[<] name[result_count]] begin[:]
variable[solr_request] assign[=] call[name[requests].get, parameter[name[self].MY_DRUG_API]]
variable[response] assign[=] call[name[solr_request].json, parameter[]]
for taget[tuple[[<ast.Name object at 0x7da18eb55a20>, <ast.Name object at 0x7da18eb559f0>]]] in starred[call[name[enumerate], parameter[call[name[response]][constant[hits]]]]] begin[:]
if <ast.BoolOp object at 0x7da18eb553f0> begin[:]
call[name[aeolis_fh].write, parameter[call[constant[{}].format, parameter[call[name[json].dumps, parameter[name[doc]]]]]]]
if compare[binary_operation[call[name[params]][constant[from]] <ast.Mod object at 0x7da2590d6920> constant[500]] equal[==] constant[0]] begin[:]
call[name[LOG].info, parameter[constant[Fetched %s documents], call[name[params]][constant[from]]]]
variable[result_count] assign[=] call[name[response]][constant[total]]
<ast.AugAssign object at 0x7da18eb57580>
call[name[aeolis_fh].write, parameter[constant[
]]]]
call[name[aeolis_fh].close, parameter[]] | keyword[def] identifier[fetch] ( identifier[self] , identifier[is_dl_forced] = keyword[False] ):
literal[string]
identifier[dir_path] = identifier[Path] ( identifier[self] . identifier[rawdir] )
identifier[aeolus_file] = identifier[dir_path] / identifier[self] . identifier[files] [ literal[string] ][ literal[string] ]
keyword[if] identifier[self] . identifier[checkIfRemoteIsNewer] ( identifier[aeolus_file] ):
identifier[aeolis_fh] = identifier[aeolus_file] . identifier[open] ( literal[string] )
identifier[aeolis_fh] . identifier[write] ( literal[string] )
identifier[params] ={
literal[string] : literal[string] ,
literal[string] : literal[int] ,
literal[string] : literal[int]
}
identifier[result_count] = identifier[params] [ literal[string] ]
keyword[while] identifier[params] [ literal[string] ]< identifier[result_count] :
identifier[solr_request] = identifier[requests] . identifier[get] ( identifier[self] . identifier[MY_DRUG_API] , identifier[params] = identifier[params] )
identifier[response] = identifier[solr_request] . identifier[json] ()
keyword[for] identifier[index] , identifier[doc] keyword[in] identifier[enumerate] ( identifier[response] [ literal[string] ]):
keyword[if] identifier[params] [ literal[string] ]== literal[int] keyword[and] identifier[index] == literal[int] :
identifier[aeolis_fh] . identifier[write] ( literal[string] . identifier[format] ( identifier[json] . identifier[dumps] ( identifier[doc] )))
keyword[else] :
identifier[aeolis_fh] . identifier[write] ( literal[string] . identifier[format] ( identifier[json] . identifier[dumps] ( identifier[doc] )))
keyword[if] identifier[params] [ literal[string] ]% literal[int] == literal[int] :
identifier[LOG] . identifier[info] ( literal[string] , identifier[params] [ literal[string] ])
identifier[result_count] = identifier[response] [ literal[string] ]
identifier[params] [ literal[string] ]+= identifier[params] [ literal[string] ]
identifier[aeolis_fh] . identifier[write] ( literal[string] )
identifier[aeolis_fh] . identifier[close] () | def fetch(self, is_dl_forced=False):
"""
Note there is an unpublished mydrug client that works like this:
from mydrug import MyDrugInfo
md = MyDrugInfo()
r = list(md.query('_exists_:aeolus', fetch_all=True))
:param is_dl_forced: boolean, force download
:return:
"""
dir_path = Path(self.rawdir)
aeolus_file = dir_path / self.files['aeolus']['file']
if self.checkIfRemoteIsNewer(aeolus_file):
aeolis_fh = aeolus_file.open('w')
aeolis_fh.write('[\n')
params = {'q': '_exists_:aeolus', 'from': 0, 'rows': 10}
result_count = params['rows']
while params['from'] < result_count:
solr_request = requests.get(self.MY_DRUG_API, params=params)
response = solr_request.json()
for (index, doc) in enumerate(response['hits']):
if params['from'] == 0 and index == 0:
aeolis_fh.write('{}'.format(json.dumps(doc))) # depends on [control=['if'], data=[]]
else:
aeolis_fh.write(',\n{}'.format(json.dumps(doc))) # depends on [control=['for'], data=[]]
if params['from'] % 500 == 0:
LOG.info('Fetched %s documents', params['from']) # depends on [control=['if'], data=[]]
result_count = response['total']
params['from'] += params['rows'] # depends on [control=['while'], data=['result_count']]
aeolis_fh.write('\n]')
aeolis_fh.close() # depends on [control=['if'], data=[]] |
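
The download loop is the standard from/rows paging pattern: request a window, advance `from` by `rows`, and stop once `from` passes the server-reported total. A stripped-down sketch that collects documents in memory (the endpoint URL is hypothetical):

```python
import requests

def fetch_all(url, rows=10):
    params = {'q': '_exists_:aeolus', 'from': 0, 'rows': rows}
    docs = []
    total = params['rows']           # primed so the loop runs once
    while params['from'] < total:
        response = requests.get(url, params=params).json()
        docs.extend(response['hits'])
        total = response['total']    # the server says when to stop
        params['from'] += params['rows']
    return docs

# docs = fetch_all('https://example.org/query')  # hypothetical endpoint
```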
def to_query(self):
"""
Returns a json-serializable representation.
"""
query = {}
for field_instance in self.fields:
query.update(field_instance.to_query())
return query | def function[to_query, parameter[self]]:
constant[
Returns a json-serializable representation.
]
variable[query] assign[=] dictionary[[], []]
for taget[name[field_instance]] in starred[name[self].fields] begin[:]
call[name[query].update, parameter[call[name[field_instance].to_query, parameter[]]]]
return[name[query]] | keyword[def] identifier[to_query] ( identifier[self] ):
literal[string]
identifier[query] ={}
keyword[for] identifier[field_instance] keyword[in] identifier[self] . identifier[fields] :
identifier[query] . identifier[update] ( identifier[field_instance] . identifier[to_query] ())
keyword[return] identifier[query] | def to_query(self):
"""
Returns a json-serializable representation.
"""
query = {}
for field_instance in self.fields:
query.update(field_instance.to_query()) # depends on [control=['for'], data=['field_instance']]
return query |
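
Each field contributes a query fragment and the fragments are merged left to right, so a later field silently overwrites an earlier one that emits the same key; for example, with hypothetical field objects:

```python
class TermField:
    def __init__(self, name, value):
        self.name, self.value = name, value

    def to_query(self):
        # Each field yields its own json-serializable fragment.
        return {self.name: self.value}


fields = [TermField('status', 'open'), TermField('owner', 'alice')]
query = {}
for field_instance in fields:
    query.update(field_instance.to_query())
print(query)  # {'status': 'open', 'owner': 'alice'}
```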
def create(cls, data, id_=None, **kwargs):
r"""Create a new record instance and store it in the database.
#. Send a signal :data:`invenio_records.signals.before_record_insert`
with the new record as parameter.
#. Validate the new record data.
#. Add the new record in the database.
#. Send a signal :data:`invenio_records.signals.after_record_insert`
with the new created record as parameter.
:Keyword Arguments:
* **format_checker** --
An instance of the class :class:`jsonschema.FormatChecker`, which
contains validation rules for formats. See
:func:`~invenio_records.api.RecordBase.validate` for more details.
* **validator** --
A :class:`jsonschema.IValidator` class that will be used to
validate the record. See
:func:`~invenio_records.api.RecordBase.validate` for more details.
:param data: Dict with the record metadata.
:param id_: Specify a UUID to use for the new record, instead of
automatically generated.
:returns: A new :class:`Record` instance.
"""
from .models import RecordMetadata
with db.session.begin_nested():
record = cls(data)
before_record_insert.send(
current_app._get_current_object(),
record=record
)
record.validate(**kwargs)
record.model = RecordMetadata(id=id_, json=record)
db.session.add(record.model)
after_record_insert.send(
current_app._get_current_object(),
record=record
)
return record | def function[create, parameter[cls, data, id_]]:
constant[Create a new record instance and store it in the database.
#. Send a signal :data:`invenio_records.signals.before_record_insert`
with the new record as parameter.
#. Validate the new record data.
#. Add the new record in the database.
#. Send a signal :data:`invenio_records.signals.after_record_insert`
with the new created record as parameter.
:Keyword Arguments:
* **format_checker** --
An instance of the class :class:`jsonschema.FormatChecker`, which
contains validation rules for formats. See
:func:`~invenio_records.api.RecordBase.validate` for more details.
* **validator** --
A :class:`jsonschema.IValidator` class that will be used to
validate the record. See
:func:`~invenio_records.api.RecordBase.validate` for more details.
:param data: Dict with the record metadata.
:param id_: Specify a UUID to use for the new record, instead of
automatically generated.
:returns: A new :class:`Record` instance.
]
from relative_module[models] import module[RecordMetadata]
with call[name[db].session.begin_nested, parameter[]] begin[:]
variable[record] assign[=] call[name[cls], parameter[name[data]]]
call[name[before_record_insert].send, parameter[call[name[current_app]._get_current_object, parameter[]]]]
call[name[record].validate, parameter[]]
name[record].model assign[=] call[name[RecordMetadata], parameter[]]
call[name[db].session.add, parameter[name[record].model]]
call[name[after_record_insert].send, parameter[call[name[current_app]._get_current_object, parameter[]]]]
return[name[record]] | keyword[def] identifier[create] ( identifier[cls] , identifier[data] , identifier[id_] = keyword[None] ,** identifier[kwargs] ):
literal[string]
keyword[from] . identifier[models] keyword[import] identifier[RecordMetadata]
keyword[with] identifier[db] . identifier[session] . identifier[begin_nested] ():
identifier[record] = identifier[cls] ( identifier[data] )
identifier[before_record_insert] . identifier[send] (
identifier[current_app] . identifier[_get_current_object] (),
identifier[record] = identifier[record]
)
identifier[record] . identifier[validate] (** identifier[kwargs] )
identifier[record] . identifier[model] = identifier[RecordMetadata] ( identifier[id] = identifier[id_] , identifier[json] = identifier[record] )
identifier[db] . identifier[session] . identifier[add] ( identifier[record] . identifier[model] )
identifier[after_record_insert] . identifier[send] (
identifier[current_app] . identifier[_get_current_object] (),
identifier[record] = identifier[record]
)
keyword[return] identifier[record] | def create(cls, data, id_=None, **kwargs):
"""Create a new record instance and store it in the database.
#. Send a signal :data:`invenio_records.signals.before_record_insert`
with the new record as parameter.
#. Validate the new record data.
#. Add the new record in the database.
#. Send a signal :data:`invenio_records.signals.after_record_insert`
with the new created record as parameter.
:Keyword Arguments:
* **format_checker** --
An instance of the class :class:`jsonschema.FormatChecker`, which
contains validation rules for formats. See
:func:`~invenio_records.api.RecordBase.validate` for more details.
* **validator** --
A :class:`jsonschema.IValidator` class that will be used to
validate the record. See
:func:`~invenio_records.api.RecordBase.validate` for more details.
:param data: Dict with the record metadata.
:param id_: Specify a UUID to use for the new record, instead of
automatically generated.
:returns: A new :class:`Record` instance.
"""
from .models import RecordMetadata
with db.session.begin_nested():
record = cls(data)
before_record_insert.send(current_app._get_current_object(), record=record)
record.validate(**kwargs)
record.model = RecordMetadata(id=id_, json=record)
db.session.add(record.model) # depends on [control=['with'], data=[]]
after_record_insert.send(current_app._get_current_object(), record=record)
return record |
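
The create path is signal, validate, insert, signal, all inside a nested transaction; a framework-free sketch of the same choreography using blinker-style signals (assumed available) and a list standing in for the database session:

```python
from blinker import Namespace

_signals = Namespace()
before_record_insert = _signals.signal('before-record-insert')
after_record_insert = _signals.signal('after-record-insert')

def add_audit_field(sender, record):
    # A receiver may still mutate the record before validation.
    record.setdefault('created_by', 'system')

before_record_insert.connect(add_audit_field)

def create(data, store, validate):
    record = dict(data)
    before_record_insert.send('app', record=record)
    validate(record)             # raising here aborts before insert
    store.append(record)         # stands in for db.session.add
    after_record_insert.send('app', record=record)
    return record

store = []
rec = create({'title': 'demo'}, store, validate=lambda r: None)
print(rec)  # {'title': 'demo', 'created_by': 'system'}
```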
def clear_numeric_score_increment(self):
"""Clears the numeric score increment.
raise: NoAccess - ``Metadata.isRequired()`` or
``Metadata.isReadOnly()`` is ``true``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.grading.GradeSystemForm.clear_lowest_numeric_score
if (self.get_numeric_score_increment_metadata().is_read_only() or
self.get_numeric_score_increment_metadata().is_required()):
raise errors.NoAccess()
self._my_map['numericScoreIncrement'] = self._numeric_score_increment_default | def function[clear_numeric_score_increment, parameter[self]]:
constant[Clears the numeric score increment.
raise: NoAccess - ``Metadata.isRequired()`` or
``Metadata.isReadOnly()`` is ``true``
*compliance: mandatory -- This method must be implemented.*
]
if <ast.BoolOp object at 0x7da1b26ae170> begin[:]
<ast.Raise object at 0x7da1b26af2b0>
call[name[self]._my_map][constant[numericScoreIncrement]] assign[=] name[self]._numeric_score_increment_default | keyword[def] identifier[clear_numeric_score_increment] ( identifier[self] ):
literal[string]
keyword[if] ( identifier[self] . identifier[get_numeric_score_increment_metadata] (). identifier[is_read_only] () keyword[or]
identifier[self] . identifier[get_numeric_score_increment_metadata] (). identifier[is_required] ()):
keyword[raise] identifier[errors] . identifier[NoAccess] ()
identifier[self] . identifier[_my_map] [ literal[string] ]= identifier[self] . identifier[_numeric_score_increment_default] | def clear_numeric_score_increment(self):
"""Clears the numeric score increment.
raise: NoAccess - ``Metadata.isRequired()`` or
``Metadata.isReadOnly()`` is ``true``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.grading.GradeSystemForm.clear_lowest_numeric_score
if self.get_numeric_score_increment_metadata().is_read_only() or self.get_numeric_score_increment_metadata().is_required():
raise errors.NoAccess() # depends on [control=['if'], data=[]]
self._my_map['numericScoreIncrement'] = self._numeric_score_increment_default |
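
The clear methods share one guard: clearing is a write, so it is refused for read-only metadata, and a required field cannot be left empty either; a minimal sketch of that pattern with plain dicts standing in for the OSID metadata objects:

```python
class NoAccess(Exception):
    pass

def clear_field(metadata, my_map, key, default):
    # A write is refused on read-only metadata, and a required
    # field may not be left without a value.
    if metadata['read_only'] or metadata['required']:
        raise NoAccess()
    my_map[key] = default

record = {'numericScoreIncrement': 0.5}
clear_field({'read_only': False, 'required': False},
            record, 'numericScoreIncrement', None)
print(record)  # {'numericScoreIncrement': None}
```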
def filter_short(terms):
'''
only keep if brute-force possibilities are greater than this word's rank in the dictionary
'''
return [term for i, term in enumerate(terms) if 26**(len(term)) > i] | def function[filter_short, parameter[terms]]:
constant[
only keep if brute-force possibilities are greater than this word's rank in the dictionary
]
return[<ast.ListComp object at 0x7da1b0a81150>] | keyword[def] identifier[filter_short] ( identifier[terms] ):
literal[string]
keyword[return] [ identifier[term] keyword[for] identifier[i] , identifier[term] keyword[in] identifier[enumerate] ( identifier[terms] ) keyword[if] literal[int] **( identifier[len] ( identifier[term] ))> identifier[i] ] | def filter_short(terms):
"""
only keep if brute-force possibilities are greater than this word's rank in the dictionary
"""
return [term for (i, term) in enumerate(terms) if 26 ** len(term) > i] |
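
Since `26**len(term)` is the number of same-length brute-force candidates, a term survives only while its frequency rank is below that count; with one-letter terms the cutoff lands exactly at rank 26:

```python
def filter_short(terms):
    return [term for i, term in enumerate(terms) if 26**(len(term)) > i]

# 30 one-letter "words" ranked by frequency: 26**1 = 26, so only the
# first 26 survive; a two-letter word would survive up to rank 676.
terms = [chr(ord('a') + i % 26) for i in range(30)]
print(len(filter_short(terms)))  # 26
```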
def _parse_variable(self, parent, patch_nml=None):
"""Parse a variable and return its name and values."""
if not patch_nml:
patch_nml = Namelist()
v_name = self.prior_token
v_values = []
# Patch state
patch_values = None
# Derived type parent index (see notes below)
dt_idx = None
if self.token == '(':
v_idx_bounds = self._parse_indices()
v_idx = FIndex(v_idx_bounds, self.global_start_index)
# Update starting index against namelist record
if v_name.lower() in parent.start_index:
p_idx = parent.start_index[v_name.lower()]
for idx, pv in enumerate(zip(p_idx, v_idx.first)):
if all(i is None for i in pv):
i_first = None
else:
i_first = min(i for i in pv if i is not None)
v_idx.first[idx] = i_first
# Resize vector based on starting index
for i_p, i_v in zip(p_idx, v_idx.first):
if i_p is not None and i_v is not None and i_v < i_p:
pad = [None for _ in range(i_p - i_v)]
parent[v_name] = pad + parent[v_name]
else:
# If variable already existed without an index, then assume a
# 1-based index
# FIXME: Need to respect undefined `None` starting indexes?
if v_name in parent:
v_idx.first = [self.default_start_index
for _ in v_idx.first]
parent.start_index[v_name.lower()] = v_idx.first
self._update_tokens()
# Derived type parent check
# NOTE: This assumes single-dimension derived type vectors
# (which I think is the only case supported in Fortran)
if self.token == '%':
assert v_idx_bounds[0][1] - v_idx_bounds[0][0] == 1
dt_idx = v_idx_bounds[0][0] - v_idx.first[0]
# NOTE: This is the sensible play to call `parse_variable`
# but not yet sure how to implement it, so we currently pass
# along `dt_idx` to the `%` handler.
else:
v_idx = None
# If indexed variable already exists, then re-index this new
# non-indexed variable using the global start index
if v_name in parent.start_index:
p_start = parent.start_index[v_name.lower()]
v_start = [self.default_start_index for _ in p_start]
# Resize vector based on new starting index
for i_p, i_v in zip(p_start, v_start):
if i_v < i_p:
pad = [None for _ in range(i_p - i_v)]
parent[v_name] = pad + parent[v_name]
parent.start_index[v_name.lower()] = v_start
if self.token == '%':
# Resolve the derived type
# Check for value in patch
v_patch_nml = None
if v_name in patch_nml:
v_patch_nml = patch_nml.pop(v_name.lower())
if parent:
vpar = parent.get(v_name.lower())
if vpar and isinstance(vpar, list):
# If new element is not a list, then assume it's the first
# element of the list.
if dt_idx is None:
dt_idx = self.default_start_index
try:
v_parent = vpar[dt_idx]
except IndexError:
v_parent = Namelist()
elif vpar:
v_parent = vpar
else:
v_parent = Namelist()
else:
v_parent = Namelist()
parent[v_name] = v_parent
self._update_tokens()
self._update_tokens()
v_att, v_att_vals = self._parse_variable(
v_parent,
patch_nml=v_patch_nml
)
next_value = Namelist()
next_value[v_att] = v_att_vals
self._append_value(v_values, next_value, v_idx)
else:
# Construct the variable array
assert self.token == '='
n_vals = None
self._update_tokens()
# Check if value is in the namelist patch
# TODO: Edit `Namelist` to support case-insensitive `pop` calls
# (Currently only a problem in PyPy2)
if v_name in patch_nml:
patch_values = patch_nml.pop(v_name.lower())
if not isinstance(patch_values, list):
patch_values = [patch_values]
p_idx = 0
# Add variables until next variable trigger
while (self.token not in ('=', '(', '%') or
(self.prior_token, self.token) in (('=', '('), (',', '('))):
# Check for repeated values
if self.token == '*':
n_vals = self._parse_value()
assert isinstance(n_vals, int)
self._update_tokens()
elif not n_vals:
n_vals = 1
# First check for implicit null values
if self.prior_token in ('=', '%', ','):
if (self.token in (',', '/', '&', '$') and
not (self.prior_token == ',' and
self.token in ('/', '&', '$'))):
self._append_value(v_values, None, v_idx, n_vals)
elif self.prior_token == '*':
if self.token not in ('/', '&', '$'):
self._update_tokens()
if (self.token == '=' or (self.token in ('/', '&', '$') and
self.prior_token == '*')):
next_value = None
else:
next_value = self._parse_value()
self._append_value(v_values, next_value, v_idx, n_vals)
else:
next_value = self._parse_value()
self._append_value(v_values, next_value, v_idx, n_vals)
# Reset default repeat factor for subsequent values
n_vals = 1
# Exit for end of nml group (/, &, $) or null broadcast (=)
if self.token in ('/', '&', '$', '='):
break
else:
# Get the remaining length of the unpatched vector?
# NOTE: it is probably very inefficient to keep re-creating
# iterators upon every element; this solution reflects the
# absence of mature lookahead in the script.
#
# This is a temporary fix to address errors caused by
# patches of different length from the original value, and
# represents a direction to fully rewrite the parser using
# `tee`.
self.tokens, lookahead = itertools.tee(self.tokens)
n_vals_remain = count_values(lookahead)
if patch_values:
# XXX: The (p_idx - 1) <= n_vals_remain test is dodgy
# and does not really make sense to me, but it appears
# to work.
# TODO: Patch indices that are not set in the namelist
if (p_idx < len(patch_values) and
(p_idx - 1) <= n_vals_remain and
len(patch_values) > 0 and self.token != ','):
p_val = patch_values[p_idx]
p_repr = patch_nml._f90repr(patch_values[p_idx])
p_idx += 1
self._update_tokens(override=p_repr)
if isinstance(p_val, complex):
# Skip over the complex content
# NOTE: Assumes input and patch are complex
self._update_tokens(write_token=False)
self._update_tokens(write_token=False)
self._update_tokens(write_token=False)
self._update_tokens(write_token=False)
else:
# Skip any values beyond the patch size
skip = (p_idx >= len(patch_values))
self._update_tokens(patch_skip=skip)
else:
self._update_tokens()
if patch_values:
v_values = patch_values
if not v_idx:
v_values = delist(v_values)
return v_name, v_values | def function[_parse_variable, parameter[self, parent, patch_nml]]:
constant[Parse a variable and return its name and values.]
if <ast.UnaryOp object at 0x7da1b04d7df0> begin[:]
variable[patch_nml] assign[=] call[name[Namelist], parameter[]]
variable[v_name] assign[=] name[self].prior_token
variable[v_values] assign[=] list[[]]
variable[patch_values] assign[=] constant[None]
variable[dt_idx] assign[=] constant[None]
if compare[name[self].token equal[==] constant[(]] begin[:]
variable[v_idx_bounds] assign[=] call[name[self]._parse_indices, parameter[]]
variable[v_idx] assign[=] call[name[FIndex], parameter[name[v_idx_bounds], name[self].global_start_index]]
if compare[call[name[v_name].lower, parameter[]] in name[parent].start_index] begin[:]
variable[p_idx] assign[=] call[name[parent].start_index][call[name[v_name].lower, parameter[]]]
for taget[tuple[[<ast.Name object at 0x7da1b04d4280>, <ast.Name object at 0x7da1b04d5270>]]] in starred[call[name[enumerate], parameter[call[name[zip], parameter[name[p_idx], name[v_idx].first]]]]] begin[:]
if call[name[all], parameter[<ast.GeneratorExp object at 0x7da1b04d60b0>]] begin[:]
variable[i_first] assign[=] constant[None]
call[name[v_idx].first][name[idx]] assign[=] name[i_first]
for taget[tuple[[<ast.Name object at 0x7da1b04d5000>, <ast.Name object at 0x7da1b04d57e0>]]] in starred[call[name[zip], parameter[name[p_idx], name[v_idx].first]]] begin[:]
if <ast.BoolOp object at 0x7da1b04d6410> begin[:]
variable[pad] assign[=] <ast.ListComp object at 0x7da1b04d74f0>
call[name[parent]][name[v_name]] assign[=] binary_operation[name[pad] + call[name[parent]][name[v_name]]]
call[name[parent].start_index][call[name[v_name].lower, parameter[]]] assign[=] name[v_idx].first
call[name[self]._update_tokens, parameter[]]
if compare[name[self].token equal[==] constant[%]] begin[:]
assert[compare[binary_operation[call[call[name[v_idx_bounds]][constant[0]]][constant[1]] - call[call[name[v_idx_bounds]][constant[0]]][constant[0]]] equal[==] constant[1]]]
variable[dt_idx] assign[=] binary_operation[call[call[name[v_idx_bounds]][constant[0]]][constant[0]] - call[name[v_idx].first][constant[0]]]
if compare[name[self].token equal[==] constant[%]] begin[:]
variable[v_patch_nml] assign[=] constant[None]
if compare[name[v_name] in name[patch_nml]] begin[:]
variable[v_patch_nml] assign[=] call[name[patch_nml].pop, parameter[call[name[v_name].lower, parameter[]]]]
if name[parent] begin[:]
variable[vpar] assign[=] call[name[parent].get, parameter[call[name[v_name].lower, parameter[]]]]
if <ast.BoolOp object at 0x7da1b03adab0> begin[:]
if compare[name[dt_idx] is constant[None]] begin[:]
variable[dt_idx] assign[=] name[self].default_start_index
<ast.Try object at 0x7da1b03ad600>
call[name[self]._update_tokens, parameter[]]
call[name[self]._update_tokens, parameter[]]
<ast.Tuple object at 0x7da1b03acc10> assign[=] call[name[self]._parse_variable, parameter[name[v_parent]]]
variable[next_value] assign[=] call[name[Namelist], parameter[]]
call[name[next_value]][name[v_att]] assign[=] name[v_att_vals]
call[name[self]._append_value, parameter[name[v_values], name[next_value], name[v_idx]]]
if name[patch_values] begin[:]
variable[v_values] assign[=] name[patch_values]
if <ast.UnaryOp object at 0x7da1b03908b0> begin[:]
variable[v_values] assign[=] call[name[delist], parameter[name[v_values]]]
return[tuple[[<ast.Name object at 0x7da1b03900d0>, <ast.Name object at 0x7da1b0390be0>]]] | keyword[def] identifier[_parse_variable] ( identifier[self] , identifier[parent] , identifier[patch_nml] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[patch_nml] :
identifier[patch_nml] = identifier[Namelist] ()
identifier[v_name] = identifier[self] . identifier[prior_token]
identifier[v_values] =[]
identifier[patch_values] = keyword[None]
identifier[dt_idx] = keyword[None]
keyword[if] identifier[self] . identifier[token] == literal[string] :
identifier[v_idx_bounds] = identifier[self] . identifier[_parse_indices] ()
identifier[v_idx] = identifier[FIndex] ( identifier[v_idx_bounds] , identifier[self] . identifier[global_start_index] )
keyword[if] identifier[v_name] . identifier[lower] () keyword[in] identifier[parent] . identifier[start_index] :
identifier[p_idx] = identifier[parent] . identifier[start_index] [ identifier[v_name] . identifier[lower] ()]
keyword[for] identifier[idx] , identifier[pv] keyword[in] identifier[enumerate] ( identifier[zip] ( identifier[p_idx] , identifier[v_idx] . identifier[first] )):
keyword[if] identifier[all] ( identifier[i] keyword[is] keyword[None] keyword[for] identifier[i] keyword[in] identifier[pv] ):
identifier[i_first] = keyword[None]
keyword[else] :
identifier[i_first] = identifier[min] ( identifier[i] keyword[for] identifier[i] keyword[in] identifier[pv] keyword[if] identifier[i] keyword[is] keyword[not] keyword[None] )
identifier[v_idx] . identifier[first] [ identifier[idx] ]= identifier[i_first]
keyword[for] identifier[i_p] , identifier[i_v] keyword[in] identifier[zip] ( identifier[p_idx] , identifier[v_idx] . identifier[first] ):
keyword[if] identifier[i_p] keyword[is] keyword[not] keyword[None] keyword[and] identifier[i_v] keyword[is] keyword[not] keyword[None] keyword[and] identifier[i_v] < identifier[i_p] :
identifier[pad] =[ keyword[None] keyword[for] identifier[_] keyword[in] identifier[range] ( identifier[i_p] - identifier[i_v] )]
identifier[parent] [ identifier[v_name] ]= identifier[pad] + identifier[parent] [ identifier[v_name] ]
keyword[else] :
keyword[if] identifier[v_name] keyword[in] identifier[parent] :
identifier[v_idx] . identifier[first] =[ identifier[self] . identifier[default_start_index]
keyword[for] identifier[_] keyword[in] identifier[v_idx] . identifier[first] ]
identifier[parent] . identifier[start_index] [ identifier[v_name] . identifier[lower] ()]= identifier[v_idx] . identifier[first]
identifier[self] . identifier[_update_tokens] ()
keyword[if] identifier[self] . identifier[token] == literal[string] :
keyword[assert] identifier[v_idx_bounds] [ literal[int] ][ literal[int] ]- identifier[v_idx_bounds] [ literal[int] ][ literal[int] ]== literal[int]
identifier[dt_idx] = identifier[v_idx_bounds] [ literal[int] ][ literal[int] ]- identifier[v_idx] . identifier[first] [ literal[int] ]
keyword[else] :
identifier[v_idx] = keyword[None]
keyword[if] identifier[v_name] keyword[in] identifier[parent] . identifier[start_index] :
identifier[p_start] = identifier[parent] . identifier[start_index] [ identifier[v_name] . identifier[lower] ()]
identifier[v_start] =[ identifier[self] . identifier[default_start_index] keyword[for] identifier[_] keyword[in] identifier[p_start] ]
keyword[for] identifier[i_p] , identifier[i_v] keyword[in] identifier[zip] ( identifier[p_start] , identifier[v_start] ):
keyword[if] identifier[i_v] < identifier[i_p] :
identifier[pad] =[ keyword[None] keyword[for] identifier[_] keyword[in] identifier[range] ( identifier[i_p] - identifier[i_v] )]
identifier[parent] [ identifier[v_name] ]= identifier[pad] + identifier[parent] [ identifier[v_name] ]
identifier[parent] . identifier[start_index] [ identifier[v_name] . identifier[lower] ()]= identifier[v_start]
keyword[if] identifier[self] . identifier[token] == literal[string] :
identifier[v_patch_nml] = keyword[None]
keyword[if] identifier[v_name] keyword[in] identifier[patch_nml] :
identifier[v_patch_nml] = identifier[patch_nml] . identifier[pop] ( identifier[v_name] . identifier[lower] ())
keyword[if] identifier[parent] :
identifier[vpar] = identifier[parent] . identifier[get] ( identifier[v_name] . identifier[lower] ())
keyword[if] identifier[vpar] keyword[and] identifier[isinstance] ( identifier[vpar] , identifier[list] ):
keyword[if] identifier[dt_idx] keyword[is] keyword[None] :
identifier[dt_idx] = identifier[self] . identifier[default_start_index]
keyword[try] :
identifier[v_parent] = identifier[vpar] [ identifier[dt_idx] ]
keyword[except] identifier[IndexError] :
identifier[v_parent] = identifier[Namelist] ()
keyword[elif] identifier[vpar] :
identifier[v_parent] = identifier[vpar]
keyword[else] :
identifier[v_parent] = identifier[Namelist] ()
keyword[else] :
identifier[v_parent] = identifier[Namelist] ()
identifier[parent] [ identifier[v_name] ]= identifier[v_parent]
identifier[self] . identifier[_update_tokens] ()
identifier[self] . identifier[_update_tokens] ()
identifier[v_att] , identifier[v_att_vals] = identifier[self] . identifier[_parse_variable] (
identifier[v_parent] ,
identifier[patch_nml] = identifier[v_patch_nml]
)
identifier[next_value] = identifier[Namelist] ()
identifier[next_value] [ identifier[v_att] ]= identifier[v_att_vals]
identifier[self] . identifier[_append_value] ( identifier[v_values] , identifier[next_value] , identifier[v_idx] )
keyword[else] :
keyword[assert] identifier[self] . identifier[token] == literal[string]
identifier[n_vals] = keyword[None]
identifier[self] . identifier[_update_tokens] ()
keyword[if] identifier[v_name] keyword[in] identifier[patch_nml] :
identifier[patch_values] = identifier[patch_nml] . identifier[pop] ( identifier[v_name] . identifier[lower] ())
keyword[if] keyword[not] identifier[isinstance] ( identifier[patch_values] , identifier[list] ):
identifier[patch_values] =[ identifier[patch_values] ]
identifier[p_idx] = literal[int]
keyword[while] ( identifier[self] . identifier[token] keyword[not] keyword[in] ( literal[string] , literal[string] , literal[string] ) keyword[or]
( identifier[self] . identifier[prior_token] , identifier[self] . identifier[token] ) keyword[in] (( literal[string] , literal[string] ),( literal[string] , literal[string] ))):
keyword[if] identifier[self] . identifier[token] == literal[string] :
identifier[n_vals] = identifier[self] . identifier[_parse_value] ()
keyword[assert] identifier[isinstance] ( identifier[n_vals] , identifier[int] )
identifier[self] . identifier[_update_tokens] ()
keyword[elif] keyword[not] identifier[n_vals] :
identifier[n_vals] = literal[int]
keyword[if] identifier[self] . identifier[prior_token] keyword[in] ( literal[string] , literal[string] , literal[string] ):
keyword[if] ( identifier[self] . identifier[token] keyword[in] ( literal[string] , literal[string] , literal[string] , literal[string] ) keyword[and]
keyword[not] ( identifier[self] . identifier[prior_token] == literal[string] keyword[and]
identifier[self] . identifier[token] keyword[in] ( literal[string] , literal[string] , literal[string] ))):
identifier[self] . identifier[_append_value] ( identifier[v_values] , keyword[None] , identifier[v_idx] , identifier[n_vals] )
keyword[elif] identifier[self] . identifier[prior_token] == literal[string] :
keyword[if] identifier[self] . identifier[token] keyword[not] keyword[in] ( literal[string] , literal[string] , literal[string] ):
identifier[self] . identifier[_update_tokens] ()
keyword[if] ( identifier[self] . identifier[token] == literal[string] keyword[or] ( identifier[self] . identifier[token] keyword[in] ( literal[string] , literal[string] , literal[string] ) keyword[and]
identifier[self] . identifier[prior_token] == literal[string] )):
identifier[next_value] = keyword[None]
keyword[else] :
identifier[next_value] = identifier[self] . identifier[_parse_value] ()
identifier[self] . identifier[_append_value] ( identifier[v_values] , identifier[next_value] , identifier[v_idx] , identifier[n_vals] )
keyword[else] :
identifier[next_value] = identifier[self] . identifier[_parse_value] ()
identifier[self] . identifier[_append_value] ( identifier[v_values] , identifier[next_value] , identifier[v_idx] , identifier[n_vals] )
identifier[n_vals] = literal[int]
keyword[if] identifier[self] . identifier[token] keyword[in] ( literal[string] , literal[string] , literal[string] , literal[string] ):
keyword[break]
keyword[else] :
identifier[self] . identifier[tokens] , identifier[lookahead] = identifier[itertools] . identifier[tee] ( identifier[self] . identifier[tokens] )
identifier[n_vals_remain] = identifier[count_values] ( identifier[lookahead] )
keyword[if] identifier[patch_values] :
keyword[if] ( identifier[p_idx] < identifier[len] ( identifier[patch_values] ) keyword[and]
( identifier[p_idx] - literal[int] )<= identifier[n_vals_remain] keyword[and]
identifier[len] ( identifier[patch_values] )> literal[int] keyword[and] identifier[self] . identifier[token] != literal[string] ):
identifier[p_val] = identifier[patch_values] [ identifier[p_idx] ]
identifier[p_repr] = identifier[patch_nml] . identifier[_f90repr] ( identifier[patch_values] [ identifier[p_idx] ])
identifier[p_idx] += literal[int]
identifier[self] . identifier[_update_tokens] ( identifier[override] = identifier[p_repr] )
keyword[if] identifier[isinstance] ( identifier[p_val] , identifier[complex] ):
identifier[self] . identifier[_update_tokens] ( identifier[write_token] = keyword[False] )
identifier[self] . identifier[_update_tokens] ( identifier[write_token] = keyword[False] )
identifier[self] . identifier[_update_tokens] ( identifier[write_token] = keyword[False] )
identifier[self] . identifier[_update_tokens] ( identifier[write_token] = keyword[False] )
keyword[else] :
identifier[skip] =( identifier[p_idx] >= identifier[len] ( identifier[patch_values] ))
identifier[self] . identifier[_update_tokens] ( identifier[patch_skip] = identifier[skip] )
keyword[else] :
identifier[self] . identifier[_update_tokens] ()
keyword[if] identifier[patch_values] :
identifier[v_values] = identifier[patch_values]
keyword[if] keyword[not] identifier[v_idx] :
identifier[v_values] = identifier[delist] ( identifier[v_values] )
keyword[return] identifier[v_name] , identifier[v_values] | def _parse_variable(self, parent, patch_nml=None):
"""Parse a variable and return its name and values."""
if not patch_nml:
patch_nml = Namelist() # depends on [control=['if'], data=[]]
v_name = self.prior_token
v_values = []
# Patch state
patch_values = None
# Derived type parent index (see notes below)
dt_idx = None
if self.token == '(':
v_idx_bounds = self._parse_indices()
v_idx = FIndex(v_idx_bounds, self.global_start_index)
# Update starting index against namelist record
if v_name.lower() in parent.start_index:
p_idx = parent.start_index[v_name.lower()]
for (idx, pv) in enumerate(zip(p_idx, v_idx.first)):
if all((i is None for i in pv)):
i_first = None # depends on [control=['if'], data=[]]
else:
i_first = min((i for i in pv if i is not None))
v_idx.first[idx] = i_first # depends on [control=['for'], data=[]]
# Resize vector based on starting index
for (i_p, i_v) in zip(p_idx, v_idx.first):
if i_p is not None and i_v is not None and (i_v < i_p):
pad = [None for _ in range(i_p - i_v)]
parent[v_name] = pad + parent[v_name] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
# If variable already existed without an index, then assume a
# 1-based index
# FIXME: Need to respect undefined `None` starting indexes?
elif v_name in parent:
v_idx.first = [self.default_start_index for _ in v_idx.first] # depends on [control=['if'], data=[]]
parent.start_index[v_name.lower()] = v_idx.first
self._update_tokens()
# Derived type parent check
# NOTE: This assumes single-dimension derived type vectors
# (which I think is the only case supported in Fortran)
if self.token == '%':
assert v_idx_bounds[0][1] - v_idx_bounds[0][0] == 1
dt_idx = v_idx_bounds[0][0] - v_idx.first[0] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
# NOTE: This is the sensible play to call `parse_variable`
# but not yet sure how to implement it, so we currently pass
# along `dt_idx` to the `%` handler.
v_idx = None
# If indexed variable already exists, then re-index this new
# non-indexed variable using the global start index
if v_name in parent.start_index:
p_start = parent.start_index[v_name.lower()]
v_start = [self.default_start_index for _ in p_start]
# Resize vector based on new starting index
for (i_p, i_v) in zip(p_start, v_start):
if i_v < i_p:
pad = [None for _ in range(i_p - i_v)]
parent[v_name] = pad + parent[v_name] # depends on [control=['if'], data=['i_v', 'i_p']] # depends on [control=['for'], data=[]]
parent.start_index[v_name.lower()] = v_start # depends on [control=['if'], data=['v_name']]
if self.token == '%':
# Resolve the derived type
# Check for value in patch
v_patch_nml = None
if v_name in patch_nml:
v_patch_nml = patch_nml.pop(v_name.lower()) # depends on [control=['if'], data=['v_name', 'patch_nml']]
if parent:
vpar = parent.get(v_name.lower())
if vpar and isinstance(vpar, list):
# If new element is not a list, then assume it's the first
# element of the list.
if dt_idx is None:
dt_idx = self.default_start_index # depends on [control=['if'], data=['dt_idx']]
try:
v_parent = vpar[dt_idx] # depends on [control=['try'], data=[]]
except IndexError:
v_parent = Namelist() # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
elif vpar:
v_parent = vpar # depends on [control=['if'], data=[]]
else:
v_parent = Namelist() # depends on [control=['if'], data=[]]
else:
v_parent = Namelist()
parent[v_name] = v_parent
self._update_tokens()
self._update_tokens()
(v_att, v_att_vals) = self._parse_variable(v_parent, patch_nml=v_patch_nml)
next_value = Namelist()
next_value[v_att] = v_att_vals
self._append_value(v_values, next_value, v_idx) # depends on [control=['if'], data=[]]
else:
# Construct the variable array
assert self.token == '='
n_vals = None
self._update_tokens()
# Check if value is in the namelist patch
# TODO: Edit `Namelist` to support case-insensitive `pop` calls
# (Currently only a problem in PyPy2)
if v_name in patch_nml:
patch_values = patch_nml.pop(v_name.lower())
if not isinstance(patch_values, list):
patch_values = [patch_values] # depends on [control=['if'], data=[]]
p_idx = 0 # depends on [control=['if'], data=['v_name', 'patch_nml']]
# Add variables until next variable trigger
while self.token not in ('=', '(', '%') or (self.prior_token, self.token) in (('=', '('), (',', '(')):
# Check for repeated values
if self.token == '*':
n_vals = self._parse_value()
assert isinstance(n_vals, int)
self._update_tokens() # depends on [control=['if'], data=[]]
elif not n_vals:
n_vals = 1 # depends on [control=['if'], data=[]]
# First check for implicit null values
if self.prior_token in ('=', '%', ','):
if self.token in (',', '/', '&', '$') and (not (self.prior_token == ',' and self.token in ('/', '&', '$'))):
self._append_value(v_values, None, v_idx, n_vals) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif self.prior_token == '*':
if self.token not in ('/', '&', '$'):
self._update_tokens() # depends on [control=['if'], data=[]]
if self.token == '=' or (self.token in ('/', '&', '$') and self.prior_token == '*'):
next_value = None # depends on [control=['if'], data=[]]
else:
next_value = self._parse_value()
self._append_value(v_values, next_value, v_idx, n_vals) # depends on [control=['if'], data=[]]
else:
next_value = self._parse_value()
self._append_value(v_values, next_value, v_idx, n_vals)
# Reset default repeat factor for subsequent values
n_vals = 1
# Exit for end of nml group (/, &, $) or null broadcast (=)
if self.token in ('/', '&', '$', '='):
break # depends on [control=['if'], data=[]]
else:
# Get the remaining length of the unpatched vector?
# NOTE: it is probably very inefficient to keep re-creating
# iterators upon every element; this solution reflects the
# absence of mature lookahead in the script.
#
# This is a temporary fix to address errors caused by
# patches of different length from the original value, and
# represents a direction to fully rewrite the parser using
# `tee`.
(self.tokens, lookahead) = itertools.tee(self.tokens)
n_vals_remain = count_values(lookahead)
if patch_values:
# XXX: The (p_idx - 1) <= n_vals_remain test is dodgy
# and does not really make sense to me, but it appears
# to work.
# TODO: Patch indices that are not set in the namelist
if p_idx < len(patch_values) and p_idx - 1 <= n_vals_remain and (len(patch_values) > 0) and (self.token != ','):
p_val = patch_values[p_idx]
p_repr = patch_nml._f90repr(patch_values[p_idx])
p_idx += 1
self._update_tokens(override=p_repr)
if isinstance(p_val, complex):
# Skip over the complex content
# NOTE: Assumes input and patch are complex
self._update_tokens(write_token=False)
self._update_tokens(write_token=False)
self._update_tokens(write_token=False)
self._update_tokens(write_token=False) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
# Skip any values beyond the patch size
skip = p_idx >= len(patch_values)
self._update_tokens(patch_skip=skip) # depends on [control=['if'], data=[]]
else:
self._update_tokens() # depends on [control=['while'], data=[]]
if patch_values:
v_values = patch_values # depends on [control=['if'], data=[]]
if not v_idx:
v_values = delist(v_values) # depends on [control=['if'], data=[]]
return (v_name, v_values) |
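
A note on the parser above: it resolves Fortran-namelist value lists, including repeat counts (n*value) and implicit nulls, before storing them via _append_value. A minimal, self-contained sketch of just the repeat-count expansion (the function name and token format here are illustrative, not the parser's real API):

# Hedged sketch of the n*value expansion that the n_vals bookkeeping
# above implements; expand_repeats and its token format are assumptions.
def expand_repeats(tokens):
    values = []
    for tok in tokens:
        if '*' in tok:
            count, _, val = tok.partition('*')
            values.extend([float(val)] * int(count))
        else:
            values.append(float(tok))
    return values

# The namelist fragment '3*1.0 2.0' becomes [1.0, 1.0, 1.0, 2.0]
assert expand_repeats(['3*1.0', '2.0']) == [1.0, 1.0, 1.0, 2.0]
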
def reorder_matrix(m1, cost='line', verbose=False, H=1e4, Texp=10, T0=1e-3, Hbrk=10):
'''
This function rearranges the nodes in matrix M1 such that the matrix
elements are squeezed along the main diagonal. The function uses a
version of simulated annealing.
Parameters
----------
M1 : NxN np.ndarray
connection matrix weighted/binary directed/undirected
cost : str
'line' or 'circ' for shape of lattice (linear or ring lattice).
Default is linear lattice.
verbose : bool
print out cost at each iteration. Default False.
    H : int
        annealing parameter, default value 1e4
    Texp : int
        annealing parameter, default value 10. Coefficient of H s.t.
        Texp0=1-Texp/H
    T0 : float
        annealing parameter, default value 1e-3
    Hbrk : int
        annealing parameter, default value = 10. Coefficient of H s.t.
        Hbrk0 = H/Hbrk
Returns
-------
Mreordered : NxN np.ndarray
reordered connection matrix
Mindices : Nx1 np.ndarray
reordered indices
Mcost : float
objective function cost of reordered matrix
Notes
-----
Note that in general, the outcome will depend on the initial condition
(the setting of the random number seed). Also, there is no good way to
    determine optimal annealing parameters in advance - these parameters
will need to be adjusted "by hand" (particularly H, Texp, and T0).
For large and/or dense matrices, it is highly recommended to perform
exploratory runs varying the settings of 'H' and 'Texp' and then select
the best values.
Based on extensive testing, it appears that T0 and Hbrk can remain
unchanged in most cases. Texp may be varied from 1-1/H to 1-10/H, for
example. H is the most important parameter - set to larger values as
the problem size increases. It is advisable to run this function
multiple times and select the solution(s) with the lowest 'cost'.
Setting 'Texp' to zero cancels annealing and uses a greedy algorithm
instead.
'''
from scipy import linalg, stats
n = len(m1)
if n < 2:
raise BCTParamError("align_matrix will infinite loop on a singleton "
"or null matrix.")
# generate cost function
if cost == 'line':
profile = stats.norm.pdf(range(1, n + 1), loc=0, scale=n / 2)[::-1]
elif cost == 'circ':
profile = stats.norm.pdf(
range(1, n + 1), loc=n / 2, scale=n / 4)[::-1]
else:
raise BCTParamError('cost must be line or circ')
costf = linalg.toeplitz(profile, r=profile) * np.logical_not(np.eye(n))
costf /= np.sum(costf)
# establish maxcost, lowcost, mincost
maxcost = np.sum(np.sort(costf.flat) * np.sort(m1.flat))
lowcost = np.sum(m1 * costf) / maxcost
mincost = lowcost
# initialize
anew = np.arange(n)
amin = np.arange(n)
h = 0
hcnt = 0
# adjust annealing parameters
# H determines the maximal number of steps (user specified)
# Texp determines the steepness of the temperature gradient
Texp = 1 - Texp / H
# T0 sets the initial temperature and scales the energy term (user provided)
    # Hbrk sets a break point for the simulation
Hbrk = H / Hbrk
while h < H:
h += 1
hcnt += 1
# terminate if no new mincost has been found for some time
if hcnt > Hbrk:
break
T = T0 * Texp**h
atmp = anew.copy()
r1, r2 = rng.randint(n, size=(2,))
while r1 == r2:
r2 = rng.randint(n)
atmp[r1] = anew[r2]
atmp[r2] = anew[r1]
costnew = np.sum((m1[np.ix_(atmp, atmp)]) * costf) / maxcost
# annealing
if costnew < lowcost or rng.random_sample() < np.exp(-(costnew - lowcost) / T):
anew = atmp
lowcost = costnew
# is this a new absolute best?
if lowcost < mincost:
amin = anew
mincost = lowcost
if verbose:
print('step %i ... current lowest cost = %f' % (h, mincost))
hcnt = 0
if verbose:
print('step %i ... final lowest cost = %f' % (h, mincost))
M_reordered = m1[np.ix_(amin, amin)]
M_indices = amin
cost = mincost
return M_reordered, M_indices, cost | def function[reorder_matrix, parameter[m1, cost, verbose, H, Texp, T0, Hbrk]]:
constant[
This function rearranges the nodes in matrix M1 such that the matrix
elements are squeezed along the main diagonal. The function uses a
version of simulated annealing.
Parameters
----------
M1 : NxN np.ndarray
connection matrix weighted/binary directed/undirected
cost : str
'line' or 'circ' for shape of lattice (linear or ring lattice).
Default is linear lattice.
verbose : bool
print out cost at each iteration. Default False.
    H : int
        annealing parameter, default value 1e4
    Texp : int
        annealing parameter, default value 10. Coefficient of H s.t.
        Texp0=1-Texp/H
    T0 : float
        annealing parameter, default value 1e-3
    Hbrk : int
        annealing parameter, default value = 10. Coefficient of H s.t.
        Hbrk0 = H/Hbrk
Returns
-------
Mreordered : NxN np.ndarray
reordered connection matrix
Mindices : Nx1 np.ndarray
reordered indices
Mcost : float
objective function cost of reordered matrix
Notes
-----
Note that in general, the outcome will depend on the initial condition
(the setting of the random number seed). Also, there is no good way to
    determine optimal annealing parameters in advance - these parameters
will need to be adjusted "by hand" (particularly H, Texp, and T0).
For large and/or dense matrices, it is highly recommended to perform
exploratory runs varying the settings of 'H' and 'Texp' and then select
the best values.
Based on extensive testing, it appears that T0 and Hbrk can remain
unchanged in most cases. Texp may be varied from 1-1/H to 1-10/H, for
example. H is the most important parameter - set to larger values as
the problem size increases. It is advisable to run this function
multiple times and select the solution(s) with the lowest 'cost'.
Setting 'Texp' to zero cancels annealing and uses a greedy algorithm
instead.
]
from relative_module[scipy] import module[linalg], module[stats]
variable[n] assign[=] call[name[len], parameter[name[m1]]]
if compare[name[n] less[<] constant[2]] begin[:]
<ast.Raise object at 0x7da1b0833700>
if compare[name[cost] equal[==] constant[line]] begin[:]
variable[profile] assign[=] call[call[name[stats].norm.pdf, parameter[call[name[range], parameter[constant[1], binary_operation[name[n] + constant[1]]]]]]][<ast.Slice object at 0x7da1b08318d0>]
variable[costf] assign[=] binary_operation[call[name[linalg].toeplitz, parameter[name[profile]]] * call[name[np].logical_not, parameter[call[name[np].eye, parameter[name[n]]]]]]
<ast.AugAssign object at 0x7da20e9b3220>
variable[maxcost] assign[=] call[name[np].sum, parameter[binary_operation[call[name[np].sort, parameter[name[costf].flat]] * call[name[np].sort, parameter[name[m1].flat]]]]]
variable[lowcost] assign[=] binary_operation[call[name[np].sum, parameter[binary_operation[name[m1] * name[costf]]]] / name[maxcost]]
variable[mincost] assign[=] name[lowcost]
variable[anew] assign[=] call[name[np].arange, parameter[name[n]]]
variable[amin] assign[=] call[name[np].arange, parameter[name[n]]]
variable[h] assign[=] constant[0]
variable[hcnt] assign[=] constant[0]
variable[Texp] assign[=] binary_operation[constant[1] - binary_operation[name[Texp] / name[H]]]
variable[Hbrk] assign[=] binary_operation[name[H] / name[Hbrk]]
while compare[name[h] less[<] name[H]] begin[:]
<ast.AugAssign object at 0x7da20e9b12a0>
<ast.AugAssign object at 0x7da20e9b0970>
if compare[name[hcnt] greater[>] name[Hbrk]] begin[:]
break
variable[T] assign[=] binary_operation[name[T0] * binary_operation[name[Texp] ** name[h]]]
variable[atmp] assign[=] call[name[anew].copy, parameter[]]
<ast.Tuple object at 0x7da1b08335b0> assign[=] call[name[rng].randint, parameter[name[n]]]
while compare[name[r1] equal[==] name[r2]] begin[:]
variable[r2] assign[=] call[name[rng].randint, parameter[name[n]]]
call[name[atmp]][name[r1]] assign[=] call[name[anew]][name[r2]]
call[name[atmp]][name[r2]] assign[=] call[name[anew]][name[r1]]
variable[costnew] assign[=] binary_operation[call[name[np].sum, parameter[binary_operation[call[name[m1]][call[name[np].ix_, parameter[name[atmp], name[atmp]]]] * name[costf]]]] / name[maxcost]]
if <ast.BoolOp object at 0x7da1b0833190> begin[:]
variable[anew] assign[=] name[atmp]
variable[lowcost] assign[=] name[costnew]
if compare[name[lowcost] less[<] name[mincost]] begin[:]
variable[amin] assign[=] name[anew]
variable[mincost] assign[=] name[lowcost]
if name[verbose] begin[:]
call[name[print], parameter[binary_operation[constant[step %i ... current lowest cost = %f] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b0832710>, <ast.Name object at 0x7da1b08304f0>]]]]]
variable[hcnt] assign[=] constant[0]
if name[verbose] begin[:]
call[name[print], parameter[binary_operation[constant[step %i ... final lowest cost = %f] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b0833970>, <ast.Name object at 0x7da1b0833610>]]]]]
variable[M_reordered] assign[=] call[name[m1]][call[name[np].ix_, parameter[name[amin], name[amin]]]]
variable[M_indices] assign[=] name[amin]
variable[cost] assign[=] name[mincost]
return[tuple[[<ast.Name object at 0x7da1b0831090>, <ast.Name object at 0x7da1b0832e60>, <ast.Name object at 0x7da1b0830ee0>]]] | keyword[def] identifier[reorder_matrix] ( identifier[m1] , identifier[cost] = literal[string] , identifier[verbose] = keyword[False] , identifier[H] = literal[int] , identifier[Texp] = literal[int] , identifier[T0] = literal[int] , identifier[Hbrk] = literal[int] ):
literal[string]
keyword[from] identifier[scipy] keyword[import] identifier[linalg] , identifier[stats]
identifier[n] = identifier[len] ( identifier[m1] )
keyword[if] identifier[n] < literal[int] :
keyword[raise] identifier[BCTParamError] ( literal[string]
literal[string] )
keyword[if] identifier[cost] == literal[string] :
identifier[profile] = identifier[stats] . identifier[norm] . identifier[pdf] ( identifier[range] ( literal[int] , identifier[n] + literal[int] ), identifier[loc] = literal[int] , identifier[scale] = identifier[n] / literal[int] )[::- literal[int] ]
keyword[elif] identifier[cost] == literal[string] :
identifier[profile] = identifier[stats] . identifier[norm] . identifier[pdf] (
identifier[range] ( literal[int] , identifier[n] + literal[int] ), identifier[loc] = identifier[n] / literal[int] , identifier[scale] = identifier[n] / literal[int] )[::- literal[int] ]
keyword[else] :
keyword[raise] identifier[BCTParamError] ( literal[string] )
identifier[costf] = identifier[linalg] . identifier[toeplitz] ( identifier[profile] , identifier[r] = identifier[profile] )* identifier[np] . identifier[logical_not] ( identifier[np] . identifier[eye] ( identifier[n] ))
identifier[costf] /= identifier[np] . identifier[sum] ( identifier[costf] )
identifier[maxcost] = identifier[np] . identifier[sum] ( identifier[np] . identifier[sort] ( identifier[costf] . identifier[flat] )* identifier[np] . identifier[sort] ( identifier[m1] . identifier[flat] ))
identifier[lowcost] = identifier[np] . identifier[sum] ( identifier[m1] * identifier[costf] )/ identifier[maxcost]
identifier[mincost] = identifier[lowcost]
identifier[anew] = identifier[np] . identifier[arange] ( identifier[n] )
identifier[amin] = identifier[np] . identifier[arange] ( identifier[n] )
identifier[h] = literal[int]
identifier[hcnt] = literal[int]
identifier[Texp] = literal[int] - identifier[Texp] / identifier[H]
identifier[Hbrk] = identifier[H] / identifier[Hbrk]
keyword[while] identifier[h] < identifier[H] :
identifier[h] += literal[int]
identifier[hcnt] += literal[int]
keyword[if] identifier[hcnt] > identifier[Hbrk] :
keyword[break]
identifier[T] = identifier[T0] * identifier[Texp] ** identifier[h]
identifier[atmp] = identifier[anew] . identifier[copy] ()
identifier[r1] , identifier[r2] = identifier[rng] . identifier[randint] ( identifier[n] , identifier[size] =( literal[int] ,))
keyword[while] identifier[r1] == identifier[r2] :
identifier[r2] = identifier[rng] . identifier[randint] ( identifier[n] )
identifier[atmp] [ identifier[r1] ]= identifier[anew] [ identifier[r2] ]
identifier[atmp] [ identifier[r2] ]= identifier[anew] [ identifier[r1] ]
identifier[costnew] = identifier[np] . identifier[sum] (( identifier[m1] [ identifier[np] . identifier[ix_] ( identifier[atmp] , identifier[atmp] )])* identifier[costf] )/ identifier[maxcost]
keyword[if] identifier[costnew] < identifier[lowcost] keyword[or] identifier[rng] . identifier[random_sample] ()< identifier[np] . identifier[exp] (-( identifier[costnew] - identifier[lowcost] )/ identifier[T] ):
identifier[anew] = identifier[atmp]
identifier[lowcost] = identifier[costnew]
keyword[if] identifier[lowcost] < identifier[mincost] :
identifier[amin] = identifier[anew]
identifier[mincost] = identifier[lowcost]
keyword[if] identifier[verbose] :
identifier[print] ( literal[string] %( identifier[h] , identifier[mincost] ))
identifier[hcnt] = literal[int]
keyword[if] identifier[verbose] :
identifier[print] ( literal[string] %( identifier[h] , identifier[mincost] ))
identifier[M_reordered] = identifier[m1] [ identifier[np] . identifier[ix_] ( identifier[amin] , identifier[amin] )]
identifier[M_indices] = identifier[amin]
identifier[cost] = identifier[mincost]
keyword[return] identifier[M_reordered] , identifier[M_indices] , identifier[cost] | def reorder_matrix(m1, cost='line', verbose=False, H=10000.0, Texp=10, T0=0.001, Hbrk=10):
"""
This function rearranges the nodes in matrix M1 such that the matrix
elements are squeezed along the main diagonal. The function uses a
version of simulated annealing.
Parameters
----------
M1 : NxN np.ndarray
connection matrix weighted/binary directed/undirected
cost : str
'line' or 'circ' for shape of lattice (linear or ring lattice).
Default is linear lattice.
verbose : bool
print out cost at each iteration. Default False.
    H : int
        annealing parameter, default value 1e4
    Texp : int
        annealing parameter, default value 10. Coefficient of H s.t.
        Texp0=1-Texp/H
    T0 : float
        annealing parameter, default value 1e-3
    Hbrk : int
        annealing parameter, default value = 10. Coefficient of H s.t.
        Hbrk0 = H/Hbrk
Returns
-------
Mreordered : NxN np.ndarray
reordered connection matrix
Mindices : Nx1 np.ndarray
reordered indices
Mcost : float
objective function cost of reordered matrix
Notes
-----
Note that in general, the outcome will depend on the initial condition
(the setting of the random number seed). Also, there is no good way to
    determine optimal annealing parameters in advance - these parameters
will need to be adjusted "by hand" (particularly H, Texp, and T0).
For large and/or dense matrices, it is highly recommended to perform
exploratory runs varying the settings of 'H' and 'Texp' and then select
the best values.
Based on extensive testing, it appears that T0 and Hbrk can remain
unchanged in most cases. Texp may be varied from 1-1/H to 1-10/H, for
example. H is the most important parameter - set to larger values as
the problem size increases. It is advisable to run this function
multiple times and select the solution(s) with the lowest 'cost'.
Setting 'Texp' to zero cancels annealing and uses a greedy algorithm
instead.
"""
from scipy import linalg, stats
n = len(m1)
if n < 2:
raise BCTParamError('align_matrix will infinite loop on a singleton or null matrix.') # depends on [control=['if'], data=[]]
# generate cost function
if cost == 'line':
profile = stats.norm.pdf(range(1, n + 1), loc=0, scale=n / 2)[::-1] # depends on [control=['if'], data=[]]
elif cost == 'circ':
profile = stats.norm.pdf(range(1, n + 1), loc=n / 2, scale=n / 4)[::-1] # depends on [control=['if'], data=[]]
else:
raise BCTParamError('cost must be line or circ')
costf = linalg.toeplitz(profile, r=profile) * np.logical_not(np.eye(n))
costf /= np.sum(costf)
# establish maxcost, lowcost, mincost
maxcost = np.sum(np.sort(costf.flat) * np.sort(m1.flat))
lowcost = np.sum(m1 * costf) / maxcost
mincost = lowcost
# initialize
anew = np.arange(n)
amin = np.arange(n)
h = 0
hcnt = 0
# adjust annealing parameters
# H determines the maximal number of steps (user specified)
# Texp determines the steepness of the temperature gradient
Texp = 1 - Texp / H
# T0 sets the initial temperature and scales the energy term (user provided)
    # Hbrk sets a break point for the simulation
Hbrk = H / Hbrk
while h < H:
h += 1
hcnt += 1
# terminate if no new mincost has been found for some time
if hcnt > Hbrk:
break # depends on [control=['if'], data=[]]
T = T0 * Texp ** h
atmp = anew.copy()
(r1, r2) = rng.randint(n, size=(2,))
while r1 == r2:
r2 = rng.randint(n) # depends on [control=['while'], data=['r2']]
atmp[r1] = anew[r2]
atmp[r2] = anew[r1]
costnew = np.sum(m1[np.ix_(atmp, atmp)] * costf) / maxcost
# annealing
if costnew < lowcost or rng.random_sample() < np.exp(-(costnew - lowcost) / T):
anew = atmp
lowcost = costnew
# is this a new absolute best?
if lowcost < mincost:
amin = anew
mincost = lowcost
if verbose:
print('step %i ... current lowest cost = %f' % (h, mincost)) # depends on [control=['if'], data=[]]
hcnt = 0 # depends on [control=['if'], data=['lowcost', 'mincost']] # depends on [control=['if'], data=[]] # depends on [control=['while'], data=['h']]
if verbose:
print('step %i ... final lowest cost = %f' % (h, mincost)) # depends on [control=['if'], data=[]]
M_reordered = m1[np.ix_(amin, amin)]
M_indices = amin
cost = mincost
return (M_reordered, M_indices, cost) |
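
A usage sketch for the annealer above. This assumes reorder_matrix and its module-level helpers (np, rng, BCTParamError) are importable as in bctpy; the matrix and parameter values are illustrative only.

import numpy as np

rand = np.random.RandomState(0)
m = rand.rand(20, 20)
m = (m + m.T) / 2          # symmetric test matrix
np.fill_diagonal(m, 0)

# Run several restarts and keep the lowest-cost ordering, as the
# docstring recommends.
best = min((reorder_matrix(m, cost='line', H=1e4, Texp=10, T0=1e-3)
            for _ in range(5)), key=lambda r: r[2])
m_reordered, order, cost = best
print('lowest cost over restarts: %f' % cost)
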
def _dequeue_update(self, change):
        """ Only update when all changes are done """
        self._update_count -= 1
        if self._update_count != 0:
return
self.update_shape(change) | def function[_dequeue_update, parameter[self, change]]:
constant[ Only update when all changes are done ]
<ast.AugAssign object at 0x7da2041dbbb0>
if compare[name[self]._update_count not_equal[!=] constant[0]] begin[:]
return[None]
call[name[self].update_shape, parameter[name[change]]] | keyword[def] identifier[_dequeue_update] ( identifier[self] , identifier[change] ):
literal[string]
identifier[self] . identifier[_update_count] -= literal[int]
keyword[if] identifier[self] . identifier[_update_count] != literal[int] :
keyword[return]
identifier[self] . identifier[update_shape] ( identifier[change] ) | def _dequeue_update(self, change):
""" Only update when all changes are done """
self._update_count -= 1
if self._update_count != 0:
return # depends on [control=['if'], data=[]]
self.update_shape(change) |
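
_dequeue_update is a coalescing guard: each queued change decrements a counter, and only the last one triggers the presumably expensive update_shape. The same pattern in a stand-alone sketch (the class and scheduling are illustrative):

class CoalescedUpdates(object):
    def __init__(self):
        self._update_count = 0

    def queue_update(self, change):
        self._update_count += 1
        # ... schedule _dequeue_update(change) to run later ...

    def _dequeue_update(self, change):
        self._update_count -= 1
        if self._update_count != 0:
            return                                  # more changes pending
        print('recomputing once for', change)       # stand-in for update_shape()
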
def sub(self, key):
"""Returns new Vyper instance representing a sub tree of this instance.
"""
subv = Vyper()
data = self.get(key)
if isinstance(data, dict):
subv._config = data
return subv
else:
return None | def function[sub, parameter[self, key]]:
constant[Returns new Vyper instance representing a sub tree of this instance.
]
variable[subv] assign[=] call[name[Vyper], parameter[]]
variable[data] assign[=] call[name[self].get, parameter[name[key]]]
if call[name[isinstance], parameter[name[data], name[dict]]] begin[:]
name[subv]._config assign[=] name[data]
return[name[subv]] | keyword[def] identifier[sub] ( identifier[self] , identifier[key] ):
literal[string]
identifier[subv] = identifier[Vyper] ()
identifier[data] = identifier[self] . identifier[get] ( identifier[key] )
keyword[if] identifier[isinstance] ( identifier[data] , identifier[dict] ):
identifier[subv] . identifier[_config] = identifier[data]
keyword[return] identifier[subv]
keyword[else] :
keyword[return] keyword[None] | def sub(self, key):
"""Returns new Vyper instance representing a sub tree of this instance.
"""
subv = Vyper()
data = self.get(key)
if isinstance(data, dict):
subv._config = data
return subv # depends on [control=['if'], data=[]]
else:
return None |
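
A hypothetical usage sketch of sub; it assumes the instance has already loaded a config in which 'database' maps to a nested dict (the loading step is not shown above):

v = Vyper()
# ... config loading step, e.g. reading a file containing:
#     database:
#         host: localhost
#         port: 5432
db = v.sub('database')      # new Vyper rooted at the 'database' subtree
if db is not None:
    print(db.get('host'))   # -> 'localhost'
print(v.sub('host'))        # -> None: 'host' is not a top-level dict
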
def nupicBindingsPrereleaseInstalled():
"""
Make an attempt to determine if a pre-release version of nupic.bindings is
installed already.
@return: boolean
"""
try:
nupicDistribution = pkg_resources.get_distribution("nupic.bindings")
if pkg_resources.parse_version(nupicDistribution.version).is_prerelease:
# A pre-release dev version of nupic.bindings is installed.
return True
except pkg_resources.DistributionNotFound:
pass # Silently ignore. The absence of nupic.bindings will be handled by
# setuptools by default
return False | def function[nupicBindingsPrereleaseInstalled, parameter[]]:
constant[
Make an attempt to determine if a pre-release version of nupic.bindings is
installed already.
@return: boolean
]
<ast.Try object at 0x7da18dc98b20>
return[constant[False]] | keyword[def] identifier[nupicBindingsPrereleaseInstalled] ():
literal[string]
keyword[try] :
identifier[nupicDistribution] = identifier[pkg_resources] . identifier[get_distribution] ( literal[string] )
keyword[if] identifier[pkg_resources] . identifier[parse_version] ( identifier[nupicDistribution] . identifier[version] ). identifier[is_prerelease] :
keyword[return] keyword[True]
keyword[except] identifier[pkg_resources] . identifier[DistributionNotFound] :
keyword[pass]
keyword[return] keyword[False] | def nupicBindingsPrereleaseInstalled():
"""
Make an attempt to determine if a pre-release version of nupic.bindings is
installed already.
@return: boolean
"""
try:
nupicDistribution = pkg_resources.get_distribution('nupic.bindings')
if pkg_resources.parse_version(nupicDistribution.version).is_prerelease:
# A pre-release dev version of nupic.bindings is installed.
return True # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except pkg_resources.DistributionNotFound:
pass # Silently ignore. The absence of nupic.bindings will be handled by # depends on [control=['except'], data=[]]
# setuptools by default
return False |
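
The prerelease test hinges on pkg_resources version parsing; a quick standalone check of that behavior:

import pkg_resources

assert pkg_resources.parse_version('1.0.dev1').is_prerelease
assert pkg_resources.parse_version('2.0rc1').is_prerelease
assert not pkg_resources.parse_version('1.0.2').is_prerelease
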
def get_parent_id(chebi_id):
'''Returns parent id'''
if len(__PARENT_IDS) == 0:
__parse_compounds()
return __PARENT_IDS[chebi_id] if chebi_id in __PARENT_IDS else float('NaN') | def function[get_parent_id, parameter[chebi_id]]:
constant[Returns parent id]
if compare[call[name[len], parameter[name[__PARENT_IDS]]] equal[==] constant[0]] begin[:]
call[name[__parse_compounds], parameter[]]
return[<ast.IfExp object at 0x7da1b0283f40>] | keyword[def] identifier[get_parent_id] ( identifier[chebi_id] ):
literal[string]
keyword[if] identifier[len] ( identifier[__PARENT_IDS] )== literal[int] :
identifier[__parse_compounds] ()
keyword[return] identifier[__PARENT_IDS] [ identifier[chebi_id] ] keyword[if] identifier[chebi_id] keyword[in] identifier[__PARENT_IDS] keyword[else] identifier[float] ( literal[string] ) | def get_parent_id(chebi_id):
"""Returns parent id"""
if len(__PARENT_IDS) == 0:
__parse_compounds() # depends on [control=['if'], data=[]]
return __PARENT_IDS[chebi_id] if chebi_id in __PARENT_IDS else float('NaN') |
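
get_parent_id follows a lazy-load pattern: parse the compounds source once, on first access, then serve every lookup from the module-level cache, with NaN as the missing-value sentinel. The same shape in miniature (the loader name is hypothetical):

_PARENT_IDS_SKETCH = {}

def get_parent_id_sketch(chebi_id):
    if len(_PARENT_IDS_SKETCH) == 0:
        _PARENT_IDS_SKETCH.update(_load_compounds())  # hypothetical one-time parse
    return _PARENT_IDS_SKETCH[chebi_id] if chebi_id in _PARENT_IDS_SKETCH \
        else float('NaN')
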
def dropKey(self, key):
'''Drop an attribute/element/key-value pair from all the dictionaries.
If the dictionary key does not exist in a particular dictionary, then
that dictionary is left unchanged.
Side effect: if the key is a number and it matches a list (interpreted
as a dictionary), it will cause the "keys" to shift just as a list
would be expected to.
Example of use:
>>> test = [
... {"name": "Jim", "age": 18, "income": 93000, "wigs": 68 },
... {"name": "Larry", "age": 18, "wigs": [3, 2, 9]},
... {"name": "Joe", "age": 20, "income": 15000, "wigs": [1, 2, 3]},
... {"name": "Jim", "age": 29, "zim": {"zam": "99"} },
... {"name": "Bill", "age": 19, "income": 29000 },
... ]
>>> print PLOD(test).dropKey("income").returnString()
[
{age: 18, name: 'Jim' , wigs: 68, zim: None },
{age: 18, name: 'Larry', wigs: [3, 2, 9], zim: None },
{age: 20, name: 'Joe' , wigs: [1, 2, 3], zim: None },
{age: 29, name: 'Jim' , wigs: None , zim: {'zam': '99'}},
{age: 19, name: 'Bill' , wigs: None , zim: None }
]
.. versionadded:: 0.1.2
:param key:
        The dictionary key (or a cascading list of keys pointing to the final key)
that should be removed.
:returns: self
'''
result = []
for row in self.table:
result.append(internal.remove_member(row, key))
self.table = result
return self | def function[dropKey, parameter[self, key]]:
constant[Drop an attribute/element/key-value pair from all the dictionaries.
If the dictionary key does not exist in a particular dictionary, then
that dictionary is left unchanged.
Side effect: if the key is a number and it matches a list (interpreted
as a dictionary), it will cause the "keys" to shift just as a list
would be expected to.
Example of use:
>>> test = [
... {"name": "Jim", "age": 18, "income": 93000, "wigs": 68 },
... {"name": "Larry", "age": 18, "wigs": [3, 2, 9]},
... {"name": "Joe", "age": 20, "income": 15000, "wigs": [1, 2, 3]},
... {"name": "Jim", "age": 29, "zim": {"zam": "99"} },
... {"name": "Bill", "age": 19, "income": 29000 },
... ]
>>> print PLOD(test).dropKey("income").returnString()
[
{age: 18, name: 'Jim' , wigs: 68, zim: None },
{age: 18, name: 'Larry', wigs: [3, 2, 9], zim: None },
{age: 20, name: 'Joe' , wigs: [1, 2, 3], zim: None },
{age: 29, name: 'Jim' , wigs: None , zim: {'zam': '99'}},
{age: 19, name: 'Bill' , wigs: None , zim: None }
]
.. versionadded:: 0.1.2
:param key:
        The dictionary key (or a cascading list of keys pointing to the final key)
that should be removed.
:returns: self
]
variable[result] assign[=] list[[]]
for taget[name[row]] in starred[name[self].table] begin[:]
call[name[result].append, parameter[call[name[internal].remove_member, parameter[name[row], name[key]]]]]
name[self].table assign[=] name[result]
return[name[self]] | keyword[def] identifier[dropKey] ( identifier[self] , identifier[key] ):
literal[string]
identifier[result] =[]
keyword[for] identifier[row] keyword[in] identifier[self] . identifier[table] :
identifier[result] . identifier[append] ( identifier[internal] . identifier[remove_member] ( identifier[row] , identifier[key] ))
identifier[self] . identifier[table] = identifier[result]
keyword[return] identifier[self] | def dropKey(self, key):
"""Drop an attribute/element/key-value pair from all the dictionaries.
If the dictionary key does not exist in a particular dictionary, then
that dictionary is left unchanged.
Side effect: if the key is a number and it matches a list (interpreted
as a dictionary), it will cause the "keys" to shift just as a list
would be expected to.
Example of use:
>>> test = [
... {"name": "Jim", "age": 18, "income": 93000, "wigs": 68 },
... {"name": "Larry", "age": 18, "wigs": [3, 2, 9]},
... {"name": "Joe", "age": 20, "income": 15000, "wigs": [1, 2, 3]},
... {"name": "Jim", "age": 29, "zim": {"zam": "99"} },
... {"name": "Bill", "age": 19, "income": 29000 },
... ]
>>> print PLOD(test).dropKey("income").returnString()
[
{age: 18, name: 'Jim' , wigs: 68, zim: None },
{age: 18, name: 'Larry', wigs: [3, 2, 9], zim: None },
{age: 20, name: 'Joe' , wigs: [1, 2, 3], zim: None },
{age: 29, name: 'Jim' , wigs: None , zim: {'zam': '99'}},
{age: 19, name: 'Bill' , wigs: None , zim: None }
]
.. versionadded:: 0.1.2
:param key:
        The dictionary key (or a cascading list of keys pointing to the final key)
that should be removed.
:returns: self
"""
result = []
for row in self.table:
result.append(internal.remove_member(row, key)) # depends on [control=['for'], data=['row']]
self.table = result
return self |
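
The PLOD doctest shows the library-level view; the core of dropKey for the simple (non-cascading, non-list) case can be sketched without PLOD at all, since dict.pop with a default tolerates missing keys:

rows = [
    {'name': 'Jim', 'age': 18, 'income': 93000},
    {'name': 'Larry', 'age': 18},
]
for row in rows:
    row.pop('income', None)   # no-op where the key is absent

print(rows)   # [{'name': 'Jim', 'age': 18}, {'name': 'Larry', 'age': 18}]
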
def static(self, uri, file_or_directory, pattern=r'/?.+',
use_modified_since=True, use_content_range=False):
'''Register a root to serve files from. The input can either be a
file or a directory. See
'''
static_register(self, uri, file_or_directory, pattern,
use_modified_since, use_content_range) | def function[static, parameter[self, uri, file_or_directory, pattern, use_modified_since, use_content_range]]:
constant[Register a root to serve files from. The input can either be a
file or a directory. See
]
call[name[static_register], parameter[name[self], name[uri], name[file_or_directory], name[pattern], name[use_modified_since], name[use_content_range]]] | keyword[def] identifier[static] ( identifier[self] , identifier[uri] , identifier[file_or_directory] , identifier[pattern] = literal[string] ,
identifier[use_modified_since] = keyword[True] , identifier[use_content_range] = keyword[False] ):
literal[string]
identifier[static_register] ( identifier[self] , identifier[uri] , identifier[file_or_directory] , identifier[pattern] ,
identifier[use_modified_since] , identifier[use_content_range] ) | def static(self, uri, file_or_directory, pattern='/?.+', use_modified_since=True, use_content_range=False):
"""Register a root to serve files from. The input can either be a
file or a directory. See
"""
static_register(self, uri, file_or_directory, pattern, use_modified_since, use_content_range) |
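
A hedged usage sketch of static registration (this mirrors a Sanic-style API; the app name and paths are illustrative):

from sanic import Sanic

app = Sanic(__name__)
app.static('/static', './static')                   # serve a directory
app.static('/favicon.ico', './static/favicon.ico')  # serve a single file
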
def check_actors(self, actors):
"""
Performs checks on the actors that are to be used. Raises an exception if invalid setup.
:param actors: the actors to check
:type actors: list
"""
super(Tee, self).check_actors(actors)
actor = self.first_active
if actor is None:
if self._requires_active_actors:
raise Exception("No active actor!")
elif not isinstance(actor, InputConsumer):
raise Exception("First active actor does not accept input: " + actor.full_name) | def function[check_actors, parameter[self, actors]]:
constant[
Performs checks on the actors that are to be used. Raises an exception if invalid setup.
:param actors: the actors to check
:type actors: list
]
call[call[name[super], parameter[name[Tee], name[self]]].check_actors, parameter[name[actors]]]
variable[actor] assign[=] name[self].first_active
if compare[name[actor] is constant[None]] begin[:]
if name[self]._requires_active_actors begin[:]
<ast.Raise object at 0x7da1b069eb30> | keyword[def] identifier[check_actors] ( identifier[self] , identifier[actors] ):
literal[string]
identifier[super] ( identifier[Tee] , identifier[self] ). identifier[check_actors] ( identifier[actors] )
identifier[actor] = identifier[self] . identifier[first_active]
keyword[if] identifier[actor] keyword[is] keyword[None] :
keyword[if] identifier[self] . identifier[_requires_active_actors] :
keyword[raise] identifier[Exception] ( literal[string] )
keyword[elif] keyword[not] identifier[isinstance] ( identifier[actor] , identifier[InputConsumer] ):
keyword[raise] identifier[Exception] ( literal[string] + identifier[actor] . identifier[full_name] ) | def check_actors(self, actors):
"""
Performs checks on the actors that are to be used. Raises an exception if invalid setup.
:param actors: the actors to check
:type actors: list
"""
super(Tee, self).check_actors(actors)
actor = self.first_active
if actor is None:
if self._requires_active_actors:
raise Exception('No active actor!') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif not isinstance(actor, InputConsumer):
raise Exception('First active actor does not accept input: ' + actor.full_name) # depends on [control=['if'], data=[]] |
def number(self) -> int:
"""Episode number.
Unique for an anime and episode type, but not unique across episode
types for the same anime.
"""
match = self._NUMBER_SUFFIX.search(self.epno)
return int(match.group(1)) | def function[number, parameter[self]]:
constant[Episode number.
Unique for an anime and episode type, but not unique across episode
types for the same anime.
]
variable[match] assign[=] call[name[self]._NUMBER_SUFFIX.search, parameter[name[self].epno]]
return[call[name[int], parameter[call[name[match].group, parameter[constant[1]]]]]] | keyword[def] identifier[number] ( identifier[self] )-> identifier[int] :
literal[string]
identifier[match] = identifier[self] . identifier[_NUMBER_SUFFIX] . identifier[search] ( identifier[self] . identifier[epno] )
keyword[return] identifier[int] ( identifier[match] . identifier[group] ( literal[int] )) | def number(self) -> int:
"""Episode number.
Unique for an anime and episode type, but not unique across episode
types for the same anime.
"""
match = self._NUMBER_SUFFIX.search(self.epno)
return int(match.group(1)) |
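
The class-level _NUMBER_SUFFIX regex is not shown above, but the docstring implies it captures a trailing digit run so that special-episode prefixes survive; a plausible reconstruction and check:

import re

_NUMBER_SUFFIX = re.compile(r'(\d+)$')   # assumed pattern, not shown above

assert int(_NUMBER_SUFFIX.search('12').group(1)) == 12   # regular episode
assert int(_NUMBER_SUFFIX.search('S2').group(1)) == 2    # special episode 'S2'
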
def extract_tmaster(self, topology):
"""
Returns the representation of tmaster that will
be returned from Tracker.
"""
tmasterLocation = {
"name": None,
"id": None,
"host": None,
"controller_port": None,
"master_port": None,
"stats_port": None,
}
if topology.tmaster:
tmasterLocation["name"] = topology.tmaster.topology_name
tmasterLocation["id"] = topology.tmaster.topology_id
tmasterLocation["host"] = topology.tmaster.host
tmasterLocation["controller_port"] = topology.tmaster.controller_port
tmasterLocation["master_port"] = topology.tmaster.master_port
tmasterLocation["stats_port"] = topology.tmaster.stats_port
return tmasterLocation | def function[extract_tmaster, parameter[self, topology]]:
constant[
Returns the representation of tmaster that will
be returned from Tracker.
]
variable[tmasterLocation] assign[=] dictionary[[<ast.Constant object at 0x7da20c76f1f0>, <ast.Constant object at 0x7da20c76d7e0>, <ast.Constant object at 0x7da20c76e9e0>, <ast.Constant object at 0x7da20c76c3a0>, <ast.Constant object at 0x7da20c76d6c0>, <ast.Constant object at 0x7da20c76c0a0>], [<ast.Constant object at 0x7da20c76ca00>, <ast.Constant object at 0x7da20c76e8c0>, <ast.Constant object at 0x7da20c76dc30>, <ast.Constant object at 0x7da20c76fee0>, <ast.Constant object at 0x7da20c76c1f0>, <ast.Constant object at 0x7da20c76f010>]]
if name[topology].tmaster begin[:]
call[name[tmasterLocation]][constant[name]] assign[=] name[topology].tmaster.topology_name
call[name[tmasterLocation]][constant[id]] assign[=] name[topology].tmaster.topology_id
call[name[tmasterLocation]][constant[host]] assign[=] name[topology].tmaster.host
call[name[tmasterLocation]][constant[controller_port]] assign[=] name[topology].tmaster.controller_port
call[name[tmasterLocation]][constant[master_port]] assign[=] name[topology].tmaster.master_port
call[name[tmasterLocation]][constant[stats_port]] assign[=] name[topology].tmaster.stats_port
return[name[tmasterLocation]] | keyword[def] identifier[extract_tmaster] ( identifier[self] , identifier[topology] ):
literal[string]
identifier[tmasterLocation] ={
literal[string] : keyword[None] ,
literal[string] : keyword[None] ,
literal[string] : keyword[None] ,
literal[string] : keyword[None] ,
literal[string] : keyword[None] ,
literal[string] : keyword[None] ,
}
keyword[if] identifier[topology] . identifier[tmaster] :
identifier[tmasterLocation] [ literal[string] ]= identifier[topology] . identifier[tmaster] . identifier[topology_name]
identifier[tmasterLocation] [ literal[string] ]= identifier[topology] . identifier[tmaster] . identifier[topology_id]
identifier[tmasterLocation] [ literal[string] ]= identifier[topology] . identifier[tmaster] . identifier[host]
identifier[tmasterLocation] [ literal[string] ]= identifier[topology] . identifier[tmaster] . identifier[controller_port]
identifier[tmasterLocation] [ literal[string] ]= identifier[topology] . identifier[tmaster] . identifier[master_port]
identifier[tmasterLocation] [ literal[string] ]= identifier[topology] . identifier[tmaster] . identifier[stats_port]
keyword[return] identifier[tmasterLocation] | def extract_tmaster(self, topology):
"""
Returns the representation of tmaster that will
be returned from Tracker.
"""
tmasterLocation = {'name': None, 'id': None, 'host': None, 'controller_port': None, 'master_port': None, 'stats_port': None}
if topology.tmaster:
tmasterLocation['name'] = topology.tmaster.topology_name
tmasterLocation['id'] = topology.tmaster.topology_id
tmasterLocation['host'] = topology.tmaster.host
tmasterLocation['controller_port'] = topology.tmaster.controller_port
tmasterLocation['master_port'] = topology.tmaster.master_port
tmasterLocation['stats_port'] = topology.tmaster.stats_port # depends on [control=['if'], data=[]]
return tmasterLocation |
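
extract_tmaster is schema-first: it always emits the full key set, with None placeholders, so Tracker clients see a stable shape whether or not a tmaster is registered. The same idea in miniature (names illustrative):

def extract_location(obj):
    out = {'name': None, 'host': None}   # stable schema, even when absent
    if obj:
        out['name'] = obj.name
        out['host'] = obj.host
    return out
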
def set_default_moe_hparams(hparams):
"""Add necessary hyperparameters for mixture-of-experts."""
hparams.moe_num_experts = 16
hparams.moe_loss_coef = 1e-2
hparams.add_hparam("moe_gating", "top_2")
# Experts have fixed capacity per batch. We need some extra capacity
# in case gating is not perfectly balanced.
# moe_capacity_factor_* should be set to a value >=1.
hparams.add_hparam("moe_capacity_factor_train", 1.25)
hparams.add_hparam("moe_capacity_factor_eval", 2.0)
hparams.add_hparam("moe_capacity_factor_second_level", 1.0)
# Each expert has a hidden layer with this size.
hparams.add_hparam("moe_hidden_size", 4096)
# For gating, divide inputs into groups of this size before gating.
# Each group sends the same number of inputs to each expert.
# Ideally, the group size would be the whole batch, but this is expensive
# due to our use of matrix multiplication for reordering.
hparams.add_hparam("moe_group_size", 1024)
# For top_2 gating, whether to impose an additional loss in order to make
# the experts equally used as the second-place expert.
hparams.add_hparam("moe_use_second_place_loss", 0)
# In top_2 gating, policy for whether to use a second-place expert.
# Legal values are:
# "all": always
# "none": never
# "threshold": if gate value > the given threshold
# "random": if gate value > threshold*random_uniform(0,1)
hparams.add_hparam("moe_second_policy_train", "random")
hparams.add_hparam("moe_second_policy_eval", "random")
hparams.add_hparam("moe_second_threshold_train", 0.2)
hparams.add_hparam("moe_second_threshold_eval", 0.2) | def function[set_default_moe_hparams, parameter[hparams]]:
constant[Add necessary hyperparameters for mixture-of-experts.]
name[hparams].moe_num_experts assign[=] constant[16]
name[hparams].moe_loss_coef assign[=] constant[0.01]
call[name[hparams].add_hparam, parameter[constant[moe_gating], constant[top_2]]]
call[name[hparams].add_hparam, parameter[constant[moe_capacity_factor_train], constant[1.25]]]
call[name[hparams].add_hparam, parameter[constant[moe_capacity_factor_eval], constant[2.0]]]
call[name[hparams].add_hparam, parameter[constant[moe_capacity_factor_second_level], constant[1.0]]]
call[name[hparams].add_hparam, parameter[constant[moe_hidden_size], constant[4096]]]
call[name[hparams].add_hparam, parameter[constant[moe_group_size], constant[1024]]]
call[name[hparams].add_hparam, parameter[constant[moe_use_second_place_loss], constant[0]]]
call[name[hparams].add_hparam, parameter[constant[moe_second_policy_train], constant[random]]]
call[name[hparams].add_hparam, parameter[constant[moe_second_policy_eval], constant[random]]]
call[name[hparams].add_hparam, parameter[constant[moe_second_threshold_train], constant[0.2]]]
call[name[hparams].add_hparam, parameter[constant[moe_second_threshold_eval], constant[0.2]]] | keyword[def] identifier[set_default_moe_hparams] ( identifier[hparams] ):
literal[string]
identifier[hparams] . identifier[moe_num_experts] = literal[int]
identifier[hparams] . identifier[moe_loss_coef] = literal[int]
identifier[hparams] . identifier[add_hparam] ( literal[string] , literal[string] )
identifier[hparams] . identifier[add_hparam] ( literal[string] , literal[int] )
identifier[hparams] . identifier[add_hparam] ( literal[string] , literal[int] )
identifier[hparams] . identifier[add_hparam] ( literal[string] , literal[int] )
identifier[hparams] . identifier[add_hparam] ( literal[string] , literal[int] )
identifier[hparams] . identifier[add_hparam] ( literal[string] , literal[int] )
identifier[hparams] . identifier[add_hparam] ( literal[string] , literal[int] )
identifier[hparams] . identifier[add_hparam] ( literal[string] , literal[string] )
identifier[hparams] . identifier[add_hparam] ( literal[string] , literal[string] )
identifier[hparams] . identifier[add_hparam] ( literal[string] , literal[int] )
identifier[hparams] . identifier[add_hparam] ( literal[string] , literal[int] ) | def set_default_moe_hparams(hparams):
"""Add necessary hyperparameters for mixture-of-experts."""
hparams.moe_num_experts = 16
hparams.moe_loss_coef = 0.01
hparams.add_hparam('moe_gating', 'top_2')
# Experts have fixed capacity per batch. We need some extra capacity
# in case gating is not perfectly balanced.
# moe_capacity_factor_* should be set to a value >=1.
hparams.add_hparam('moe_capacity_factor_train', 1.25)
hparams.add_hparam('moe_capacity_factor_eval', 2.0)
hparams.add_hparam('moe_capacity_factor_second_level', 1.0)
# Each expert has a hidden layer with this size.
hparams.add_hparam('moe_hidden_size', 4096)
# For gating, divide inputs into groups of this size before gating.
# Each group sends the same number of inputs to each expert.
# Ideally, the group size would be the whole batch, but this is expensive
# due to our use of matrix multiplication for reordering.
hparams.add_hparam('moe_group_size', 1024)
# For top_2 gating, whether to impose an additional loss in order to make
# the experts equally used as the second-place expert.
hparams.add_hparam('moe_use_second_place_loss', 0)
# In top_2 gating, policy for whether to use a second-place expert.
# Legal values are:
# "all": always
# "none": never
# "threshold": if gate value > the given threshold
# "random": if gate value > threshold*random_uniform(0,1)
hparams.add_hparam('moe_second_policy_train', 'random')
hparams.add_hparam('moe_second_policy_eval', 'random')
hparams.add_hparam('moe_second_threshold_train', 0.2)
hparams.add_hparam('moe_second_threshold_eval', 0.2) |
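
A worked example of how these knobs interact, using the standard MoE expert-capacity computation (the exact formula applied downstream is assumed here, not shown above): each group of moe_group_size inputs is routed across moe_num_experts experts, and each expert accepts at most capacity_factor * group_size / num_experts positions from that group.

group_size = 1024
num_experts = 16
capacity_factor_train = 1.25

capacity = int(capacity_factor_train * group_size / num_experts)
print(capacity)   # 80 positions per expert per group during training
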
def add_request_log_fields(
self, log_fields: LogFields,
call_details: Union[grpc.HandlerCallDetails,
grpc.ClientCallDetails]
):
"""Add log fields related to a request to the provided log fields
:param log_fields: log fields instance to which to add the fields
:param call_details: some information regarding the call
"""
service, method = call_details.method[1:].split("/")
log_fields.add_fields({
"system": "grpc",
"span.kind": self.KIND,
"grpc.service": service,
"grpc.method": method,
}) | def function[add_request_log_fields, parameter[self, log_fields, call_details]]:
constant[Add log fields related to a request to the provided log fields
:param log_fields: log fields instance to which to add the fields
:param call_details: some information regarding the call
]
<ast.Tuple object at 0x7da18ede6020> assign[=] call[call[name[call_details].method][<ast.Slice object at 0x7da18ede5c90>].split, parameter[constant[/]]]
call[name[log_fields].add_fields, parameter[dictionary[[<ast.Constant object at 0x7da18ede6080>, <ast.Constant object at 0x7da18ede4070>, <ast.Constant object at 0x7da18ede7280>, <ast.Constant object at 0x7da18ede4f40>], [<ast.Constant object at 0x7da18ede62c0>, <ast.Attribute object at 0x7da18ede7d30>, <ast.Name object at 0x7da18ede4610>, <ast.Name object at 0x7da18ede73d0>]]]] | keyword[def] identifier[add_request_log_fields] (
identifier[self] , identifier[log_fields] : identifier[LogFields] ,
identifier[call_details] : identifier[Union] [ identifier[grpc] . identifier[HandlerCallDetails] ,
identifier[grpc] . identifier[ClientCallDetails] ]
):
literal[string]
identifier[service] , identifier[method] = identifier[call_details] . identifier[method] [ literal[int] :]. identifier[split] ( literal[string] )
identifier[log_fields] . identifier[add_fields] ({
literal[string] : literal[string] ,
literal[string] : identifier[self] . identifier[KIND] ,
literal[string] : identifier[service] ,
literal[string] : identifier[method] ,
}) | def add_request_log_fields(self, log_fields: LogFields, call_details: Union[grpc.HandlerCallDetails, grpc.ClientCallDetails]):
"""Add log fields related to a request to the provided log fields
:param log_fields: log fields instance to which to add the fields
:param call_details: some information regarding the call
"""
(service, method) = call_details.method[1:].split('/')
log_fields.add_fields({'system': 'grpc', 'span.kind': self.KIND, 'grpc.service': service, 'grpc.method': method}) |
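
A minimal sketch of the method-string split used above; the full method name and the KIND value are illustrative, but the "/package.Service/Method" layout is the standard gRPC convention.

# The gRPC call details carry the full method as "/<service>/<method>".
full_method = "/helloworld.Greeter/SayHello"  # hypothetical value

service, method = full_method[1:].split("/")
assert service == "helloworld.Greeter"
assert method == "SayHello"

# These are the fields add_request_log_fields would attach.
fields = {
    "system": "grpc",
    "span.kind": "client",  # self.KIND is "client" or "server" by convention
    "grpc.service": service,
    "grpc.method": method,
}
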
def data_log_send(self, fl_1, fl_2, fl_3, fl_4, fl_5, fl_6, force_mavlink1=False):
'''
Configurable data log probes to be used inside Simulink
fl_1 : Log value 1 (float)
fl_2 : Log value 2 (float)
fl_3 : Log value 3 (float)
fl_4 : Log value 4 (float)
fl_5 : Log value 5 (float)
fl_6 : Log value 6 (float)
'''
return self.send(self.data_log_encode(fl_1, fl_2, fl_3, fl_4, fl_5, fl_6), force_mavlink1=force_mavlink1) | def function[data_log_send, parameter[self, fl_1, fl_2, fl_3, fl_4, fl_5, fl_6, force_mavlink1]]:
constant[
Configurable data log probes to be used inside Simulink
fl_1 : Log value 1 (float)
fl_2 : Log value 2 (float)
fl_3 : Log value 3 (float)
fl_4 : Log value 4 (float)
fl_5 : Log value 5 (float)
fl_6 : Log value 6 (float)
]
return[call[name[self].send, parameter[call[name[self].data_log_encode, parameter[name[fl_1], name[fl_2], name[fl_3], name[fl_4], name[fl_5], name[fl_6]]]]]] | keyword[def] identifier[data_log_send] ( identifier[self] , identifier[fl_1] , identifier[fl_2] , identifier[fl_3] , identifier[fl_4] , identifier[fl_5] , identifier[fl_6] , identifier[force_mavlink1] = keyword[False] ):
literal[string]
keyword[return] identifier[self] . identifier[send] ( identifier[self] . identifier[data_log_encode] ( identifier[fl_1] , identifier[fl_2] , identifier[fl_3] , identifier[fl_4] , identifier[fl_5] , identifier[fl_6] ), identifier[force_mavlink1] = identifier[force_mavlink1] ) | def data_log_send(self, fl_1, fl_2, fl_3, fl_4, fl_5, fl_6, force_mavlink1=False):
"""
Configurable data log probes to be used inside Simulink
fl_1 : Log value 1 (float)
fl_2 : Log value 2 (float)
fl_3 : Log value 3 (float)
fl_4 : Log value 4 (float)
fl_5 : Log value 5 (float)
fl_6 : Log value 6 (float)
"""
return self.send(self.data_log_encode(fl_1, fl_2, fl_3, fl_4, fl_5, fl_6), force_mavlink1=force_mavlink1) |
def _get_vi_mode(cli):
"""Get the current vi mode for display."""
return {
InputMode.INSERT: 'I',
InputMode.NAVIGATION: 'N',
InputMode.REPLACE: 'R',
InputMode.INSERT_MULTIPLE: 'M'
}[cli.vi_state.input_mode] | def function[_get_vi_mode, parameter[cli]]:
constant[Get the current vi mode for display.]
return[call[dictionary[[<ast.Attribute object at 0x7da2041d85b0>, <ast.Attribute object at 0x7da2041d89d0>, <ast.Attribute object at 0x7da2041dac50>, <ast.Attribute object at 0x7da2041d9db0>], [<ast.Constant object at 0x7da2041d86d0>, <ast.Constant object at 0x7da2041db6d0>, <ast.Constant object at 0x7da2041d96f0>, <ast.Constant object at 0x7da2041db1f0>]]][name[cli].vi_state.input_mode]] | keyword[def] identifier[_get_vi_mode] ( identifier[cli] ):
literal[string]
keyword[return] {
identifier[InputMode] . identifier[INSERT] : literal[string] ,
identifier[InputMode] . identifier[NAVIGATION] : literal[string] ,
identifier[InputMode] . identifier[REPLACE] : literal[string] ,
identifier[InputMode] . identifier[INSERT_MULTIPLE] : literal[string]
}[ identifier[cli] . identifier[vi_state] . identifier[input_mode] ] | def _get_vi_mode(cli):
"""Get the current vi mode for display."""
return {InputMode.INSERT: 'I', InputMode.NAVIGATION: 'N', InputMode.REPLACE: 'R', InputMode.INSERT_MULTIPLE: 'M'}[cli.vi_state.input_mode] |
def pages(self):
"""The total number of pages"""
if self.per_page == 0 or self.total is None:
pages = 0
else:
pages = int(ceil(self.total / float(self.per_page)))
return pages | def function[pages, parameter[self]]:
constant[The total number of pages]
if <ast.BoolOp object at 0x7da1b21bbdf0> begin[:]
variable[pages] assign[=] constant[0]
return[name[pages]] | keyword[def] identifier[pages] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[per_page] == literal[int] keyword[or] identifier[self] . identifier[total] keyword[is] keyword[None] :
identifier[pages] = literal[int]
keyword[else] :
identifier[pages] = identifier[int] ( identifier[ceil] ( identifier[self] . identifier[total] / identifier[float] ( identifier[self] . identifier[per_page] )))
keyword[return] identifier[pages] | def pages(self):
"""The total number of pages"""
if self.per_page == 0 or self.total is None:
pages = 0 # depends on [control=['if'], data=[]]
else:
pages = int(ceil(self.total / float(self.per_page)))
return pages |
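
The property above is a plain ceiling division; a standalone check (the function form here is illustrative):

from math import ceil

def page_count(total, per_page):
    # Mirrors the property: zero per_page or unknown total means zero pages.
    if per_page == 0 or total is None:
        return 0
    return int(ceil(total / float(per_page)))

assert page_count(25, 10) == 3    # a partial last page still counts
assert page_count(0, 10) == 0
assert page_count(None, 10) == 0
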
def sweep(crypto, private_key, to_address, fee=None, password=None, **modes):
"""
Move all funds by private key to another address.
"""
from moneywagon.tx import Transaction
tx = Transaction(crypto, verbose=modes.get('verbose', False))
tx.add_inputs(private_key=private_key, password=password, **modes)
tx.change_address = to_address
tx.fee(fee)
return tx.push() | def function[sweep, parameter[crypto, private_key, to_address, fee, password]]:
constant[
Move all funds by private key to another address.
]
from relative_module[moneywagon.tx] import module[Transaction]
variable[tx] assign[=] call[name[Transaction], parameter[name[crypto]]]
call[name[tx].add_inputs, parameter[]]
name[tx].change_address assign[=] name[to_address]
call[name[tx].fee, parameter[name[fee]]]
return[call[name[tx].push, parameter[]]] | keyword[def] identifier[sweep] ( identifier[crypto] , identifier[private_key] , identifier[to_address] , identifier[fee] = keyword[None] , identifier[password] = keyword[None] ,** identifier[modes] ):
literal[string]
keyword[from] identifier[moneywagon] . identifier[tx] keyword[import] identifier[Transaction]
identifier[tx] = identifier[Transaction] ( identifier[crypto] , identifier[verbose] = identifier[modes] . identifier[get] ( literal[string] , keyword[False] ))
identifier[tx] . identifier[add_inputs] ( identifier[private_key] = identifier[private_key] , identifier[password] = identifier[password] ,** identifier[modes] )
identifier[tx] . identifier[change_address] = identifier[to_address]
identifier[tx] . identifier[fee] ( identifier[fee] )
keyword[return] identifier[tx] . identifier[push] () | def sweep(crypto, private_key, to_address, fee=None, password=None, **modes):
"""
Move all funds by private key to another address.
"""
from moneywagon.tx import Transaction
tx = Transaction(crypto, verbose=modes.get('verbose', False))
tx.add_inputs(private_key=private_key, password=password, **modes)
tx.change_address = to_address
tx.fee(fee)
return tx.push() |
def strip_seq_cntrl(self, idx):
"""strip(2 byte) wlan.seq(12 bit) and wlan.fram(4 bit)
number information.
:seq_cntrl: ctypes.Structure
:return: int
sequence number
:return: int
fragment number
"""
seq_cntrl = struct.unpack('H', self._packet[idx:idx + 2])[0]
seq_num = seq_cntrl >> 4
frag_num = seq_cntrl & 0x000f
return seq_num, frag_num | def function[strip_seq_cntrl, parameter[self, idx]]:
constant[Strip the 2-byte sequence control field into wlan.seq (12 bit)
        and wlan.frag (4 bit) number information.
:seq_cntrl: ctypes.Structure
:return: int
sequence number
:return: int
fragment number
]
variable[seq_cntrl] assign[=] call[call[name[struct].unpack, parameter[constant[H], call[name[self]._packet][<ast.Slice object at 0x7da1affc3a90>]]]][constant[0]]
variable[seq_num] assign[=] binary_operation[name[seq_cntrl] <ast.RShift object at 0x7da2590d6a40> constant[4]]
variable[frag_num] assign[=] binary_operation[name[seq_cntrl] <ast.BitAnd object at 0x7da2590d6b60> constant[15]]
return[tuple[[<ast.Name object at 0x7da1affc38e0>, <ast.Name object at 0x7da1affc1a50>]]] | keyword[def] identifier[strip_seq_cntrl] ( identifier[self] , identifier[idx] ):
literal[string]
identifier[seq_cntrl] = identifier[struct] . identifier[unpack] ( literal[string] , identifier[self] . identifier[_packet] [ identifier[idx] : identifier[idx] + literal[int] ])[ literal[int] ]
identifier[seq_num] = identifier[seq_cntrl] >> literal[int]
identifier[frag_num] = identifier[seq_cntrl] & literal[int]
keyword[return] identifier[seq_num] , identifier[frag_num] | def strip_seq_cntrl(self, idx):
"""strip(2 byte) wlan.seq(12 bit) and wlan.fram(4 bit)
number information.
:seq_cntrl: ctypes.Structure
:return: int
sequence number
:return: int
fragment number
"""
seq_cntrl = struct.unpack('H', self._packet[idx:idx + 2])[0]
seq_num = seq_cntrl >> 4
frag_num = seq_cntrl & 15
return (seq_num, frag_num) |
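
A worked round trip of the bit layout, assuming the same 'H' (unsigned short) packing used above: the high 12 bits hold the sequence number and the low 4 bits the fragment number.

import struct

seq_cntrl = (2047 << 4) | 5          # seq=2047 (12-bit max), frag=5
packet = struct.pack('H', seq_cntrl)

value = struct.unpack('H', packet)[0]
assert value >> 4 == 2047            # sequence number
assert value & 0x000f == 5           # fragment number
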
def _save_files(self, data, dtype_out_time):
"""Save the data to netcdf files in direc_out."""
path = self.path_out[dtype_out_time]
if not os.path.isdir(self.dir_out):
os.makedirs(self.dir_out)
if 'reg' in dtype_out_time:
try:
reg_data = xr.open_dataset(path)
except (EOFError, RuntimeError, IOError):
reg_data = xr.Dataset()
reg_data.update(data)
data_out = reg_data
else:
data_out = data
if isinstance(data_out, xr.DataArray):
data_out = xr.Dataset({self.name: data_out})
data_out.to_netcdf(path, engine='netcdf4', format='NETCDF3_64BIT') | def function[_save_files, parameter[self, data, dtype_out_time]]:
constant[Save the data to netcdf files in dir_out.]
variable[path] assign[=] call[name[self].path_out][name[dtype_out_time]]
if <ast.UnaryOp object at 0x7da1b0477d30> begin[:]
call[name[os].makedirs, parameter[name[self].dir_out]]
if compare[constant[reg] in name[dtype_out_time]] begin[:]
<ast.Try object at 0x7da1b0476a10>
call[name[reg_data].update, parameter[name[data]]]
variable[data_out] assign[=] name[reg_data]
if call[name[isinstance], parameter[name[data_out], name[xr].DataArray]] begin[:]
variable[data_out] assign[=] call[name[xr].Dataset, parameter[dictionary[[<ast.Attribute object at 0x7da1b0476dd0>], [<ast.Name object at 0x7da1b04778b0>]]]]
call[name[data_out].to_netcdf, parameter[name[path]]] | keyword[def] identifier[_save_files] ( identifier[self] , identifier[data] , identifier[dtype_out_time] ):
literal[string]
identifier[path] = identifier[self] . identifier[path_out] [ identifier[dtype_out_time] ]
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[isdir] ( identifier[self] . identifier[dir_out] ):
identifier[os] . identifier[makedirs] ( identifier[self] . identifier[dir_out] )
keyword[if] literal[string] keyword[in] identifier[dtype_out_time] :
keyword[try] :
identifier[reg_data] = identifier[xr] . identifier[open_dataset] ( identifier[path] )
keyword[except] ( identifier[EOFError] , identifier[RuntimeError] , identifier[IOError] ):
identifier[reg_data] = identifier[xr] . identifier[Dataset] ()
identifier[reg_data] . identifier[update] ( identifier[data] )
identifier[data_out] = identifier[reg_data]
keyword[else] :
identifier[data_out] = identifier[data]
keyword[if] identifier[isinstance] ( identifier[data_out] , identifier[xr] . identifier[DataArray] ):
identifier[data_out] = identifier[xr] . identifier[Dataset] ({ identifier[self] . identifier[name] : identifier[data_out] })
identifier[data_out] . identifier[to_netcdf] ( identifier[path] , identifier[engine] = literal[string] , identifier[format] = literal[string] ) | def _save_files(self, data, dtype_out_time):
"""Save the data to netcdf files in direc_out."""
path = self.path_out[dtype_out_time]
if not os.path.isdir(self.dir_out):
os.makedirs(self.dir_out) # depends on [control=['if'], data=[]]
if 'reg' in dtype_out_time:
try:
reg_data = xr.open_dataset(path) # depends on [control=['try'], data=[]]
except (EOFError, RuntimeError, IOError):
reg_data = xr.Dataset() # depends on [control=['except'], data=[]]
reg_data.update(data)
data_out = reg_data # depends on [control=['if'], data=[]]
else:
data_out = data
if isinstance(data_out, xr.DataArray):
data_out = xr.Dataset({self.name: data_out}) # depends on [control=['if'], data=[]]
data_out.to_netcdf(path, engine='netcdf4', format='NETCDF3_64BIT') |
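
The reopen-update-rewrite pattern for regional output can be exercised with an in-memory dataset; a minimal sketch (variable names are illustrative):

import xarray as xr

# Existing regional results are reopened and updated so that repeated
# runs accumulate variables in a single file rather than clobbering it.
existing = xr.Dataset({'t_surf': xr.DataArray([280.0, 281.5])})
new = xr.Dataset({'precip': xr.DataArray([1.2, 0.8])})

existing.update(new)   # same merge step as reg_data.update(data) above
assert set(existing.data_vars) == {'t_surf', 'precip'}
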
def regexes_match_sublist(regexes, match_candidates):
"""
    Filter the match_candidates list to return only the candidates that match the regexes
    :param regexes: a list of regexes used to filter the list of candidates
    :param match_candidates: the list of candidates
    :return: the filtered list of only the candidates that match the regexes
"""
return [match for sublist in [regex_match_sublist(rgx, match_candidates) for rgx in regexes] for match in sublist] | def function[regexes_match_sublist, parameter[regexes, match_candidates]]:
constant[
    Filter the match_candidates list to return only the candidates that match the regexes
    :param regexes: a list of regexes used to filter the list of candidates
    :param match_candidates: the list of candidates
    :return: the filtered list of only the candidates that match the regexes
]
return[<ast.ListComp object at 0x7da1b26ae050>] | keyword[def] identifier[regexes_match_sublist] ( identifier[regexes] , identifier[match_candidates] ):
literal[string]
keyword[return] [ identifier[match] keyword[for] identifier[sublist] keyword[in] [ identifier[regex_match_sublist] ( identifier[rgx] , identifier[match_candidates] ) keyword[for] identifier[rgx] keyword[in] identifier[regexes] ] keyword[for] identifier[match] keyword[in] identifier[sublist] ] | def regexes_match_sublist(regexes, match_candidates):
"""
    Filter the match_candidates list to return only the candidates that match the regexes
    :param regexes: a list of regexes used to filter the list of candidates
    :param match_candidates: the list of candidates
    :return: the filtered list of only the candidates that match the regexes
"""
return [match for sublist in [regex_match_sublist(rgx, match_candidates) for rgx in regexes] for match in sublist] |
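
regex_match_sublist is defined elsewhere in the source; with re.search standing in for it, a self-contained equivalent of the flattening looks like:

import re

def regex_match_sublist(regex, candidates):
    # Stand-in for the helper used above.
    return [c for c in candidates if re.search(regex, c)]

names = ['job-01', 'job-02', 'task-07', 'misc']
sublists = [regex_match_sublist(r, names) for r in [r'^job-\d+$', r'^task-\d+$']]
flat = [match for sublist in sublists for match in sublist]
assert flat == ['job-01', 'job-02', 'task-07']
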
def update(self, **kwargs):
"""Updates the server with any changes you've made to the current input
along with any additional arguments you specify.
:param kwargs: Additional arguments (optional). For more about the
available parameters, see `Input parameters <http://dev.splunk.com/view/SP-CAAAEE6#inputparams>`_ on Splunk Developer Portal.
:type kwargs: ``dict``
:return: The input this method was called on.
:rtype: class:`Input`
"""
# UDP and TCP inputs require special handling due to their restrictToHost
# field. For all other inputs kinds, we can dispatch to the superclass method.
if self.kind not in ['tcp', 'splunktcp', 'tcp/raw', 'tcp/cooked', 'udp']:
return super(Input, self).update(**kwargs)
else:
# The behavior of restrictToHost is inconsistent across input kinds and versions of Splunk.
# In Splunk 4.x, the name of the entity is only the port, independent of the value of
# restrictToHost. In Splunk 5.0 this changed so the name will be of the form <restrictToHost>:<port>.
# In 5.0 and 5.0.1, if you don't supply the restrictToHost value on every update, it will
# remove the host restriction from the input. As of 5.0.2 you simply can't change restrictToHost
# on an existing input.
# The logic to handle all these cases:
# - Throw an exception if the user tries to set restrictToHost on an existing input
# for *any* version of Splunk.
# - Set the existing restrictToHost value on the update args internally so we don't
# cause it to change in Splunk 5.0 and 5.0.1.
to_update = kwargs.copy()
if 'restrictToHost' in kwargs:
raise IllegalOperationException("Cannot set restrictToHost on an existing input with the SDK.")
elif 'restrictToHost' in self._state.content and self.kind != 'udp':
to_update['restrictToHost'] = self._state.content['restrictToHost']
# Do the actual update operation.
return super(Input, self).update(**to_update) | def function[update, parameter[self]]:
constant[Updates the server with any changes you've made to the current input
along with any additional arguments you specify.
:param kwargs: Additional arguments (optional). For more about the
available parameters, see `Input parameters <http://dev.splunk.com/view/SP-CAAAEE6#inputparams>`_ on Splunk Developer Portal.
:type kwargs: ``dict``
:return: The input this method was called on.
:rtype: class:`Input`
]
if compare[name[self].kind <ast.NotIn object at 0x7da2590d7190> list[[<ast.Constant object at 0x7da1b1951450>, <ast.Constant object at 0x7da1b1951b40>, <ast.Constant object at 0x7da1b1951e40>, <ast.Constant object at 0x7da1b1952680>, <ast.Constant object at 0x7da1b1951c30>]]] begin[:]
return[call[call[name[super], parameter[name[Input], name[self]]].update, parameter[]]] | keyword[def] identifier[update] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
keyword[if] identifier[self] . identifier[kind] keyword[not] keyword[in] [ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ]:
keyword[return] identifier[super] ( identifier[Input] , identifier[self] ). identifier[update] (** identifier[kwargs] )
keyword[else] :
identifier[to_update] = identifier[kwargs] . identifier[copy] ()
keyword[if] literal[string] keyword[in] identifier[kwargs] :
keyword[raise] identifier[IllegalOperationException] ( literal[string] )
keyword[elif] literal[string] keyword[in] identifier[self] . identifier[_state] . identifier[content] keyword[and] identifier[self] . identifier[kind] != literal[string] :
identifier[to_update] [ literal[string] ]= identifier[self] . identifier[_state] . identifier[content] [ literal[string] ]
keyword[return] identifier[super] ( identifier[Input] , identifier[self] ). identifier[update] (** identifier[to_update] ) | def update(self, **kwargs):
"""Updates the server with any changes you've made to the current input
along with any additional arguments you specify.
:param kwargs: Additional arguments (optional). For more about the
available parameters, see `Input parameters <http://dev.splunk.com/view/SP-CAAAEE6#inputparams>`_ on Splunk Developer Portal.
:type kwargs: ``dict``
:return: The input this method was called on.
:rtype: class:`Input`
"""
# UDP and TCP inputs require special handling due to their restrictToHost
# field. For all other inputs kinds, we can dispatch to the superclass method.
if self.kind not in ['tcp', 'splunktcp', 'tcp/raw', 'tcp/cooked', 'udp']:
return super(Input, self).update(**kwargs) # depends on [control=['if'], data=[]]
else:
# The behavior of restrictToHost is inconsistent across input kinds and versions of Splunk.
# In Splunk 4.x, the name of the entity is only the port, independent of the value of
# restrictToHost. In Splunk 5.0 this changed so the name will be of the form <restrictToHost>:<port>.
# In 5.0 and 5.0.1, if you don't supply the restrictToHost value on every update, it will
# remove the host restriction from the input. As of 5.0.2 you simply can't change restrictToHost
# on an existing input.
# The logic to handle all these cases:
# - Throw an exception if the user tries to set restrictToHost on an existing input
# for *any* version of Splunk.
# - Set the existing restrictToHost value on the update args internally so we don't
# cause it to change in Splunk 5.0 and 5.0.1.
to_update = kwargs.copy()
if 'restrictToHost' in kwargs:
raise IllegalOperationException('Cannot set restrictToHost on an existing input with the SDK.') # depends on [control=['if'], data=[]]
elif 'restrictToHost' in self._state.content and self.kind != 'udp':
to_update['restrictToHost'] = self._state.content['restrictToHost'] # depends on [control=['if'], data=[]]
# Do the actual update operation.
return super(Input, self).update(**to_update) |
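
The restrictToHost guard can be read in isolation; a distilled sketch of the branch logic (names and exception type are simplified stand-ins):

def guard_update(kind, state_content, kwargs):
    # Non-TCP/UDP kinds pass straight through.
    if kind not in ['tcp', 'splunktcp', 'tcp/raw', 'tcp/cooked', 'udp']:
        return kwargs
    to_update = kwargs.copy()
    if 'restrictToHost' in kwargs:
        raise ValueError("Cannot set restrictToHost on an existing input.")
    elif 'restrictToHost' in state_content and kind != 'udp':
        # Re-send the stored value so Splunk 5.0/5.0.1 keeps the restriction.
        to_update['restrictToHost'] = state_content['restrictToHost']
    return to_update

assert guard_update('tcp', {'restrictToHost': '10.0.0.1'}, {'index': 'main'}) == \
    {'index': 'main', 'restrictToHost': '10.0.0.1'}
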
def load(self, data, many=None, partial=None, unknown=None):
"""Deserialize a data structure to an object defined by this Schema's fields.
:param dict data: The data to deserialize.
:param bool many: Whether to deserialize `data` as a collection. If `None`, the
value for `self.many` is used.
:param bool|tuple partial: Whether to ignore missing fields and not require
any fields declared. Propagates down to ``Nested`` fields as well. If
its value is an iterable, only missing fields listed in that iterable
will be ignored. Use dot delimiters to specify nested fields.
:param unknown: Whether to exclude, include, or raise an error for unknown
fields in the data. Use `EXCLUDE`, `INCLUDE` or `RAISE`.
If `None`, the value for `self.unknown` is used.
:return: A dict of deserialized data
:rtype: dict
.. versionadded:: 1.0.0
.. versionchanged:: 3.0.0b7
This method returns the deserialized data rather than a ``(data, errors)`` duple.
A :exc:`ValidationError <marshmallow.exceptions.ValidationError>` is raised
if invalid data are passed.
"""
return self._do_load(
data, many, partial=partial, unknown=unknown,
postprocess=True,
) | def function[load, parameter[self, data, many, partial, unknown]]:
constant[Deserialize a data structure to an object defined by this Schema's fields.
:param dict data: The data to deserialize.
:param bool many: Whether to deserialize `data` as a collection. If `None`, the
value for `self.many` is used.
:param bool|tuple partial: Whether to ignore missing fields and not require
any fields declared. Propagates down to ``Nested`` fields as well. If
its value is an iterable, only missing fields listed in that iterable
will be ignored. Use dot delimiters to specify nested fields.
:param unknown: Whether to exclude, include, or raise an error for unknown
fields in the data. Use `EXCLUDE`, `INCLUDE` or `RAISE`.
If `None`, the value for `self.unknown` is used.
:return: A dict of deserialized data
:rtype: dict
.. versionadded:: 1.0.0
.. versionchanged:: 3.0.0b7
This method returns the deserialized data rather than a ``(data, errors)`` duple.
A :exc:`ValidationError <marshmallow.exceptions.ValidationError>` is raised
if invalid data are passed.
]
return[call[name[self]._do_load, parameter[name[data], name[many]]]] | keyword[def] identifier[load] ( identifier[self] , identifier[data] , identifier[many] = keyword[None] , identifier[partial] = keyword[None] , identifier[unknown] = keyword[None] ):
literal[string]
keyword[return] identifier[self] . identifier[_do_load] (
identifier[data] , identifier[many] , identifier[partial] = identifier[partial] , identifier[unknown] = identifier[unknown] ,
identifier[postprocess] = keyword[True] ,
) | def load(self, data, many=None, partial=None, unknown=None):
"""Deserialize a data structure to an object defined by this Schema's fields.
:param dict data: The data to deserialize.
:param bool many: Whether to deserialize `data` as a collection. If `None`, the
value for `self.many` is used.
:param bool|tuple partial: Whether to ignore missing fields and not require
any fields declared. Propagates down to ``Nested`` fields as well. If
its value is an iterable, only missing fields listed in that iterable
will be ignored. Use dot delimiters to specify nested fields.
:param unknown: Whether to exclude, include, or raise an error for unknown
fields in the data. Use `EXCLUDE`, `INCLUDE` or `RAISE`.
If `None`, the value for `self.unknown` is used.
:return: A dict of deserialized data
:rtype: dict
.. versionadded:: 1.0.0
.. versionchanged:: 3.0.0b7
This method returns the deserialized data rather than a ``(data, errors)`` duple.
A :exc:`ValidationError <marshmallow.exceptions.ValidationError>` is raised
if invalid data are passed.
"""
return self._do_load(data, many, partial=partial, unknown=unknown, postprocess=True) |
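
A typical round trip with this API (the schema definition here is illustrative):

from marshmallow import Schema, fields, ValidationError

class UserSchema(Schema):
    name = fields.Str(required=True)
    age = fields.Int()

data = UserSchema().load({"name": "Ada", "age": 36})
assert data == {"name": "Ada", "age": 36}

try:
    UserSchema().load({"age": "not a number"})
except ValidationError as err:
    print(err.messages)   # {'name': [...], 'age': [...]}
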
def to_yaml(self):
"""Store an instance to the referenced YAML file."""
import yaml
with self.__reference__.open('w') as fp:
yaml.dump(self.asjsonld(), fp, default_flow_style=False) | def function[to_yaml, parameter[self]]:
constant[Store an instance to the referenced YAML file.]
import module[yaml]
with call[name[self].__reference__.open, parameter[constant[w]]] begin[:]
call[name[yaml].dump, parameter[call[name[self].asjsonld, parameter[]], name[fp]]] | keyword[def] identifier[to_yaml] ( identifier[self] ):
literal[string]
keyword[import] identifier[yaml]
keyword[with] identifier[self] . identifier[__reference__] . identifier[open] ( literal[string] ) keyword[as] identifier[fp] :
identifier[yaml] . identifier[dump] ( identifier[self] . identifier[asjsonld] (), identifier[fp] , identifier[default_flow_style] = keyword[False] ) | def to_yaml(self):
"""Store an instance to the referenced YAML file."""
import yaml
with self.__reference__.open('w') as fp:
yaml.dump(self.asjsonld(), fp, default_flow_style=False) # depends on [control=['with'], data=['fp']] |
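
default_flow_style=False makes yaml.dump emit block-style YAML; for example:

import yaml

doc = {'name': 'dataset', 'keywords': ['alpha', 'beta']}
print(yaml.dump(doc, default_flow_style=False))
# keywords:
# - alpha
# - beta
# name: dataset
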
def CallApiHandler(handler, args, token=None):
"""Handles API call to a given handler with given args and token."""
result = handler.Handle(args, token=token)
expected_type = handler.result_type
if expected_type is None:
expected_type = None.__class__
if result.__class__ != expected_type:
raise UnexpectedResultTypeError(
"Expected %s, but got %s." %
(expected_type.__name__, result.__class__.__name__))
return result | def function[CallApiHandler, parameter[handler, args, token]]:
constant[Handles API call to a given handler with given args and token.]
variable[result] assign[=] call[name[handler].Handle, parameter[name[args]]]
variable[expected_type] assign[=] name[handler].result_type
if compare[name[expected_type] is constant[None]] begin[:]
variable[expected_type] assign[=] constant[None].__class__
if compare[name[result].__class__ not_equal[!=] name[expected_type]] begin[:]
<ast.Raise object at 0x7da1b1cedcc0>
return[name[result]] | keyword[def] identifier[CallApiHandler] ( identifier[handler] , identifier[args] , identifier[token] = keyword[None] ):
literal[string]
identifier[result] = identifier[handler] . identifier[Handle] ( identifier[args] , identifier[token] = identifier[token] )
identifier[expected_type] = identifier[handler] . identifier[result_type]
keyword[if] identifier[expected_type] keyword[is] keyword[None] :
identifier[expected_type] = keyword[None] . identifier[__class__]
keyword[if] identifier[result] . identifier[__class__] != identifier[expected_type] :
keyword[raise] identifier[UnexpectedResultTypeError] (
literal[string] %
( identifier[expected_type] . identifier[__name__] , identifier[result] . identifier[__class__] . identifier[__name__] ))
keyword[return] identifier[result] | def CallApiHandler(handler, args, token=None):
"""Handles API call to a given handler with given args and token."""
result = handler.Handle(args, token=token)
expected_type = handler.result_type
if expected_type is None:
expected_type = None.__class__ # depends on [control=['if'], data=['expected_type']]
if result.__class__ != expected_type:
raise UnexpectedResultTypeError('Expected %s, but got %s.' % (expected_type.__name__, result.__class__.__name__)) # depends on [control=['if'], data=['expected_type']]
return result |
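
The None.__class__ substitution above makes "no declared result type" mean "must return None"; a compact demonstration with a stub handler:

class Handler(object):
    result_type = None          # handler declares it returns nothing

    def Handle(self, args, token=None):
        return None

expected_type = Handler.result_type
if expected_type is None:
    expected_type = None.__class__   # i.e. NoneType

result = Handler().Handle({})
assert result.__class__ == expected_type
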
def detectability(self,**kwargs):
"""
An a priori detectability proxy.
"""
distance_modulus = kwargs.get('distance_modulus')
distance = mod2dist(distance_modulus)
stellar_mass = kwargs.get('stellar_mass')
extension = kwargs.get('extension')
# Normalized to 10^3 Msolar at mod=18
norm = 10**3/mod2dist(18)**2
detect = stellar_mass / distance**2
detect /= norm | def function[detectability, parameter[self]]:
constant[
An a priori detectability proxy.
]
variable[distance_modulus] assign[=] call[name[kwargs].get, parameter[constant[distance_modulus]]]
variable[distance] assign[=] call[name[mod2dist], parameter[name[distance_modulus]]]
variable[stellar_mass] assign[=] call[name[kwargs].get, parameter[constant[stellar_mass]]]
variable[extension] assign[=] call[name[kwargs].get, parameter[constant[extension]]]
variable[norm] assign[=] binary_operation[binary_operation[constant[10] ** constant[3]] / binary_operation[call[name[mod2dist], parameter[constant[18]]] ** constant[2]]]
variable[detect] assign[=] binary_operation[name[stellar_mass] / binary_operation[name[distance] ** constant[2]]]
<ast.AugAssign object at 0x7da1b2345b10> | keyword[def] identifier[detectability] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
identifier[distance_modulus] = identifier[kwargs] . identifier[get] ( literal[string] )
identifier[distance] = identifier[mod2dist] ( identifier[distance_modulus] )
identifier[stellar_mass] = identifier[kwargs] . identifier[get] ( literal[string] )
identifier[extension] = identifier[kwargs] . identifier[get] ( literal[string] )
identifier[norm] = literal[int] ** literal[int] / identifier[mod2dist] ( literal[int] )** literal[int]
identifier[detect] = identifier[stellar_mass] / identifier[distance] ** literal[int]
identifier[detect] /= identifier[norm] | def detectability(self, **kwargs):
"""
An a priori detectability proxy.
"""
distance_modulus = kwargs.get('distance_modulus')
distance = mod2dist(distance_modulus)
stellar_mass = kwargs.get('stellar_mass')
extension = kwargs.get('extension')
# Normalized to 10^3 Msolar at mod=18
norm = 10 ** 3 / mod2dist(18) ** 2
detect = stellar_mass / distance ** 2
detect /= norm |
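
Since norm is built from the same mod2dist, the distance unit cancels and the proxy is exactly 1 for a 10^3 Msolar system at modulus 18; a quick check with a standard distance-modulus inversion standing in for mod2dist:

def mod2dist(distance_modulus):
    # One common convention: distance in parsecs, d = 10**(mu/5 + 1).
    # The source's mod2dist may use other units; they cancel in the ratio.
    return 10 ** (distance_modulus / 5.0 + 1)

norm = 10**3 / mod2dist(18)**2
detect = (10**3 / mod2dist(18)**2) / norm
assert abs(detect - 1.0) < 1e-12
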
def describe_features(self, traj):
"""Return a list of dictionaries describing the atom pair features.
Parameters
----------
traj : mdtraj.Trajectory
The trajectory to describe
Returns
-------
feature_descs : list of dict
Dictionary describing each feature with the following information
            about the atoms participating in each pair
- resnames: unique names of residues
- atominds: the two atom inds
- resseqs: unique residue sequence ids (not necessarily
0-indexed)
- resids: unique residue ids (0-indexed)
- featurizer: AtomPairsFeaturizer
- featuregroup: Distance.
- other info : Value of the exponent
"""
feature_descs = []
top = traj.topology
residue_indices = [[top.atom(i[0]).residue.index, top.atom(i[1]).residue.index] \
for i in self.atom_indices]
aind = []
resseqs = []
resnames = []
for ind,resid_ids in enumerate(residue_indices):
aind += [[i for i in self.atom_indices[ind]]]
resseqs += [[top.residue(ri).resSeq for ri in resid_ids]]
resnames += [[top.residue(ri).name for ri in resid_ids]]
zippy = itertools.product(["AtomPairs"], ["Distance"],
["Exponent {}".format(self.exponent)],
zip(aind, resseqs, residue_indices, resnames))
feature_descs.extend(dict_maker(zippy))
return feature_descs | def function[describe_features, parameter[self, traj]]:
constant[Return a list of dictionaries describing the atom pair features.
Parameters
----------
traj : mdtraj.Trajectory
The trajectory to describe
Returns
-------
feature_descs : list of dict
Dictionary describing each feature with the following information
            about the atoms participating in each pair
- resnames: unique names of residues
- atominds: the two atom inds
- resseqs: unique residue sequence ids (not necessarily
0-indexed)
- resids: unique residue ids (0-indexed)
- featurizer: AtomPairsFeaturizer
- featuregroup: Distance.
- other info : Value of the exponent
]
variable[feature_descs] assign[=] list[[]]
variable[top] assign[=] name[traj].topology
variable[residue_indices] assign[=] <ast.ListComp object at 0x7da1b0791720>
variable[aind] assign[=] list[[]]
variable[resseqs] assign[=] list[[]]
variable[resnames] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da1b07930a0>, <ast.Name object at 0x7da1b0791930>]]] in starred[call[name[enumerate], parameter[name[residue_indices]]]] begin[:]
<ast.AugAssign object at 0x7da1b0791090>
<ast.AugAssign object at 0x7da1b0786920>
<ast.AugAssign object at 0x7da1b0786860>
variable[zippy] assign[=] call[name[itertools].product, parameter[list[[<ast.Constant object at 0x7da1b07846a0>]], list[[<ast.Constant object at 0x7da1b0786e00>]], list[[<ast.Call object at 0x7da1b0784580>]], call[name[zip], parameter[name[aind], name[resseqs], name[residue_indices], name[resnames]]]]]
call[name[feature_descs].extend, parameter[call[name[dict_maker], parameter[name[zippy]]]]]
return[name[feature_descs]] | keyword[def] identifier[describe_features] ( identifier[self] , identifier[traj] ):
literal[string]
identifier[feature_descs] =[]
identifier[top] = identifier[traj] . identifier[topology]
identifier[residue_indices] =[[ identifier[top] . identifier[atom] ( identifier[i] [ literal[int] ]). identifier[residue] . identifier[index] , identifier[top] . identifier[atom] ( identifier[i] [ literal[int] ]). identifier[residue] . identifier[index] ] keyword[for] identifier[i] keyword[in] identifier[self] . identifier[atom_indices] ]
identifier[aind] =[]
identifier[resseqs] =[]
identifier[resnames] =[]
keyword[for] identifier[ind] , identifier[resid_ids] keyword[in] identifier[enumerate] ( identifier[residue_indices] ):
identifier[aind] +=[[ identifier[i] keyword[for] identifier[i] keyword[in] identifier[self] . identifier[atom_indices] [ identifier[ind] ]]]
identifier[resseqs] +=[[ identifier[top] . identifier[residue] ( identifier[ri] ). identifier[resSeq] keyword[for] identifier[ri] keyword[in] identifier[resid_ids] ]]
identifier[resnames] +=[[ identifier[top] . identifier[residue] ( identifier[ri] ). identifier[name] keyword[for] identifier[ri] keyword[in] identifier[resid_ids] ]]
identifier[zippy] = identifier[itertools] . identifier[product] ([ literal[string] ],[ literal[string] ],
[ literal[string] . identifier[format] ( identifier[self] . identifier[exponent] )],
identifier[zip] ( identifier[aind] , identifier[resseqs] , identifier[residue_indices] , identifier[resnames] ))
identifier[feature_descs] . identifier[extend] ( identifier[dict_maker] ( identifier[zippy] ))
keyword[return] identifier[feature_descs] | def describe_features(self, traj):
"""Return a list of dictionaries describing the atom pair features.
Parameters
----------
traj : mdtraj.Trajectory
The trajectory to describe
Returns
-------
feature_descs : list of dict
Dictionary describing each feature with the following information
            about the atoms participating in each pair
- resnames: unique names of residues
- atominds: the two atom inds
- resseqs: unique residue sequence ids (not necessarily
0-indexed)
- resids: unique residue ids (0-indexed)
- featurizer: AtomPairsFeaturizer
- featuregroup: Distance.
- other info : Value of the exponent
"""
feature_descs = []
top = traj.topology
residue_indices = [[top.atom(i[0]).residue.index, top.atom(i[1]).residue.index] for i in self.atom_indices]
aind = []
resseqs = []
resnames = []
for (ind, resid_ids) in enumerate(residue_indices):
aind += [[i for i in self.atom_indices[ind]]]
resseqs += [[top.residue(ri).resSeq for ri in resid_ids]]
resnames += [[top.residue(ri).name for ri in resid_ids]] # depends on [control=['for'], data=[]]
zippy = itertools.product(['AtomPairs'], ['Distance'], ['Exponent {}'.format(self.exponent)], zip(aind, resseqs, residue_indices, resnames))
feature_descs.extend(dict_maker(zippy))
return feature_descs |
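
The zippy construction relies on itertools.product broadcasting the single-element label lists over the zipped per-feature tuples; in miniature:

import itertools

aind = [[0, 1], [2, 3]]
resseqs = [[10, 11], [12, 13]]
zippy = itertools.product(["AtomPairs"], ["Distance"], zip(aind, resseqs))
for featurizer, group, info in zippy:
    print(featurizer, group, info)
# AtomPairs Distance ([0, 1], [10, 11])
# AtomPairs Distance ([2, 3], [12, 13])
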
def endpoint_get(service, region=None, profile=None, interface=None, **connection_args):
'''
Return a specific endpoint (keystone endpoint-get)
CLI Example:
.. code-block:: bash
salt 'v2' keystone.endpoint_get nova [region=RegionOne]
salt 'v3' keystone.endpoint_get nova interface=admin [region=RegionOne]
'''
auth(profile, **connection_args)
services = service_list(profile, **connection_args)
if service not in services:
return {'Error': 'Could not find the specified service'}
service_id = services[service]['id']
endpoints = endpoint_list(profile, **connection_args)
e = [_f for _f in [e
if e['service_id'] == service_id and
(e['region'] == region if region else True) and
(e['interface'] == interface if interface else True)
else None for e in endpoints.values()] if _f]
if len(e) > 1:
return {'Error': 'Multiple endpoints found ({0}) for the {1} service. Please specify region.'.format(e, service)}
if len(e) == 1:
return e[0]
return {'Error': 'Could not find endpoint for the specified service'} | def function[endpoint_get, parameter[service, region, profile, interface]]:
constant[
Return a specific endpoint (keystone endpoint-get)
CLI Example:
.. code-block:: bash
salt 'v2' keystone.endpoint_get nova [region=RegionOne]
salt 'v3' keystone.endpoint_get nova interface=admin [region=RegionOne]
]
call[name[auth], parameter[name[profile]]]
variable[services] assign[=] call[name[service_list], parameter[name[profile]]]
if compare[name[service] <ast.NotIn object at 0x7da2590d7190> name[services]] begin[:]
return[dictionary[[<ast.Constant object at 0x7da20c76f700>], [<ast.Constant object at 0x7da20c76cc10>]]]
variable[service_id] assign[=] call[call[name[services]][name[service]]][constant[id]]
variable[endpoints] assign[=] call[name[endpoint_list], parameter[name[profile]]]
variable[e] assign[=] <ast.ListComp object at 0x7da20c76d990>
if compare[call[name[len], parameter[name[e]]] greater[>] constant[1]] begin[:]
return[dictionary[[<ast.Constant object at 0x7da18bc732e0>], [<ast.Call object at 0x7da18bc719f0>]]]
if compare[call[name[len], parameter[name[e]]] equal[==] constant[1]] begin[:]
return[call[name[e]][constant[0]]]
return[dictionary[[<ast.Constant object at 0x7da18bc72680>], [<ast.Constant object at 0x7da18bc717e0>]]] | keyword[def] identifier[endpoint_get] ( identifier[service] , identifier[region] = keyword[None] , identifier[profile] = keyword[None] , identifier[interface] = keyword[None] ,** identifier[connection_args] ):
literal[string]
identifier[auth] ( identifier[profile] ,** identifier[connection_args] )
identifier[services] = identifier[service_list] ( identifier[profile] ,** identifier[connection_args] )
keyword[if] identifier[service] keyword[not] keyword[in] identifier[services] :
keyword[return] { literal[string] : literal[string] }
identifier[service_id] = identifier[services] [ identifier[service] ][ literal[string] ]
identifier[endpoints] = identifier[endpoint_list] ( identifier[profile] ,** identifier[connection_args] )
identifier[e] =[ identifier[_f] keyword[for] identifier[_f] keyword[in] [ identifier[e]
keyword[if] identifier[e] [ literal[string] ]== identifier[service_id] keyword[and]
( identifier[e] [ literal[string] ]== identifier[region] keyword[if] identifier[region] keyword[else] keyword[True] ) keyword[and]
( identifier[e] [ literal[string] ]== identifier[interface] keyword[if] identifier[interface] keyword[else] keyword[True] )
keyword[else] keyword[None] keyword[for] identifier[e] keyword[in] identifier[endpoints] . identifier[values] ()] keyword[if] identifier[_f] ]
keyword[if] identifier[len] ( identifier[e] )> literal[int] :
keyword[return] { literal[string] : literal[string] . identifier[format] ( identifier[e] , identifier[service] )}
keyword[if] identifier[len] ( identifier[e] )== literal[int] :
keyword[return] identifier[e] [ literal[int] ]
keyword[return] { literal[string] : literal[string] } | def endpoint_get(service, region=None, profile=None, interface=None, **connection_args):
"""
Return a specific endpoint (keystone endpoint-get)
CLI Example:
.. code-block:: bash
salt 'v2' keystone.endpoint_get nova [region=RegionOne]
salt 'v3' keystone.endpoint_get nova interface=admin [region=RegionOne]
"""
auth(profile, **connection_args)
services = service_list(profile, **connection_args)
if service not in services:
return {'Error': 'Could not find the specified service'} # depends on [control=['if'], data=[]]
service_id = services[service]['id']
endpoints = endpoint_list(profile, **connection_args)
e = [_f for _f in [e if e['service_id'] == service_id and (e['region'] == region if region else True) and (e['interface'] == interface if interface else True) else None for e in endpoints.values()] if _f]
if len(e) > 1:
return {'Error': 'Multiple endpoints found ({0}) for the {1} service. Please specify region.'.format(e, service)} # depends on [control=['if'], data=[]]
if len(e) == 1:
return e[0] # depends on [control=['if'], data=[]]
return {'Error': 'Could not find endpoint for the specified service'} |
def _make_standalone_handler(preamble):
"""Class factory used so that preamble can be passed to :py:class:`_StandaloneHandler`
without use of static members"""
class _StandaloneHandler(BaseHTTPRequestHandler, object):
"""HTTP Handler for standalone mode"""
def do_GET(self):
self.send_response(200)
self.send_header('Content-type', 'application/json; charset=utf-8')
self.send_header('Content-length', len(preamble))
self.end_headers()
self.wfile.write(preamble.encode('utf-8'))
def log_message(self, format, *args):
# suppress logging on requests
return
return _StandaloneHandler | def function[_make_standalone_handler, parameter[preamble]]:
constant[Class factory used so that preamble can be passed to :py:class:`_StandaloneHandler`
without use of static members]
class class[_StandaloneHandler, parameter[]] begin[:]
constant[HTTP Handler for standalone mode]
def function[do_GET, parameter[self]]:
call[name[self].send_response, parameter[constant[200]]]
call[name[self].send_header, parameter[constant[Content-type], constant[application/json; charset=utf-8]]]
call[name[self].send_header, parameter[constant[Content-length], call[name[len], parameter[name[preamble]]]]]
call[name[self].end_headers, parameter[]]
call[name[self].wfile.write, parameter[call[name[preamble].encode, parameter[constant[utf-8]]]]]
def function[log_message, parameter[self, format]]:
return[None]
return[name[_StandaloneHandler]] | keyword[def] identifier[_make_standalone_handler] ( identifier[preamble] ):
literal[string]
keyword[class] identifier[_StandaloneHandler] ( identifier[BaseHTTPRequestHandler] , identifier[object] ):
literal[string]
keyword[def] identifier[do_GET] ( identifier[self] ):
identifier[self] . identifier[send_response] ( literal[int] )
identifier[self] . identifier[send_header] ( literal[string] , literal[string] )
identifier[self] . identifier[send_header] ( literal[string] , identifier[len] ( identifier[preamble] ))
identifier[self] . identifier[end_headers] ()
identifier[self] . identifier[wfile] . identifier[write] ( identifier[preamble] . identifier[encode] ( literal[string] ))
keyword[def] identifier[log_message] ( identifier[self] , identifier[format] ,* identifier[args] ):
keyword[return]
keyword[return] identifier[_StandaloneHandler] | def _make_standalone_handler(preamble):
"""Class factory used so that preamble can be passed to :py:class:`_StandaloneHandler`
without use of static members"""
class _StandaloneHandler(BaseHTTPRequestHandler, object):
"""HTTP Handler for standalone mode"""
def do_GET(self):
self.send_response(200)
self.send_header('Content-type', 'application/json; charset=utf-8')
self.send_header('Content-length', len(preamble))
self.end_headers()
self.wfile.write(preamble.encode('utf-8'))
def log_message(self, format, *args):
# suppress logging on requests
return
return _StandaloneHandler |
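
A usage sketch with the standard library server (address and preamble are placeholders; BaseHTTPRequestHandler comes from http.server on Python 3):

from http.server import HTTPServer

preamble = '{"status": "ok"}'                 # hypothetical JSON payload
Handler = _make_standalone_handler(preamble)

server = HTTPServer(('127.0.0.1', 8080), Handler)
# server.serve_forever()   # every GET now returns the preamble verbatim
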
def FitRadius(z, SampleFreq, Damping, HistBins=100):
"""
Fits the dynamical potential to the Steady
State Potential by varying the Radius.
z : ndarray
Position data
SampleFreq : float
frequency at which the position data was
sampled
Damping : float
value of damping (in radians/second)
HistBins : int
number of values at which to evaluate
the steady state potential / perform
the fitting to the dynamical potential
Returns
-------
Radius : float
Radius of the nanoparticle
RadiusError : float
One Standard Deviation Error in the Radius from the Fit
(doesn't take into account possible error in damping)
"""
dt = 1/SampleFreq
boltzmann=scipy.constants.Boltzmann
temp=300 # why halved??
density=1800
SteadyStatePotnl = list(steady_state_potential(z, HistBins=HistBins))
yoffset=min(SteadyStatePotnl[1])
SteadyStatePotnl[1] -= yoffset
SpringPotnlFunc = dynamical_potential(z, dt)
SpringPotnl = SpringPotnlFunc(z)
kBT_Gamma = temp*boltzmann*1/Damping
#FitSoln = least_squares(GetResiduals, 50, args=(SteadyStatePotnl, SpringPotnlFunc, kBT_Gamma), full_output=True)
#print(FitSoln)
#RADIUS = FitSoln['x'][0]
DynamicPotentialFunc = MakeDynamicPotentialFunc(kBT_Gamma, density, SpringPotnlFunc)
FitSoln = curve_fit(DynamicPotentialFunc, SteadyStatePotnl[0], SteadyStatePotnl[1], p0 = 50)
print(FitSoln)
popt, pcov = FitSoln
perr = np.sqrt(np.diag(pcov))
Radius, RadiusError = popt[0], perr[0]
mass=((4/3)*np.pi*((Radius*10**-9)**3))*density
yfit=(kBT_Gamma/mass)
Y = yfit*SpringPotnl
fig, ax = plt.subplots()
ax.plot(SteadyStatePotnl[0], SteadyStatePotnl[1], 'bo', label="Steady State Potential")
plt.plot(z,Y, 'r-', label="Dynamical Potential")
ax.legend(loc='best')
ax.set_ylabel('U ($k_{B} T $ Joules)')
ax.set_xlabel('Distance (mV)')
plt.tight_layout()
plt.show()
return Radius, RadiusError | def function[FitRadius, parameter[z, SampleFreq, Damping, HistBins]]:
constant[
Fits the dynamical potential to the Steady
State Potential by varying the Radius.
z : ndarray
Position data
SampleFreq : float
frequency at which the position data was
sampled
Damping : float
value of damping (in radians/second)
HistBins : int
number of values at which to evaluate
the steady state potential / perform
the fitting to the dynamical potential
Returns
-------
Radius : float
Radius of the nanoparticle
RadiusError : float
One Standard Deviation Error in the Radius from the Fit
(doesn't take into account possible error in damping)
]
variable[dt] assign[=] binary_operation[constant[1] / name[SampleFreq]]
variable[boltzmann] assign[=] name[scipy].constants.Boltzmann
variable[temp] assign[=] constant[300]
variable[density] assign[=] constant[1800]
variable[SteadyStatePotnl] assign[=] call[name[list], parameter[call[name[steady_state_potential], parameter[name[z]]]]]
variable[yoffset] assign[=] call[name[min], parameter[call[name[SteadyStatePotnl]][constant[1]]]]
<ast.AugAssign object at 0x7da1b27276d0>
variable[SpringPotnlFunc] assign[=] call[name[dynamical_potential], parameter[name[z], name[dt]]]
variable[SpringPotnl] assign[=] call[name[SpringPotnlFunc], parameter[name[z]]]
variable[kBT_Gamma] assign[=] binary_operation[binary_operation[binary_operation[name[temp] * name[boltzmann]] * constant[1]] / name[Damping]]
variable[DynamicPotentialFunc] assign[=] call[name[MakeDynamicPotentialFunc], parameter[name[kBT_Gamma], name[density], name[SpringPotnlFunc]]]
variable[FitSoln] assign[=] call[name[curve_fit], parameter[name[DynamicPotentialFunc], call[name[SteadyStatePotnl]][constant[0]], call[name[SteadyStatePotnl]][constant[1]]]]
call[name[print], parameter[name[FitSoln]]]
<ast.Tuple object at 0x7da1b271e080> assign[=] name[FitSoln]
variable[perr] assign[=] call[name[np].sqrt, parameter[call[name[np].diag, parameter[name[pcov]]]]]
<ast.Tuple object at 0x7da1b271c820> assign[=] tuple[[<ast.Subscript object at 0x7da1b271c460>, <ast.Subscript object at 0x7da1b271d5d0>]]
variable[mass] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[constant[4] / constant[3]] * name[np].pi] * binary_operation[binary_operation[name[Radius] * binary_operation[constant[10] ** <ast.UnaryOp object at 0x7da1b271d2a0>]] ** constant[3]]] * name[density]]
variable[yfit] assign[=] binary_operation[name[kBT_Gamma] / name[mass]]
variable[Y] assign[=] binary_operation[name[yfit] * name[SpringPotnl]]
<ast.Tuple object at 0x7da1b271ceb0> assign[=] call[name[plt].subplots, parameter[]]
call[name[ax].plot, parameter[call[name[SteadyStatePotnl]][constant[0]], call[name[SteadyStatePotnl]][constant[1]], constant[bo]]]
call[name[plt].plot, parameter[name[z], name[Y], constant[r-]]]
call[name[ax].legend, parameter[]]
call[name[ax].set_ylabel, parameter[constant[U ($k_{B} T $ Joules)]]]
call[name[ax].set_xlabel, parameter[constant[Distance (mV)]]]
call[name[plt].tight_layout, parameter[]]
call[name[plt].show, parameter[]]
return[tuple[[<ast.Name object at 0x7da1b27175e0>, <ast.Name object at 0x7da1b2717670>]]] | keyword[def] identifier[FitRadius] ( identifier[z] , identifier[SampleFreq] , identifier[Damping] , identifier[HistBins] = literal[int] ):
literal[string]
identifier[dt] = literal[int] / identifier[SampleFreq]
identifier[boltzmann] = identifier[scipy] . identifier[constants] . identifier[Boltzmann]
identifier[temp] = literal[int]
identifier[density] = literal[int]
identifier[SteadyStatePotnl] = identifier[list] ( identifier[steady_state_potential] ( identifier[z] , identifier[HistBins] = identifier[HistBins] ))
identifier[yoffset] = identifier[min] ( identifier[SteadyStatePotnl] [ literal[int] ])
identifier[SteadyStatePotnl] [ literal[int] ]-= identifier[yoffset]
identifier[SpringPotnlFunc] = identifier[dynamical_potential] ( identifier[z] , identifier[dt] )
identifier[SpringPotnl] = identifier[SpringPotnlFunc] ( identifier[z] )
identifier[kBT_Gamma] = identifier[temp] * identifier[boltzmann] * literal[int] / identifier[Damping]
identifier[DynamicPotentialFunc] = identifier[MakeDynamicPotentialFunc] ( identifier[kBT_Gamma] , identifier[density] , identifier[SpringPotnlFunc] )
identifier[FitSoln] = identifier[curve_fit] ( identifier[DynamicPotentialFunc] , identifier[SteadyStatePotnl] [ literal[int] ], identifier[SteadyStatePotnl] [ literal[int] ], identifier[p0] = literal[int] )
identifier[print] ( identifier[FitSoln] )
identifier[popt] , identifier[pcov] = identifier[FitSoln]
identifier[perr] = identifier[np] . identifier[sqrt] ( identifier[np] . identifier[diag] ( identifier[pcov] ))
identifier[Radius] , identifier[RadiusError] = identifier[popt] [ literal[int] ], identifier[perr] [ literal[int] ]
identifier[mass] =(( literal[int] / literal[int] )* identifier[np] . identifier[pi] *(( identifier[Radius] * literal[int] **- literal[int] )** literal[int] ))* identifier[density]
identifier[yfit] =( identifier[kBT_Gamma] / identifier[mass] )
identifier[Y] = identifier[yfit] * identifier[SpringPotnl]
identifier[fig] , identifier[ax] = identifier[plt] . identifier[subplots] ()
identifier[ax] . identifier[plot] ( identifier[SteadyStatePotnl] [ literal[int] ], identifier[SteadyStatePotnl] [ literal[int] ], literal[string] , identifier[label] = literal[string] )
identifier[plt] . identifier[plot] ( identifier[z] , identifier[Y] , literal[string] , identifier[label] = literal[string] )
identifier[ax] . identifier[legend] ( identifier[loc] = literal[string] )
identifier[ax] . identifier[set_ylabel] ( literal[string] )
identifier[ax] . identifier[set_xlabel] ( literal[string] )
identifier[plt] . identifier[tight_layout] ()
identifier[plt] . identifier[show] ()
keyword[return] identifier[Radius] , identifier[RadiusError] | def FitRadius(z, SampleFreq, Damping, HistBins=100):
"""
Fits the dynamical potential to the Steady
State Potential by varying the Radius.
z : ndarray
Position data
SampleFreq : float
frequency at which the position data was
sampled
Damping : float
value of damping (in radians/second)
HistBins : int
number of values at which to evaluate
the steady state potential / perform
the fitting to the dynamical potential
Returns
-------
Radius : float
Radius of the nanoparticle
RadiusError : float
One Standard Deviation Error in the Radius from the Fit
(doesn't take into account possible error in damping)
"""
dt = 1 / SampleFreq
boltzmann = scipy.constants.Boltzmann
temp = 300 # why halved??
density = 1800
SteadyStatePotnl = list(steady_state_potential(z, HistBins=HistBins))
yoffset = min(SteadyStatePotnl[1])
SteadyStatePotnl[1] -= yoffset
SpringPotnlFunc = dynamical_potential(z, dt)
SpringPotnl = SpringPotnlFunc(z)
kBT_Gamma = temp * boltzmann * 1 / Damping
#FitSoln = least_squares(GetResiduals, 50, args=(SteadyStatePotnl, SpringPotnlFunc, kBT_Gamma), full_output=True)
#print(FitSoln)
#RADIUS = FitSoln['x'][0]
DynamicPotentialFunc = MakeDynamicPotentialFunc(kBT_Gamma, density, SpringPotnlFunc)
FitSoln = curve_fit(DynamicPotentialFunc, SteadyStatePotnl[0], SteadyStatePotnl[1], p0=50)
print(FitSoln)
(popt, pcov) = FitSoln
perr = np.sqrt(np.diag(pcov))
(Radius, RadiusError) = (popt[0], perr[0])
mass = 4 / 3 * np.pi * (Radius * 10 ** (-9)) ** 3 * density
yfit = kBT_Gamma / mass
Y = yfit * SpringPotnl
(fig, ax) = plt.subplots()
ax.plot(SteadyStatePotnl[0], SteadyStatePotnl[1], 'bo', label='Steady State Potential')
plt.plot(z, Y, 'r-', label='Dynamical Potential')
ax.legend(loc='best')
ax.set_ylabel('U ($k_{B} T $ Joules)')
ax.set_xlabel('Distance (mV)')
plt.tight_layout()
plt.show()
return (Radius, RadiusError) |
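
The core fitting step is scipy's curve_fit with the radius as the single free parameter; a stripped-down sketch on synthetic data (the 1/R**3 model is a toy stand-in mirroring the mass term above):

import numpy as np
from scipy.optimize import curve_fit

def model(x, radius):
    # Toy stand-in for DynamicPotentialFunc: amplitude scales as 1/R**3.
    return x / radius**3

x = np.linspace(1.0, 10.0, 50)
y = model(x, 50.0)

popt, pcov = curve_fit(model, x, y, p0=50)
perr = np.sqrt(np.diag(pcov))        # one-sigma errors from the covariance
radius, radius_err = popt[0], perr[0]
assert abs(radius - 50.0) < 1e-6
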
def format_baseline_for_output(baseline):
"""
:type baseline: dict
:rtype: str
"""
for filename, secret_list in baseline['results'].items():
baseline['results'][filename] = sorted(
secret_list,
key=lambda x: (x['line_number'], x['hashed_secret'],),
)
return json.dumps(
baseline,
indent=2,
sort_keys=True,
separators=(',', ': '),
) | def function[format_baseline_for_output, parameter[baseline]]:
constant[
:type baseline: dict
:rtype: str
]
for taget[tuple[[<ast.Name object at 0x7da20c6e64d0>, <ast.Name object at 0x7da20c6e75e0>]]] in starred[call[call[name[baseline]][constant[results]].items, parameter[]]] begin[:]
call[call[name[baseline]][constant[results]]][name[filename]] assign[=] call[name[sorted], parameter[name[secret_list]]]
return[call[name[json].dumps, parameter[name[baseline]]]] | keyword[def] identifier[format_baseline_for_output] ( identifier[baseline] ):
literal[string]
keyword[for] identifier[filename] , identifier[secret_list] keyword[in] identifier[baseline] [ literal[string] ]. identifier[items] ():
identifier[baseline] [ literal[string] ][ identifier[filename] ]= identifier[sorted] (
identifier[secret_list] ,
identifier[key] = keyword[lambda] identifier[x] :( identifier[x] [ literal[string] ], identifier[x] [ literal[string] ],),
)
keyword[return] identifier[json] . identifier[dumps] (
identifier[baseline] ,
identifier[indent] = literal[int] ,
identifier[sort_keys] = keyword[True] ,
identifier[separators] =( literal[string] , literal[string] ),
) | def format_baseline_for_output(baseline):
"""
:type baseline: dict
:rtype: str
"""
for (filename, secret_list) in baseline['results'].items():
baseline['results'][filename] = sorted(secret_list, key=lambda x: (x['line_number'], x['hashed_secret'])) # depends on [control=['for'], data=[]]
return json.dumps(baseline, indent=2, sort_keys=True, separators=(',', ': ')) |
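
Using the function above on a minimal baseline shows the effect of the sort key: secrets are ordered by (line_number, hashed_secret) and top-level keys are sorted, so the serialized baseline diffs cleanly between runs.

baseline = {
    'results': {
        'app.py': [
            {'line_number': 9, 'hashed_secret': 'bbb'},
            {'line_number': 2, 'hashed_secret': 'aaa'},
        ],
    },
}
print(format_baseline_for_output(baseline))
# 'aaa' (line 2) now precedes 'bbb' (line 9) in the emitted JSON.
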
def patch_worker_factory():
"""
    Patches the ``luigi.interface._WorkerSchedulerFactory`` to include sandboxing information
    when creating a worker instance.
"""
def create_worker(self, scheduler, worker_processes, assistant=False):
worker = luigi.worker.Worker(scheduler=scheduler, worker_processes=worker_processes,
assistant=assistant, worker_id=os.getenv("LAW_SANDBOX_WORKER_ID"))
worker._first_task = os.getenv("LAW_SANDBOX_WORKER_TASK")
return worker
luigi.interface._WorkerSchedulerFactory.create_worker = create_worker | def function[patch_worker_factory, parameter[]]:
constant[
    Patches the ``luigi.interface._WorkerSchedulerFactory`` to include sandboxing information
    when creating a worker instance.
]
def function[create_worker, parameter[self, scheduler, worker_processes, assistant]]:
variable[worker] assign[=] call[name[luigi].worker.Worker, parameter[]]
name[worker]._first_task assign[=] call[name[os].getenv, parameter[constant[LAW_SANDBOX_WORKER_TASK]]]
return[name[worker]]
name[luigi].interface._WorkerSchedulerFactory.create_worker assign[=] name[create_worker] | keyword[def] identifier[patch_worker_factory] ():
literal[string]
keyword[def] identifier[create_worker] ( identifier[self] , identifier[scheduler] , identifier[worker_processes] , identifier[assistant] = keyword[False] ):
identifier[worker] = identifier[luigi] . identifier[worker] . identifier[Worker] ( identifier[scheduler] = identifier[scheduler] , identifier[worker_processes] = identifier[worker_processes] ,
identifier[assistant] = identifier[assistant] , identifier[worker_id] = identifier[os] . identifier[getenv] ( literal[string] ))
identifier[worker] . identifier[_first_task] = identifier[os] . identifier[getenv] ( literal[string] )
keyword[return] identifier[worker]
identifier[luigi] . identifier[interface] . identifier[_WorkerSchedulerFactory] . identifier[create_worker] = identifier[create_worker] | def patch_worker_factory():
"""
Patches the ``luigi.interface._WorkerSchedulerFactory`` to include sandboxing information when
    creating a worker instance.
"""
def create_worker(self, scheduler, worker_processes, assistant=False):
worker = luigi.worker.Worker(scheduler=scheduler, worker_processes=worker_processes, assistant=assistant, worker_id=os.getenv('LAW_SANDBOX_WORKER_ID'))
worker._first_task = os.getenv('LAW_SANDBOX_WORKER_TASK')
return worker
luigi.interface._WorkerSchedulerFactory.create_worker = create_worker |
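A sketch of how the patch above would be driven. The two environment variable names are the ones the patch reads; the values and the call ordering are assumptions.

import os

os.environ["LAW_SANDBOX_WORKER_ID"] = "worker-0"          # assumed value
os.environ["LAW_SANDBOX_WORKER_TASK"] = "MyTask_abc123"   # assumed value
patch_worker_factory()  # must run before luigi.interface creates its worker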
def collect_instance(self, nick, host, port, unix_socket, auth):
"""Collect metrics from a single Redis instance
:param str nick: nickname of redis instance
:param str host: redis host
:param int port: redis port
:param str unix_socket: unix socket, if applicable
:param str auth: authentication password
"""
# Connect to redis and get the info
info = self._get_info(host, port, unix_socket, auth)
if info is None:
return
# The structure should include the port for multiple instances per
# server
data = dict()
        # Role needs to be handled outside the _KEYS dict
        # since the value is a string, not an int / float
# Also, master_sync_in_progress is only available if the
# redis instance is a slave, so default it here so that
# the metric is cleared if the instance flips from slave
# to master
if 'role' in info:
if info['role'] == "master":
data['replication.master'] = 1
data['replication.master_sync_in_progress'] = 0
else:
data['replication.master'] = 0
# Connect to redis and get the maxmemory config value
# Then calculate the % maxmemory of memory used
maxmemory_config = self._get_config(host, port, unix_socket, auth,
'maxmemory')
if maxmemory_config and 'maxmemory' in maxmemory_config.keys():
maxmemory = float(maxmemory_config['maxmemory'])
# Only report % used if maxmemory is a non zero value
if maxmemory == 0:
maxmemory_percent = 0.0
else:
maxmemory_percent = info['used_memory'] / maxmemory * 100
maxmemory_percent = round(maxmemory_percent, 2)
data['memory.used_percent'] = float("%.2f" % maxmemory_percent)
# Iterate over the top level keys
for key in self._KEYS:
if self._KEYS[key] in info:
data[key] = info[self._KEYS[key]]
# Iterate over renamed keys for 2.6 support
for key in self._RENAMED_KEYS:
if self._RENAMED_KEYS[key] in info:
data[key] = info[self._RENAMED_KEYS[key]]
        # Look for database-specific stats
for dbnum in range(0, int(self.config.get('databases',
self._DATABASE_COUNT))):
db = 'db%i' % dbnum
if db in info:
for key in info[db]:
data['%s.%s' % (db, key)] = info[db][key]
# Time since last save
for key in ['last_save_time', 'rdb_last_save_time']:
if key in info:
data['last_save.time_since'] = int(time.time()) - info[key]
# Publish the data to graphite
for key in data:
self.publish(self._publish_key(nick, key),
data[key],
precision=self._precision(data[key]),
metric_type='GAUGE') | def function[collect_instance, parameter[self, nick, host, port, unix_socket, auth]]:
constant[Collect metrics from a single Redis instance
:param str nick: nickname of redis instance
:param str host: redis host
:param int port: redis port
:param str unix_socket: unix socket, if applicable
:param str auth: authentication password
]
variable[info] assign[=] call[name[self]._get_info, parameter[name[host], name[port], name[unix_socket], name[auth]]]
if compare[name[info] is constant[None]] begin[:]
return[None]
variable[data] assign[=] call[name[dict], parameter[]]
if compare[constant[role] in name[info]] begin[:]
if compare[call[name[info]][constant[role]] equal[==] constant[master]] begin[:]
call[name[data]][constant[replication.master]] assign[=] constant[1]
call[name[data]][constant[replication.master_sync_in_progress]] assign[=] constant[0]
variable[maxmemory_config] assign[=] call[name[self]._get_config, parameter[name[host], name[port], name[unix_socket], name[auth], constant[maxmemory]]]
if <ast.BoolOp object at 0x7da20c6c4b20> begin[:]
variable[maxmemory] assign[=] call[name[float], parameter[call[name[maxmemory_config]][constant[maxmemory]]]]
if compare[name[maxmemory] equal[==] constant[0]] begin[:]
variable[maxmemory_percent] assign[=] constant[0.0]
call[name[data]][constant[memory.used_percent]] assign[=] call[name[float], parameter[binary_operation[constant[%.2f] <ast.Mod object at 0x7da2590d6920> name[maxmemory_percent]]]]
for taget[name[key]] in starred[name[self]._KEYS] begin[:]
if compare[call[name[self]._KEYS][name[key]] in name[info]] begin[:]
call[name[data]][name[key]] assign[=] call[name[info]][call[name[self]._KEYS][name[key]]]
for taget[name[key]] in starred[name[self]._RENAMED_KEYS] begin[:]
if compare[call[name[self]._RENAMED_KEYS][name[key]] in name[info]] begin[:]
call[name[data]][name[key]] assign[=] call[name[info]][call[name[self]._RENAMED_KEYS][name[key]]]
for taget[name[dbnum]] in starred[call[name[range], parameter[constant[0], call[name[int], parameter[call[name[self].config.get, parameter[constant[databases], name[self]._DATABASE_COUNT]]]]]]] begin[:]
variable[db] assign[=] binary_operation[constant[db%i] <ast.Mod object at 0x7da2590d6920> name[dbnum]]
if compare[name[db] in name[info]] begin[:]
for taget[name[key]] in starred[call[name[info]][name[db]]] begin[:]
call[name[data]][binary_operation[constant[%s.%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da20c6c79a0>, <ast.Name object at 0x7da20c6c7f10>]]]] assign[=] call[call[name[info]][name[db]]][name[key]]
for taget[name[key]] in starred[list[[<ast.Constant object at 0x7da1b194d840>, <ast.Constant object at 0x7da1b194da80>]]] begin[:]
if compare[name[key] in name[info]] begin[:]
call[name[data]][constant[last_save.time_since]] assign[=] binary_operation[call[name[int], parameter[call[name[time].time, parameter[]]]] - call[name[info]][name[key]]]
for taget[name[key]] in starred[name[data]] begin[:]
call[name[self].publish, parameter[call[name[self]._publish_key, parameter[name[nick], name[key]]], call[name[data]][name[key]]]] | keyword[def] identifier[collect_instance] ( identifier[self] , identifier[nick] , identifier[host] , identifier[port] , identifier[unix_socket] , identifier[auth] ):
literal[string]
identifier[info] = identifier[self] . identifier[_get_info] ( identifier[host] , identifier[port] , identifier[unix_socket] , identifier[auth] )
keyword[if] identifier[info] keyword[is] keyword[None] :
keyword[return]
identifier[data] = identifier[dict] ()
keyword[if] literal[string] keyword[in] identifier[info] :
keyword[if] identifier[info] [ literal[string] ]== literal[string] :
identifier[data] [ literal[string] ]= literal[int]
identifier[data] [ literal[string] ]= literal[int]
keyword[else] :
identifier[data] [ literal[string] ]= literal[int]
identifier[maxmemory_config] = identifier[self] . identifier[_get_config] ( identifier[host] , identifier[port] , identifier[unix_socket] , identifier[auth] ,
literal[string] )
keyword[if] identifier[maxmemory_config] keyword[and] literal[string] keyword[in] identifier[maxmemory_config] . identifier[keys] ():
identifier[maxmemory] = identifier[float] ( identifier[maxmemory_config] [ literal[string] ])
keyword[if] identifier[maxmemory] == literal[int] :
identifier[maxmemory_percent] = literal[int]
keyword[else] :
identifier[maxmemory_percent] = identifier[info] [ literal[string] ]/ identifier[maxmemory] * literal[int]
identifier[maxmemory_percent] = identifier[round] ( identifier[maxmemory_percent] , literal[int] )
identifier[data] [ literal[string] ]= identifier[float] ( literal[string] % identifier[maxmemory_percent] )
keyword[for] identifier[key] keyword[in] identifier[self] . identifier[_KEYS] :
keyword[if] identifier[self] . identifier[_KEYS] [ identifier[key] ] keyword[in] identifier[info] :
identifier[data] [ identifier[key] ]= identifier[info] [ identifier[self] . identifier[_KEYS] [ identifier[key] ]]
keyword[for] identifier[key] keyword[in] identifier[self] . identifier[_RENAMED_KEYS] :
keyword[if] identifier[self] . identifier[_RENAMED_KEYS] [ identifier[key] ] keyword[in] identifier[info] :
identifier[data] [ identifier[key] ]= identifier[info] [ identifier[self] . identifier[_RENAMED_KEYS] [ identifier[key] ]]
keyword[for] identifier[dbnum] keyword[in] identifier[range] ( literal[int] , identifier[int] ( identifier[self] . identifier[config] . identifier[get] ( literal[string] ,
identifier[self] . identifier[_DATABASE_COUNT] ))):
identifier[db] = literal[string] % identifier[dbnum]
keyword[if] identifier[db] keyword[in] identifier[info] :
keyword[for] identifier[key] keyword[in] identifier[info] [ identifier[db] ]:
identifier[data] [ literal[string] %( identifier[db] , identifier[key] )]= identifier[info] [ identifier[db] ][ identifier[key] ]
keyword[for] identifier[key] keyword[in] [ literal[string] , literal[string] ]:
keyword[if] identifier[key] keyword[in] identifier[info] :
identifier[data] [ literal[string] ]= identifier[int] ( identifier[time] . identifier[time] ())- identifier[info] [ identifier[key] ]
keyword[for] identifier[key] keyword[in] identifier[data] :
identifier[self] . identifier[publish] ( identifier[self] . identifier[_publish_key] ( identifier[nick] , identifier[key] ),
identifier[data] [ identifier[key] ],
identifier[precision] = identifier[self] . identifier[_precision] ( identifier[data] [ identifier[key] ]),
identifier[metric_type] = literal[string] ) | def collect_instance(self, nick, host, port, unix_socket, auth):
"""Collect metrics from a single Redis instance
:param str nick: nickname of redis instance
:param str host: redis host
:param int port: redis port
:param str unix_socket: unix socket, if applicable
:param str auth: authentication password
"""
# Connect to redis and get the info
info = self._get_info(host, port, unix_socket, auth)
if info is None:
return # depends on [control=['if'], data=[]]
# The structure should include the port for multiple instances per
# server
data = dict()
    # Role needs to be handled outside the _KEYS dict
    # since the value is a string, not an int / float
# Also, master_sync_in_progress is only available if the
# redis instance is a slave, so default it here so that
# the metric is cleared if the instance flips from slave
# to master
if 'role' in info:
if info['role'] == 'master':
data['replication.master'] = 1
data['replication.master_sync_in_progress'] = 0 # depends on [control=['if'], data=[]]
else:
data['replication.master'] = 0 # depends on [control=['if'], data=['info']]
# Connect to redis and get the maxmemory config value
# Then calculate the % maxmemory of memory used
maxmemory_config = self._get_config(host, port, unix_socket, auth, 'maxmemory')
if maxmemory_config and 'maxmemory' in maxmemory_config.keys():
maxmemory = float(maxmemory_config['maxmemory'])
# Only report % used if maxmemory is a non zero value
if maxmemory == 0:
maxmemory_percent = 0.0 # depends on [control=['if'], data=[]]
else:
maxmemory_percent = info['used_memory'] / maxmemory * 100
maxmemory_percent = round(maxmemory_percent, 2)
data['memory.used_percent'] = float('%.2f' % maxmemory_percent) # depends on [control=['if'], data=[]]
# Iterate over the top level keys
for key in self._KEYS:
if self._KEYS[key] in info:
data[key] = info[self._KEYS[key]] # depends on [control=['if'], data=['info']] # depends on [control=['for'], data=['key']]
# Iterate over renamed keys for 2.6 support
for key in self._RENAMED_KEYS:
if self._RENAMED_KEYS[key] in info:
data[key] = info[self._RENAMED_KEYS[key]] # depends on [control=['if'], data=['info']] # depends on [control=['for'], data=['key']]
    # Look for database-specific stats
for dbnum in range(0, int(self.config.get('databases', self._DATABASE_COUNT))):
db = 'db%i' % dbnum
if db in info:
for key in info[db]:
data['%s.%s' % (db, key)] = info[db][key] # depends on [control=['for'], data=['key']] # depends on [control=['if'], data=['db', 'info']] # depends on [control=['for'], data=['dbnum']]
# Time since last save
for key in ['last_save_time', 'rdb_last_save_time']:
if key in info:
data['last_save.time_since'] = int(time.time()) - info[key] # depends on [control=['if'], data=['key', 'info']] # depends on [control=['for'], data=['key']]
# Publish the data to graphite
for key in data:
self.publish(self._publish_key(nick, key), data[key], precision=self._precision(data[key]), metric_type='GAUGE') # depends on [control=['for'], data=['key']] |
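A worked instance of the memory.used_percent arithmetic above, with assumed byte counts; the float coercion and two-place rounding mirror the collector.

used_memory = 512 * 1024 * 1024             # from INFO, assumed
maxmemory = float(2 * 1024 * 1024 * 1024)   # from CONFIG GET maxmemory
pct = round(used_memory / maxmemory * 100, 2)
assert pct == 25.0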
def whisper_filename(self):
"""Build a file path to the Whisper database"""
source_name = self.source_id and self.source.name or ''
return get_valid_filename("{0}__{1}.wsp".format(source_name,
self.name)) | def function[whisper_filename, parameter[self]]:
constant[Build a file path to the Whisper database]
variable[source_name] assign[=] <ast.BoolOp object at 0x7da20c991cc0>
return[call[name[get_valid_filename], parameter[call[constant[{0}__{1}.wsp].format, parameter[name[source_name], name[self].name]]]]] | keyword[def] identifier[whisper_filename] ( identifier[self] ):
literal[string]
identifier[source_name] = identifier[self] . identifier[source_id] keyword[and] identifier[self] . identifier[source] . identifier[name] keyword[or] literal[string]
keyword[return] identifier[get_valid_filename] ( literal[string] . identifier[format] ( identifier[source_name] ,
identifier[self] . identifier[name] )) | def whisper_filename(self):
"""Build a file path to the Whisper database"""
source_name = self.source_id and self.source.name or ''
return get_valid_filename('{0}__{1}.wsp'.format(source_name, self.name)) |
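Illustrative only: whisper_filename leans on a Django-style get_valid_filename, so a source named 'demo' and a metric named 'cpu load' would normalize as below.

from django.utils.text import get_valid_filename  # assumed import location

print(get_valid_filename("demo__cpu load.wsp"))   # -> demo__cpu_load.wsp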
def get_critical_compositions(self, comp1, comp2):
"""
Get the critical compositions along the tieline between two
compositions. I.e. where the decomposition products change.
The endpoints are also returned.
Args:
comp1, comp2 (Composition): compositions that define the tieline
Returns:
[(Composition)]: list of critical compositions. All are of
the form x * comp1 + (1-x) * comp2
"""
n1 = comp1.num_atoms
n2 = comp2.num_atoms
pd_els = self.elements
# the reduced dimensionality Simplexes don't use the
# first element in the PD
c1 = self.pd_coords(comp1)
c2 = self.pd_coords(comp2)
# none of the projections work if c1 == c2, so just return *copies*
# of the inputs
if np.all(c1 == c2):
return [comp1.copy(), comp2.copy()]
intersections = [c1, c2]
for sc in self.simplexes:
intersections.extend(sc.line_intersection(c1, c2))
intersections = np.array(intersections)
# find position along line
l = (c2 - c1)
l /= np.sum(l ** 2) ** 0.5
proj = np.dot(intersections - c1, l)
# only take compositions between endpoints
proj = proj[np.logical_and(proj > -self.numerical_tol,
proj < proj[1] + self.numerical_tol)]
proj.sort()
# only unique compositions
valid = np.ones(len(proj), dtype=np.bool)
valid[1:] = proj[1:] > proj[:-1] + self.numerical_tol
proj = proj[valid]
ints = c1 + l * proj[:, None]
# reconstruct full-dimensional composition array
cs = np.concatenate([np.array([1 - np.sum(ints, axis=-1)]).T,
ints], axis=-1)
# mixing fraction when compositions are normalized
x = proj / np.dot(c2 - c1, l)
# mixing fraction when compositions are not normalized
x_unnormalized = x * n1 / (n2 + x * (n1 - n2))
num_atoms = n1 + (n2 - n1) * x_unnormalized
cs *= num_atoms[:, None]
return [Composition((c, v) for c, v in zip(pd_els, m)) for m in cs] | def function[get_critical_compositions, parameter[self, comp1, comp2]]:
constant[
Get the critical compositions along the tieline between two
compositions. I.e. where the decomposition products change.
The endpoints are also returned.
Args:
comp1, comp2 (Composition): compositions that define the tieline
Returns:
[(Composition)]: list of critical compositions. All are of
the form x * comp1 + (1-x) * comp2
]
variable[n1] assign[=] name[comp1].num_atoms
variable[n2] assign[=] name[comp2].num_atoms
variable[pd_els] assign[=] name[self].elements
variable[c1] assign[=] call[name[self].pd_coords, parameter[name[comp1]]]
variable[c2] assign[=] call[name[self].pd_coords, parameter[name[comp2]]]
if call[name[np].all, parameter[compare[name[c1] equal[==] name[c2]]]] begin[:]
return[list[[<ast.Call object at 0x7da1b2197580>, <ast.Call object at 0x7da1b21971f0>]]]
variable[intersections] assign[=] list[[<ast.Name object at 0x7da1b2196d10>, <ast.Name object at 0x7da1b2196b90>]]
for taget[name[sc]] in starred[name[self].simplexes] begin[:]
call[name[intersections].extend, parameter[call[name[sc].line_intersection, parameter[name[c1], name[c2]]]]]
variable[intersections] assign[=] call[name[np].array, parameter[name[intersections]]]
variable[l] assign[=] binary_operation[name[c2] - name[c1]]
<ast.AugAssign object at 0x7da1b21965c0>
variable[proj] assign[=] call[name[np].dot, parameter[binary_operation[name[intersections] - name[c1]], name[l]]]
variable[proj] assign[=] call[name[proj]][call[name[np].logical_and, parameter[compare[name[proj] greater[>] <ast.UnaryOp object at 0x7da204564b80>], compare[name[proj] less[<] binary_operation[call[name[proj]][constant[1]] + name[self].numerical_tol]]]]]
call[name[proj].sort, parameter[]]
variable[valid] assign[=] call[name[np].ones, parameter[call[name[len], parameter[name[proj]]]]]
call[name[valid]][<ast.Slice object at 0x7da2045653f0>] assign[=] compare[call[name[proj]][<ast.Slice object at 0x7da204564370>] greater[>] binary_operation[call[name[proj]][<ast.Slice object at 0x7da204566d40>] + name[self].numerical_tol]]
variable[proj] assign[=] call[name[proj]][name[valid]]
variable[ints] assign[=] binary_operation[name[c1] + binary_operation[name[l] * call[name[proj]][tuple[[<ast.Slice object at 0x7da204565bd0>, <ast.Constant object at 0x7da2045641f0>]]]]]
variable[cs] assign[=] call[name[np].concatenate, parameter[list[[<ast.Attribute object at 0x7da204566fb0>, <ast.Name object at 0x7da204565ff0>]]]]
variable[x] assign[=] binary_operation[name[proj] / call[name[np].dot, parameter[binary_operation[name[c2] - name[c1]], name[l]]]]
variable[x_unnormalized] assign[=] binary_operation[binary_operation[name[x] * name[n1]] / binary_operation[name[n2] + binary_operation[name[x] * binary_operation[name[n1] - name[n2]]]]]
variable[num_atoms] assign[=] binary_operation[name[n1] + binary_operation[binary_operation[name[n2] - name[n1]] * name[x_unnormalized]]]
<ast.AugAssign object at 0x7da204565ba0>
return[<ast.ListComp object at 0x7da204564250>] | keyword[def] identifier[get_critical_compositions] ( identifier[self] , identifier[comp1] , identifier[comp2] ):
literal[string]
identifier[n1] = identifier[comp1] . identifier[num_atoms]
identifier[n2] = identifier[comp2] . identifier[num_atoms]
identifier[pd_els] = identifier[self] . identifier[elements]
identifier[c1] = identifier[self] . identifier[pd_coords] ( identifier[comp1] )
identifier[c2] = identifier[self] . identifier[pd_coords] ( identifier[comp2] )
keyword[if] identifier[np] . identifier[all] ( identifier[c1] == identifier[c2] ):
keyword[return] [ identifier[comp1] . identifier[copy] (), identifier[comp2] . identifier[copy] ()]
identifier[intersections] =[ identifier[c1] , identifier[c2] ]
keyword[for] identifier[sc] keyword[in] identifier[self] . identifier[simplexes] :
identifier[intersections] . identifier[extend] ( identifier[sc] . identifier[line_intersection] ( identifier[c1] , identifier[c2] ))
identifier[intersections] = identifier[np] . identifier[array] ( identifier[intersections] )
identifier[l] =( identifier[c2] - identifier[c1] )
identifier[l] /= identifier[np] . identifier[sum] ( identifier[l] ** literal[int] )** literal[int]
identifier[proj] = identifier[np] . identifier[dot] ( identifier[intersections] - identifier[c1] , identifier[l] )
identifier[proj] = identifier[proj] [ identifier[np] . identifier[logical_and] ( identifier[proj] >- identifier[self] . identifier[numerical_tol] ,
identifier[proj] < identifier[proj] [ literal[int] ]+ identifier[self] . identifier[numerical_tol] )]
identifier[proj] . identifier[sort] ()
identifier[valid] = identifier[np] . identifier[ones] ( identifier[len] ( identifier[proj] ), identifier[dtype] = identifier[np] . identifier[bool] )
identifier[valid] [ literal[int] :]= identifier[proj] [ literal[int] :]> identifier[proj] [:- literal[int] ]+ identifier[self] . identifier[numerical_tol]
identifier[proj] = identifier[proj] [ identifier[valid] ]
identifier[ints] = identifier[c1] + identifier[l] * identifier[proj] [:, keyword[None] ]
identifier[cs] = identifier[np] . identifier[concatenate] ([ identifier[np] . identifier[array] ([ literal[int] - identifier[np] . identifier[sum] ( identifier[ints] , identifier[axis] =- literal[int] )]). identifier[T] ,
identifier[ints] ], identifier[axis] =- literal[int] )
identifier[x] = identifier[proj] / identifier[np] . identifier[dot] ( identifier[c2] - identifier[c1] , identifier[l] )
identifier[x_unnormalized] = identifier[x] * identifier[n1] /( identifier[n2] + identifier[x] *( identifier[n1] - identifier[n2] ))
identifier[num_atoms] = identifier[n1] +( identifier[n2] - identifier[n1] )* identifier[x_unnormalized]
identifier[cs] *= identifier[num_atoms] [:, keyword[None] ]
keyword[return] [ identifier[Composition] (( identifier[c] , identifier[v] ) keyword[for] identifier[c] , identifier[v] keyword[in] identifier[zip] ( identifier[pd_els] , identifier[m] )) keyword[for] identifier[m] keyword[in] identifier[cs] ] | def get_critical_compositions(self, comp1, comp2):
"""
Get the critical compositions along the tieline between two
compositions. I.e. where the decomposition products change.
The endpoints are also returned.
Args:
comp1, comp2 (Composition): compositions that define the tieline
Returns:
[(Composition)]: list of critical compositions. All are of
the form x * comp1 + (1-x) * comp2
"""
n1 = comp1.num_atoms
n2 = comp2.num_atoms
pd_els = self.elements
# the reduced dimensionality Simplexes don't use the
# first element in the PD
c1 = self.pd_coords(comp1)
c2 = self.pd_coords(comp2)
# none of the projections work if c1 == c2, so just return *copies*
# of the inputs
if np.all(c1 == c2):
return [comp1.copy(), comp2.copy()] # depends on [control=['if'], data=[]]
intersections = [c1, c2]
for sc in self.simplexes:
intersections.extend(sc.line_intersection(c1, c2)) # depends on [control=['for'], data=['sc']]
intersections = np.array(intersections)
# find position along line
l = c2 - c1
l /= np.sum(l ** 2) ** 0.5
proj = np.dot(intersections - c1, l)
# only take compositions between endpoints
proj = proj[np.logical_and(proj > -self.numerical_tol, proj < proj[1] + self.numerical_tol)]
proj.sort()
# only unique compositions
valid = np.ones(len(proj), dtype=np.bool)
valid[1:] = proj[1:] > proj[:-1] + self.numerical_tol
proj = proj[valid]
ints = c1 + l * proj[:, None]
# reconstruct full-dimensional composition array
cs = np.concatenate([np.array([1 - np.sum(ints, axis=-1)]).T, ints], axis=-1)
# mixing fraction when compositions are normalized
x = proj / np.dot(c2 - c1, l)
# mixing fraction when compositions are not normalized
x_unnormalized = x * n1 / (n2 + x * (n1 - n2))
num_atoms = n1 + (n2 - n1) * x_unnormalized
cs *= num_atoms[:, None]
return [Composition(((c, v) for (c, v) in zip(pd_els, m))) for m in cs] |
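A usage sketch against the pymatgen API; `pd` is assumed to be a PhaseDiagram already built over suitable entries. Every returned composition lies on the comp1-comp2 tieline.

from pymatgen.core import Composition

crits = pd.get_critical_compositions(Composition("Fe2O3"), Composition("Li2O"))
for c in crits:
    print(c.reduced_formula)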
def add_static_route(self, method: Union[str, Methods], route: str, handler: Callable,
skip_middleware=False):
"""
Adds a static route. A static route is a special route that
        doesn't follow any of the normal rules, and never has any path
parameters.
Ideally, this is used for non-public facing endpoints such as
"/healthcheck", or "/stats" or something of that nature.
All static routes SKIP middlewares
"""
if isinstance(method, str):
method = Methods(method.upper())
route = self._prefix + route
route = route.strip('/')
if route not in self._static_routes:
self._static_routes[route] = {}
self._static_routes[route][method] = handler | def function[add_static_route, parameter[self, method, route, handler, skip_middleware]]:
constant[
Adds a static route. A static route is a special route that
    doesn't follow any of the normal rules, and never has any path
parameters.
Ideally, this is used for non-public facing endpoints such as
"/healthcheck", or "/stats" or something of that nature.
All static routes SKIP middlewares
]
if call[name[isinstance], parameter[name[method], name[str]]] begin[:]
variable[method] assign[=] call[name[Methods], parameter[call[name[method].upper, parameter[]]]]
variable[route] assign[=] binary_operation[name[self]._prefix + name[route]]
variable[route] assign[=] call[name[route].strip, parameter[constant[/]]]
if compare[name[route] <ast.NotIn object at 0x7da2590d7190> name[self]._static_routes] begin[:]
call[name[self]._static_routes][name[route]] assign[=] dictionary[[], []]
call[call[name[self]._static_routes][name[route]]][name[method]] assign[=] name[handler] | keyword[def] identifier[add_static_route] ( identifier[self] , identifier[method] : identifier[Union] [ identifier[str] , identifier[Methods] ], identifier[route] : identifier[str] , identifier[handler] : identifier[Callable] ,
identifier[skip_middleware] = keyword[False] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[method] , identifier[str] ):
identifier[method] = identifier[Methods] ( identifier[method] . identifier[upper] ())
identifier[route] = identifier[self] . identifier[_prefix] + identifier[route]
identifier[route] = identifier[route] . identifier[strip] ( literal[string] )
keyword[if] identifier[route] keyword[not] keyword[in] identifier[self] . identifier[_static_routes] :
identifier[self] . identifier[_static_routes] [ identifier[route] ]={}
identifier[self] . identifier[_static_routes] [ identifier[route] ][ identifier[method] ]= identifier[handler] | def add_static_route(self, method: Union[str, Methods], route: str, handler: Callable, skip_middleware=False):
"""
Adds a static route. A static route is a special route that
    doesn't follow any of the normal rules, and never has any path
parameters.
Ideally, this is used for non-public facing endpoints such as
"/healthcheck", or "/stats" or something of that nature.
All static routes SKIP middlewares
"""
if isinstance(method, str):
method = Methods(method.upper()) # depends on [control=['if'], data=[]]
route = self._prefix + route
route = route.strip('/')
if route not in self._static_routes:
self._static_routes[route] = {} # depends on [control=['if'], data=['route']]
self._static_routes[route][method] = handler |
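A minimal call sketch, assuming a router object exposing the method above; the handler signature is an assumption.

async def healthcheck(request):
    return {"status": "ok"}

router.add_static_route("GET", "/healthcheck", healthcheck)
# Stored under the stripped key 'healthcheck' keyed by Methods.GET, and,
# per the docstring, skipped by the middleware chain.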
def _copy_id_str_old(self):
'''
Return the string to execute ssh-copy-id
'''
if self.passwd:
# Using single quotes prevents shell expansion and
# passwords containing '$'
return "{0} {1} '{2} -p {3} {4} {5}@{6}'".format(
'ssh-copy-id',
'-i {0}.pub'.format(self.priv),
self._passwd_opts(),
self.port,
self._ssh_opts(),
self.user,
self.host)
return None | def function[_copy_id_str_old, parameter[self]]:
constant[
Return the string to execute ssh-copy-id
]
if name[self].passwd begin[:]
return[call[constant[{0} {1} '{2} -p {3} {4} {5}@{6}'].format, parameter[constant[ssh-copy-id], call[constant[-i {0}.pub].format, parameter[name[self].priv]], call[name[self]._passwd_opts, parameter[]], name[self].port, call[name[self]._ssh_opts, parameter[]], name[self].user, name[self].host]]]
return[constant[None]] | keyword[def] identifier[_copy_id_str_old] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[passwd] :
keyword[return] literal[string] . identifier[format] (
literal[string] ,
literal[string] . identifier[format] ( identifier[self] . identifier[priv] ),
identifier[self] . identifier[_passwd_opts] (),
identifier[self] . identifier[port] ,
identifier[self] . identifier[_ssh_opts] (),
identifier[self] . identifier[user] ,
identifier[self] . identifier[host] )
keyword[return] keyword[None] | def _copy_id_str_old(self):
"""
Return the string to execute ssh-copy-id
"""
if self.passwd:
# Using single quotes prevents shell expansion and
# passwords containing '$'
return "{0} {1} '{2} -p {3} {4} {5}@{6}'".format('ssh-copy-id', '-i {0}.pub'.format(self.priv), self._passwd_opts(), self.port, self._ssh_opts(), self.user, self.host) # depends on [control=['if'], data=[]]
return None |
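Shape of the returned command for assumed attribute values; the password and SSH option fragments (abbreviated here) come from the two helper methods.

cmd = "{0} {1} '{2} -p {3} {4} {5}@{6}'".format(
    'ssh-copy-id', '-i /root/.ssh/id_rsa.pub',
    '<passwd opts>', 22, '<ssh opts>', 'salt', '10.0.0.5')
print(cmd)
# ssh-copy-id -i /root/.ssh/id_rsa.pub '<passwd opts> -p 22 <ssh opts> [email protected]'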
def anchor_idx(self):
"""int or str representing index of anchor element in dimension.
        When the anchor is an operation, like 'top' or 'bottom', the
        operation string itself is returned.
"""
anchor = self.anchor
if anchor in ["top", "bottom"]:
return anchor
return self.valid_elements.get_by_id(anchor).index_in_valids | def function[anchor_idx, parameter[self]]:
constant[int or str representing index of anchor element in dimension.
    When the anchor is an operation, like 'top' or 'bottom', the
    operation string itself is returned.
]
variable[anchor] assign[=] name[self].anchor
if compare[name[anchor] in list[[<ast.Constant object at 0x7da1b1b7ffd0>, <ast.Constant object at 0x7da1b1b7fca0>]]] begin[:]
return[name[anchor]]
return[call[name[self].valid_elements.get_by_id, parameter[name[anchor]]].index_in_valids] | keyword[def] identifier[anchor_idx] ( identifier[self] ):
literal[string]
identifier[anchor] = identifier[self] . identifier[anchor]
keyword[if] identifier[anchor] keyword[in] [ literal[string] , literal[string] ]:
keyword[return] identifier[anchor]
keyword[return] identifier[self] . identifier[valid_elements] . identifier[get_by_id] ( identifier[anchor] ). identifier[index_in_valids] | def anchor_idx(self):
"""int or str representing index of anchor element in dimension.
    When the anchor is an operation, like 'top' or 'bottom', the
    operation string itself is returned.
"""
anchor = self.anchor
if anchor in ['top', 'bottom']:
return anchor # depends on [control=['if'], data=['anchor']]
return self.valid_elements.get_by_id(anchor).index_in_valids |
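Behaviour sketch with hypothetical values, mimicking the branch above: operation anchors pass through as strings, element anchors resolve to a position among the valid elements.

valid_ids = ['0001', '0042', '0099']          # hypothetical element ids
for anchor in ('top', '0042'):
    idx = anchor if anchor in ('top', 'bottom') else valid_ids.index(anchor)
    print(anchor, '->', idx)                  # top -> top, 0042 -> 1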
def gradient(self):
"""Gradient operator of the functional."""
if self.operator is None:
return ConstantOperator(self.vector, self.domain)
else:
if not self.operator.is_linear:
                # TODO: Actually works otherwise, but needs more work
raise NotImplementedError('`operator` must be linear')
# Figure out if operator is symmetric
opadjoint = self.operator.adjoint
if opadjoint == self.operator:
gradient = 2 * self.operator
else:
gradient = self.operator + opadjoint
# Return gradient
if self.vector is None:
return gradient
else:
return gradient + self.vector | def function[gradient, parameter[self]]:
constant[Gradient operator of the functional.]
if compare[name[self].operator is constant[None]] begin[:]
return[call[name[ConstantOperator], parameter[name[self].vector, name[self].domain]]] | keyword[def] identifier[gradient] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[operator] keyword[is] keyword[None] :
keyword[return] identifier[ConstantOperator] ( identifier[self] . identifier[vector] , identifier[self] . identifier[domain] )
keyword[else] :
keyword[if] keyword[not] identifier[self] . identifier[operator] . identifier[is_linear] :
keyword[raise] identifier[NotImplementedError] ( literal[string] )
identifier[opadjoint] = identifier[self] . identifier[operator] . identifier[adjoint]
keyword[if] identifier[opadjoint] == identifier[self] . identifier[operator] :
identifier[gradient] = literal[int] * identifier[self] . identifier[operator]
keyword[else] :
identifier[gradient] = identifier[self] . identifier[operator] + identifier[opadjoint]
keyword[if] identifier[self] . identifier[vector] keyword[is] keyword[None] :
keyword[return] identifier[gradient]
keyword[else] :
keyword[return] identifier[gradient] + identifier[self] . identifier[vector] | def gradient(self):
"""Gradient operator of the functional."""
if self.operator is None:
return ConstantOperator(self.vector, self.domain) # depends on [control=['if'], data=[]]
else:
if not self.operator.is_linear:
            # TODO: Actually works otherwise, but needs more work
raise NotImplementedError('`operator` must be linear') # depends on [control=['if'], data=[]]
# Figure out if operator is symmetric
opadjoint = self.operator.adjoint
if opadjoint == self.operator:
gradient = 2 * self.operator # depends on [control=['if'], data=[]]
else:
gradient = self.operator + opadjoint
# Return gradient
if self.vector is None:
return gradient # depends on [control=['if'], data=[]]
else:
return gradient + self.vector |
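The rule the property encodes, checked numerically with numpy (a sketch; the odl operators are replaced by a plain matrix): for f(x) = <x, Ax> + <b, x> the gradient is (A + A^T)x + b, collapsing to 2Ax + b when A is symmetric.

import numpy as np

A = np.array([[2.0, 1.0],
              [0.0, 3.0]])   # deliberately non-symmetric
b = np.array([1.0, -1.0])
x = np.array([0.5, 2.0])

grad = (A + A.T) @ x + b     # operator + adjoint, then add the vector
f = lambda v: v @ A @ v + b @ v
eps = 1e-6
num = np.array([(f(x + eps * e) - f(x - eps * e)) / (2 * eps)
                for e in np.eye(2)])
assert np.allclose(grad, num, atol=1e-5)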
def generate(basename, xml):
'''generate complete javascript implementation'''
if basename.endswith('.js'):
filename = basename
else:
filename = basename + '.js'
msgs = []
enums = []
filelist = []
for x in xml:
msgs.extend(x.message)
enums.extend(x.enum)
filelist.append(os.path.basename(x.filename))
for m in msgs:
if xml[0].little_endian:
m.fmtstr = '<'
else:
m.fmtstr = '>'
for f in m.ordered_fields:
m.fmtstr += mavfmt(f)
m.order_map = [ 0 ] * len(m.fieldnames)
for i in range(0, len(m.fieldnames)):
m.order_map[i] = m.ordered_fieldnames.index(m.fieldnames[i])
print("Generating %s" % filename)
outf = open(filename, "w")
generate_preamble(outf, msgs, filelist, xml[0])
generate_enums(outf, enums)
generate_message_ids(outf, msgs)
generate_classes(outf, msgs)
generate_mavlink_class(outf, msgs, xml[0])
generate_footer(outf)
outf.close()
print("Generated %s OK" % filename) | def function[generate, parameter[basename, xml]]:
constant[generate complete javascript implementation]
if call[name[basename].endswith, parameter[constant[.js]]] begin[:]
variable[filename] assign[=] name[basename]
variable[msgs] assign[=] list[[]]
variable[enums] assign[=] list[[]]
variable[filelist] assign[=] list[[]]
for taget[name[x]] in starred[name[xml]] begin[:]
call[name[msgs].extend, parameter[name[x].message]]
call[name[enums].extend, parameter[name[x].enum]]
call[name[filelist].append, parameter[call[name[os].path.basename, parameter[name[x].filename]]]]
for taget[name[m]] in starred[name[msgs]] begin[:]
if call[name[xml]][constant[0]].little_endian begin[:]
name[m].fmtstr assign[=] constant[<]
for taget[name[f]] in starred[name[m].ordered_fields] begin[:]
<ast.AugAssign object at 0x7da1b16851b0>
name[m].order_map assign[=] binary_operation[list[[<ast.Constant object at 0x7da1b16855d0>]] * call[name[len], parameter[name[m].fieldnames]]]
for taget[name[i]] in starred[call[name[range], parameter[constant[0], call[name[len], parameter[name[m].fieldnames]]]]] begin[:]
call[name[m].order_map][name[i]] assign[=] call[name[m].ordered_fieldnames.index, parameter[call[name[m].fieldnames][name[i]]]]
call[name[print], parameter[binary_operation[constant[Generating %s] <ast.Mod object at 0x7da2590d6920> name[filename]]]]
variable[outf] assign[=] call[name[open], parameter[name[filename], constant[w]]]
call[name[generate_preamble], parameter[name[outf], name[msgs], name[filelist], call[name[xml]][constant[0]]]]
call[name[generate_enums], parameter[name[outf], name[enums]]]
call[name[generate_message_ids], parameter[name[outf], name[msgs]]]
call[name[generate_classes], parameter[name[outf], name[msgs]]]
call[name[generate_mavlink_class], parameter[name[outf], name[msgs], call[name[xml]][constant[0]]]]
call[name[generate_footer], parameter[name[outf]]]
call[name[outf].close, parameter[]]
call[name[print], parameter[binary_operation[constant[Generated %s OK] <ast.Mod object at 0x7da2590d6920> name[filename]]]] | keyword[def] identifier[generate] ( identifier[basename] , identifier[xml] ):
literal[string]
keyword[if] identifier[basename] . identifier[endswith] ( literal[string] ):
identifier[filename] = identifier[basename]
keyword[else] :
identifier[filename] = identifier[basename] + literal[string]
identifier[msgs] =[]
identifier[enums] =[]
identifier[filelist] =[]
keyword[for] identifier[x] keyword[in] identifier[xml] :
identifier[msgs] . identifier[extend] ( identifier[x] . identifier[message] )
identifier[enums] . identifier[extend] ( identifier[x] . identifier[enum] )
identifier[filelist] . identifier[append] ( identifier[os] . identifier[path] . identifier[basename] ( identifier[x] . identifier[filename] ))
keyword[for] identifier[m] keyword[in] identifier[msgs] :
keyword[if] identifier[xml] [ literal[int] ]. identifier[little_endian] :
identifier[m] . identifier[fmtstr] = literal[string]
keyword[else] :
identifier[m] . identifier[fmtstr] = literal[string]
keyword[for] identifier[f] keyword[in] identifier[m] . identifier[ordered_fields] :
identifier[m] . identifier[fmtstr] += identifier[mavfmt] ( identifier[f] )
identifier[m] . identifier[order_map] =[ literal[int] ]* identifier[len] ( identifier[m] . identifier[fieldnames] )
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[len] ( identifier[m] . identifier[fieldnames] )):
identifier[m] . identifier[order_map] [ identifier[i] ]= identifier[m] . identifier[ordered_fieldnames] . identifier[index] ( identifier[m] . identifier[fieldnames] [ identifier[i] ])
identifier[print] ( literal[string] % identifier[filename] )
identifier[outf] = identifier[open] ( identifier[filename] , literal[string] )
identifier[generate_preamble] ( identifier[outf] , identifier[msgs] , identifier[filelist] , identifier[xml] [ literal[int] ])
identifier[generate_enums] ( identifier[outf] , identifier[enums] )
identifier[generate_message_ids] ( identifier[outf] , identifier[msgs] )
identifier[generate_classes] ( identifier[outf] , identifier[msgs] )
identifier[generate_mavlink_class] ( identifier[outf] , identifier[msgs] , identifier[xml] [ literal[int] ])
identifier[generate_footer] ( identifier[outf] )
identifier[outf] . identifier[close] ()
identifier[print] ( literal[string] % identifier[filename] ) | def generate(basename, xml):
"""generate complete javascript implementation"""
if basename.endswith('.js'):
filename = basename # depends on [control=['if'], data=[]]
else:
filename = basename + '.js'
msgs = []
enums = []
filelist = []
for x in xml:
msgs.extend(x.message)
enums.extend(x.enum)
filelist.append(os.path.basename(x.filename)) # depends on [control=['for'], data=['x']]
for m in msgs:
if xml[0].little_endian:
m.fmtstr = '<' # depends on [control=['if'], data=[]]
else:
m.fmtstr = '>'
for f in m.ordered_fields:
m.fmtstr += mavfmt(f) # depends on [control=['for'], data=['f']]
m.order_map = [0] * len(m.fieldnames)
for i in range(0, len(m.fieldnames)):
m.order_map[i] = m.ordered_fieldnames.index(m.fieldnames[i]) # depends on [control=['for'], data=['i']] # depends on [control=['for'], data=['m']]
print('Generating %s' % filename)
outf = open(filename, 'w')
generate_preamble(outf, msgs, filelist, xml[0])
generate_enums(outf, enums)
generate_message_ids(outf, msgs)
generate_classes(outf, msgs)
generate_mavlink_class(outf, msgs, xml[0])
generate_footer(outf)
outf.close()
print('Generated %s OK' % filename) |
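A worked trace of the order_map construction above, with assumed field names; ordered_fieldnames is the wire order, fieldnames the declaration order.

fieldnames = ['time_boot_ms', 'lat', 'lon']            # declaration order
ordered_fieldnames = ['lat', 'lon', 'time_boot_ms']    # wire (size-sorted) order
order_map = [ordered_fieldnames.index(f) for f in fieldnames]
assert order_map == [2, 0, 1]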
def log_wrap(origfunc):
"""
DRY: Use magic instead of code to get a string for the correct log
level when calling ``print_log_msg``. Because writing the same
boilerplate code in each log_XXX def was too painful to commit.
"""
def orig_func_wraper(msg, *args):
# Take the callers name and snap it in two, result is log
# level, e.g.: log_debug is DEBUG level.
log_level = origfunc.__name__.split("_")[1]
import Log
if getattr(Log, "LOG_%s" % log_level.upper()) <= \
Log.LOG_LEVEL_CURRENT:
# flatten and stringify the positional params so we don't
# tuple() a tuple or an array and end up with
# weirdness.
a = map(str, juicer.utils.flatten(args))
print_log_msg(log_level, str(msg) % tuple(a))
return orig_func_wraper | def function[log_wrap, parameter[origfunc]]:
constant[
DRY: Use magic instead of code to get a string for the correct log
level when calling ``print_log_msg``. Because writing the same
boilerplate code in each log_XXX def was too painful to commit.
]
def function[orig_func_wraper, parameter[msg]]:
variable[log_level] assign[=] call[call[name[origfunc].__name__.split, parameter[constant[_]]]][constant[1]]
import module[Log]
if compare[call[name[getattr], parameter[name[Log], binary_operation[constant[LOG_%s] <ast.Mod object at 0x7da2590d6920> call[name[log_level].upper, parameter[]]]]] less_or_equal[<=] name[Log].LOG_LEVEL_CURRENT] begin[:]
variable[a] assign[=] call[name[map], parameter[name[str], call[name[juicer].utils.flatten, parameter[name[args]]]]]
call[name[print_log_msg], parameter[name[log_level], binary_operation[call[name[str], parameter[name[msg]]] <ast.Mod object at 0x7da2590d6920> call[name[tuple], parameter[name[a]]]]]]
return[name[orig_func_wraper]] | keyword[def] identifier[log_wrap] ( identifier[origfunc] ):
literal[string]
keyword[def] identifier[orig_func_wraper] ( identifier[msg] ,* identifier[args] ):
identifier[log_level] = identifier[origfunc] . identifier[__name__] . identifier[split] ( literal[string] )[ literal[int] ]
keyword[import] identifier[Log]
keyword[if] identifier[getattr] ( identifier[Log] , literal[string] % identifier[log_level] . identifier[upper] ())<= identifier[Log] . identifier[LOG_LEVEL_CURRENT] :
identifier[a] = identifier[map] ( identifier[str] , identifier[juicer] . identifier[utils] . identifier[flatten] ( identifier[args] ))
identifier[print_log_msg] ( identifier[log_level] , identifier[str] ( identifier[msg] )% identifier[tuple] ( identifier[a] ))
keyword[return] identifier[orig_func_wraper] | def log_wrap(origfunc):
"""
DRY: Use magic instead of code to get a string for the correct log
level when calling ``print_log_msg``. Because writing the same
boilerplate code in each log_XXX def was too painful to commit.
"""
def orig_func_wraper(msg, *args):
# Take the callers name and snap it in two, result is log
# level, e.g.: log_debug is DEBUG level.
log_level = origfunc.__name__.split('_')[1]
import Log
if getattr(Log, 'LOG_%s' % log_level.upper()) <= Log.LOG_LEVEL_CURRENT:
# flatten and stringify the positional params so we don't
# tuple() a tuple or an array and end up with
# weirdness.
a = map(str, juicer.utils.flatten(args))
print_log_msg(log_level, str(msg) % tuple(a)) # depends on [control=['if'], data=[]]
return orig_func_wraper |
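A sketch of the naming trick, assuming a Log module exposing LOG_DEBUG and LOG_LEVEL_CURRENT; the stub body never runs, since the wrapper only uses the wrapped function's name to pick the level.

@log_wrap
def log_debug(msg, *args):
    pass

log_debug("loaded %s items from %s", 12, "cart.json")
# -> print_log_msg('debug', 'loaded 12 items from cart.json') when DEBUG is enabled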
def k15(k15file, dir_path='.', input_dir_path='',
meas_file='measurements.txt', aniso_outfile='specimens.txt',
        samp_file="samples.txt", result_file="rmag_anisotropy.txt",
specnum=0, sample_naming_con='1', location="unknown",
data_model_num=3):
"""
converts .k15 format data to MagIC format.
assumes Jelinek Kappabridge measurement scheme.
Parameters
----------
k15file : str
input file name
dir_path : str
output file directory, default "."
input_dir_path : str
input file directory IF different from dir_path, default ""
meas_file : str
output measurement file name, default "measurements.txt"
aniso_outfile : str
output specimen file name, default "specimens.txt"
samp_file: str
output sample file name, default "samples.txt"
    result_file : str
        output result file name, default "rmag_anisotropy.txt", data model 2 only
specnum : int
number of characters to designate a specimen, default 0
    sample_naming_con : str
sample/site naming convention, default '1', see info below
location : str
location name, default "unknown"
data_model_num : int
MagIC data model [2, 3], default 3
Returns
--------
    type - Tuple : (True or False indicating if conversion was successful, samp_file name written)
Info
--------
Infile format:
name [az,pl,strike,dip], followed by
3 rows of 5 measurements for each specimen
Sample naming convention:
[1] XXXXY: where XXXX is an arbitrary length site designation and Y
is the single character sample designation. e.g., TG001a is the
first sample from site TG001. [default]
        [2] XXXX-YY: YY sample from site XXXX (XXXX, YY of arbitrary length)
        [3] XXXX.YY: YY sample from site XXXX (XXXX, YY of arbitrary length)
[4-Z] XXXXYYY: YYY is sample designation with Z characters from site XXX
[5] site name same as sample
[6] site name entered in site_name column in the orient.txt format input file -- NOT CURRENTLY SUPPORTED
[7-Z] [XXXX]YYY: XXXX is site designation with Z characters with sample name XXXXYYYY
NB: all others you will have to customize your self
or e-mail [email protected] for help.
"""
#
# initialize some variables
#
input_dir_path, output_dir_path = pmag.fix_directories(input_dir_path, dir_path)
version_num = pmag.get_version()
syn = 0
itilt, igeo, linecnt, key = 0, 0, 0, ""
first_save = 1
k15 = []
citation = 'This study'
data_model_num = int(float(data_model_num))
# set column names for MagIC 3
spec_name_col = 'specimen' #
samp_name_col = 'sample' #
site_name_col = 'site' #
loc_name_col = 'location' #
citation_col = 'citations'
method_col = 'method_codes'
site_description_col = 'description'
expedition_col = 'expedition_name'
instrument_col = 'instrument_codes'
experiment_col = 'experiments'
analyst_col = 'analysts'
quality_col = 'quality'
aniso_quality_col = 'result_quality'
meas_standard_col = 'standard'
meas_description_col = 'description'
aniso_type_col = 'aniso_type'
aniso_unit_col = 'aniso_s_unit'
aniso_n_col = 'aniso_s_n_measurements'
azimuth_col = 'azimuth'
spec_volume_col = 'volume'
samp_dip_col = 'dip'
bed_dip_col = 'bed_dip'
bed_dip_direction_col = 'bed_dip_direction'
chi_vol_col = 'susc_chi_volume'
aniso_sigma_col = 'aniso_s_sigma'
aniso_unit_col = 'aniso_s_unit'
aniso_tilt_corr_col = 'aniso_tilt_correction'
meas_table_name = 'measurements'
spec_table_name = 'specimens'
samp_table_name = 'samples'
site_table_name = 'sites'
meas_name_col = 'measurement'
meas_time_col = 'timestamp'
meas_ac_col = 'meas_field_ac'
meas_temp_col = "meas_temp"
#
software_col = 'software_packages'
description_col = 'description' # sites.description
treat_temp_col = 'treat_temp'
meas_orient_phi_col = "meas_orient_phi"
meas_orient_theta_col = "meas_orient_theta"
aniso_mean_col = 'aniso_s_mean'
result_description_col = "description"
# set defaults correctly for MagIC 2
if data_model_num == 2:
if meas_file == 'measurements.txt':
meas_file = 'magic_measurements.txt'
if samp_file == 'samples.txt':
samp_file = 'er_samples.txt'
if aniso_outfile == 'specimens.txt':
aniso_outfile = 'rmag_anisotropy.txt'
# set column names for MagIC 2
if data_model_num == 2:
spec_name_col = 'er_specimen_name'
samp_name_col = 'er_sample_name'
site_name_col = 'er_site_name'
loc_name_col = 'er_location_name'
citation_col = 'er_citation_names'
method_col = 'magic_method_codes'
site_description_col = 'site_description'
expedition_col = 'er_expedition_name'
instrument_col = 'magic_instrument_codes'
experiment_col = 'magic_experiment_names'
analyst_col = 'er_analyst_mail_names'
quality_col = 'measurement_flag'
aniso_quality_col = 'anisotropy_flag'
meas_standard_col = 'measurement_standard'
meas_description_col = 'measurement_description'
aniso_type_col = 'anisotropy_type'
aniso_unit_col = 'anisotropy_unit'
aniso_n_col = 'anisotropy_n'
azimuth_col = 'sample_azimuth'
spec_volume_col = 'specimen_volume'
samp_dip_col = 'sample_dip'
bed_dip_col = 'sample_bed_dip'
bed_dip_direction_col = 'sample_bed_dip_direction'
chi_vol_col = 'measurement_chi_volume'
aniso_sigma_col = 'anisotropy_sigma'
aniso_unit_col = 'anisotropy_unit'
aniso_tilt_corr_col = 'anisotropy_tilt_correction'
meas_table_name = 'magic_measurements'
spec_table_name = 'er_specimens'
samp_table_name = 'er_samples'
site_table_name = 'er_sites'
meas_name_col = 'measurement_number'
meas_time_col = 'measurement_date'
meas_ac_col = 'measurement_lab_field_ac'
meas_temp_col = "measurement_temp"
#
software_col = 'magic_software_packages'
description_col = 'rmag_result_name'
treat_temp_col = 'treatment_temp'
meas_temp_col = "measurement_temp"
meas_orient_phi_col = "measurement_orient_phi"
meas_orient_theta_col = "measurement_orient_theta"
aniso_mean_col = 'anisotropy_mean'
result_description_col = "result_description"
# pick off stuff from command line
Z = ""
if "4" in sample_naming_con:
if "-" not in sample_naming_con:
print("option [4] must be in form 4-Z where Z is an integer")
return False, "option [4] must be in form 4-Z where Z is an integer"
else:
Z = sample_naming_con.split("-")[1]
sample_naming_con = "4"
if sample_naming_con == '6':
Samps, filetype = pmag.magic_read(
os.path.join(input_dir_path, samp_table_name + ".txt"))
samp_file = pmag.resolve_file_name(samp_file, output_dir_path)
meas_file = pmag.resolve_file_name(meas_file, output_dir_path)
aniso_outfile = pmag.resolve_file_name(aniso_outfile, output_dir_path)
result_file = pmag.resolve_file_name(result_file, output_dir_path)
k15file = pmag.resolve_file_name(k15file, input_dir_path)
if not os.path.exists(k15file):
print(k15file)
return False, "You must provide a valid k15 format file"
try:
SampRecs, filetype = pmag.magic_read(
samp_file) # append new records to existing
samplist = []
for samp in SampRecs:
if samp[samp_name_col] not in samplist:
samplist.append(samp[samp_name_col])
except IOError:
SampRecs = []
# measurement directions for Jelinek 1977 protocol:
Decs = [315, 225, 180, 135, 45, 90, 270, 270, 270, 90, 180, 180, 0, 0, 0]
Incs = [0, 0, 0, 0, 0, -45, -45, 0, 45, 45, 45, -45, -90, -45, 45]
# some defaults to read in .k15 file format
# list of measurements and default number of characters for specimen ID
# some magic default definitions
#
# read in data
with open(k15file, 'r') as finput:
lines = finput.readlines()
MeasRecs, SpecRecs, AnisRecs, ResRecs = [], [], [], []
    # parse the input lines into MagIC records
MeasRec, SpecRec, SampRec, SiteRec, AnisRec, ResRec = {}, {}, {}, {}, {}, {}
for line in lines:
linecnt += 1
rec = line.split()
if linecnt == 1:
MeasRec[method_col] = ""
SpecRec[method_col] = ""
SampRec[method_col] = ""
AnisRec[method_col] = ""
SiteRec[method_col] = ""
ResRec[method_col] = ""
MeasRec[software_col] = version_num
SpecRec[software_col] = version_num
SampRec[software_col] = version_num
AnisRec[software_col] = version_num
SiteRec[software_col] = version_num
ResRec[software_col] = version_num
MeasRec[method_col] = "LP-X"
MeasRec[quality_col] = "g"
MeasRec[meas_standard_col] = "u"
MeasRec[citation_col] = "This study"
SpecRec[citation_col] = "This study"
SampRec[citation_col] = "This study"
AnisRec[citation_col] = "This study"
ResRec[citation_col] = "This study"
MeasRec[spec_name_col] = rec[0]
MeasRec[experiment_col] = rec[0] + ":LP-AN-MS"
AnisRec[experiment_col] = rec[0] + ":AMS"
ResRec[experiment_col] = rec[0] + ":AMS"
SpecRec[spec_name_col] = rec[0]
AnisRec[spec_name_col] = rec[0]
SampRec[spec_name_col] = rec[0]
if data_model_num == 2:
ResRec[description_col] = rec[0]
if data_model_num == 3:
ResRec[spec_name_col] = rec[0]
specnum = int(specnum)
if specnum != 0:
MeasRec[samp_name_col] = rec[0][:-specnum]
if specnum == 0:
MeasRec[samp_name_col] = rec[0]
SampRec[samp_name_col] = MeasRec[samp_name_col]
SpecRec[samp_name_col] = MeasRec[samp_name_col]
AnisRec[samp_name_col] = MeasRec[samp_name_col]
if data_model_num == 3:
ResRec[samp_name_col] = MeasRec[samp_name_col]
else:
ResRec[samp_name_col + "s"] = MeasRec[samp_name_col]
if sample_naming_con == "6":
for samp in Samps:
if samp[samp_name_col] == AnisRec[samp_name_col]:
sitename = samp[site_name_col]
location = samp[loc_name_col]
elif sample_naming_con != "":
sitename = pmag.parse_site(
AnisRec[samp_name_col], sample_naming_con, Z)
MeasRec[site_name_col] = sitename
MeasRec[loc_name_col] = location
SampRec[site_name_col] = MeasRec[site_name_col]
SpecRec[site_name_col] = MeasRec[site_name_col]
AnisRec[site_name_col] = MeasRec[site_name_col]
ResRec[loc_name_col] = location
ResRec[site_name_col] = MeasRec[site_name_col]
if data_model_num == 2:
ResRec[site_name_col + "s"] = MeasRec[site_name_col]
SampRec[loc_name_col] = MeasRec[loc_name_col]
SpecRec[loc_name_col] = MeasRec[loc_name_col]
AnisRec[loc_name_col] = MeasRec[loc_name_col]
            if data_model_num == 2:
ResRec[loc_name_col + "s"] = MeasRec[loc_name_col]
if len(rec) >= 3:
SampRec[azimuth_col], SampRec[samp_dip_col] = rec[1], rec[2]
az, pl, igeo = float(rec[1]), float(rec[2]), 1
if len(rec) == 5:
SampRec[bed_dip_direction_col], SampRec[bed_dip_col] = '%7.1f' % (
90. + float(rec[3])), (rec[4])
bed_az, bed_dip, itilt, igeo = 90. + \
float(rec[3]), float(rec[4]), 1, 1
else:
for i in range(5):
# assume measurements in micro SI
k15.append(1e-6 * float(rec[i]))
if linecnt == 4:
sbar, sigma, bulk = pmag.dok15_s(k15)
hpars = pmag.dohext(9, sigma, sbar)
MeasRec[treat_temp_col] = '%8.3e' % (
273) # room temp in kelvin
MeasRec[meas_temp_col] = '%8.3e' % (
273) # room temp in kelvin
for i in range(15):
NewMeas = copy.deepcopy(MeasRec)
NewMeas[meas_orient_phi_col] = '%7.1f' % (Decs[i])
NewMeas[meas_orient_theta_col] = '%7.1f' % (Incs[i])
NewMeas[chi_vol_col] = '%12.10f' % (k15[i])
NewMeas[meas_name_col] = '%i' % (i + 1)
if data_model_num == 2:
NewMeas["magic_experiment_name"] = rec[0] + ":LP-AN-MS"
else:
NewMeas["experiment"] = rec[0] + ":LP-AN-MS"
MeasRecs.append(NewMeas)
if SampRec[samp_name_col] not in samplist:
SampRecs.append(SampRec)
samplist.append(SampRec[samp_name_col])
SpecRecs.append(SpecRec)
AnisRec[aniso_type_col] = "AMS"
ResRec[aniso_type_col] = "AMS"
s1_val = '{:12.10f}'.format(sbar[0])
s2_val = '{:12.10f}'.format(sbar[1])
s3_val = '{:12.10f}'.format(sbar[2])
s4_val = '{:12.10f}'.format(sbar[3])
s5_val = '{:12.10f}'.format(sbar[4])
s6_val = '{:12.10f}'.format(sbar[5])
# MAgIC 2
if data_model_num == 2:
AnisRec["anisotropy_s1"] = s1_val
AnisRec["anisotropy_s2"] = s2_val
AnisRec["anisotropy_s3"] = s3_val
AnisRec["anisotropy_s4"] = s4_val
AnisRec["anisotropy_s5"] = s5_val
AnisRec["anisotropy_s6"] = s6_val
# MagIC 3
else:
vals = [s1_val, s2_val, s3_val, s4_val, s5_val, s6_val]
AnisRec['aniso_s'] = ":".join([str(v).strip() for v in vals])
AnisRec[aniso_mean_col] = '%12.10f' % (bulk)
AnisRec[aniso_sigma_col] = '%12.10f' % (sigma)
AnisRec[aniso_mean_col] = '{:12.10f}'.format(bulk)
AnisRec[aniso_sigma_col] = '{:12.10f}'.format(sigma)
AnisRec[aniso_unit_col] = 'SI'
AnisRec[aniso_n_col] = '15'
AnisRec[aniso_tilt_corr_col] = '-1'
AnisRec[method_col] = 'LP-X:AE-H:LP-AN-MS'
AnisRecs.append(AnisRec)
ResRec[method_col] = 'LP-X:AE-H:LP-AN-MS'
ResRec[aniso_tilt_corr_col] = '-1'
if data_model_num == 3:
aniso_v1 = ':'.join([str(i) for i in (hpars['t1'], hpars['v1_dec'], hpars['v1_inc'], hpars['v2_dec'], hpars['v2_inc'], hpars['e12'], hpars['v3_dec'], hpars['v3_inc'], hpars['e13'])])
aniso_v2 = ':'.join([str(i) for i in (hpars['t2'], hpars['v2_dec'], hpars['v2_inc'], hpars['v1_dec'], hpars['v1_inc'], hpars['e12'], hpars['v3_dec'], hpars['v3_inc'], hpars['e23'])])
aniso_v3 = ':'.join([str(i) for i in (hpars['t3'], hpars['v3_dec'], hpars['v3_inc'], hpars['v1_dec'], hpars['v1_inc'], hpars['e13'], hpars['v2_dec'], hpars['v2_inc'], hpars['e23'])])
ResRec['aniso_v1'] = aniso_v1
ResRec['aniso_v2'] = aniso_v2
ResRec['aniso_v3'] = aniso_v3
else: # data model 2
ResRec["anisotropy_t1"] = '%12.10f' % (hpars['t1'])
ResRec["anisotropy_t2"] = '%12.10f' % (hpars['t2'])
ResRec["anisotropy_t3"] = '%12.10f' % (hpars['t3'])
ResRec["anisotropy_fest"] = '%12.10f' % (hpars['F'])
ResRec["anisotropy_ftest12"] = '%12.10f' % (hpars['F12'])
ResRec["anisotropy_ftest23"] = '%12.10f' % (hpars['F23'])
ResRec["anisotropy_v1_dec"] = '%7.1f' % (hpars['v1_dec'])
ResRec["anisotropy_v2_dec"] = '%7.1f' % (hpars['v2_dec'])
ResRec["anisotropy_v3_dec"] = '%7.1f' % (hpars['v3_dec'])
ResRec["anisotropy_v1_inc"] = '%7.1f' % (hpars['v1_inc'])
ResRec["anisotropy_v2_inc"] = '%7.1f' % (hpars['v2_inc'])
ResRec["anisotropy_v3_inc"] = '%7.1f' % (hpars['v3_inc'])
ResRec['anisotropy_v1_eta_dec'] = ResRec['anisotropy_v2_dec']
ResRec['anisotropy_v1_eta_inc'] = ResRec['anisotropy_v2_inc']
ResRec['anisotropy_v1_zeta_dec'] = ResRec['anisotropy_v3_dec']
ResRec['anisotropy_v1_zeta_inc'] = ResRec['anisotropy_v3_inc']
ResRec['anisotropy_v2_eta_dec'] = ResRec['anisotropy_v1_dec']
ResRec['anisotropy_v2_eta_inc'] = ResRec['anisotropy_v1_inc']
ResRec['anisotropy_v2_zeta_dec'] = ResRec['anisotropy_v3_dec']
ResRec['anisotropy_v2_zeta_inc'] = ResRec['anisotropy_v3_inc']
ResRec['anisotropy_v3_eta_dec'] = ResRec['anisotropy_v1_dec']
ResRec['anisotropy_v3_eta_inc'] = ResRec['anisotropy_v1_inc']
ResRec['anisotropy_v3_zeta_dec'] = ResRec['anisotropy_v2_dec']
ResRec['anisotropy_v3_zeta_inc'] = ResRec['anisotropy_v2_inc']
ResRec["anisotropy_v1_eta_semi_angle"] = '%7.1f' % (
hpars['e12'])
ResRec["anisotropy_v1_zeta_semi_angle"] = '%7.1f' % (
hpars['e13'])
ResRec["anisotropy_v2_eta_semi_angle"] = '%7.1f' % (
hpars['e12'])
ResRec["anisotropy_v2_zeta_semi_angle"] = '%7.1f' % (
hpars['e23'])
ResRec["anisotropy_v3_eta_semi_angle"] = '%7.1f' % (
hpars['e13'])
ResRec["anisotropy_v3_zeta_semi_angle"] = '%7.1f' % (
hpars['e23'])
ResRec[result_description_col] = 'Critical F: ' + hpars["F_crit"] + ';Critical F12/F13: ' + hpars["F12_crit"]
#
ResRecs.append(ResRec)
if igeo == 1:
sbarg = pmag.dosgeo(sbar, az, pl)
hparsg = pmag.dohext(9, sigma, sbarg)
AnisRecG = copy.copy(AnisRec)
ResRecG = copy.copy(ResRec)
if data_model_num == 3:
AnisRecG["aniso_s"] = ":".join('{:12.10f}'.format(i) for i in sbarg)
if data_model_num == 2:
AnisRecG["anisotropy_s1"] = '%12.10f' % (sbarg[0])
AnisRecG["anisotropy_s2"] = '%12.10f' % (sbarg[1])
AnisRecG["anisotropy_s3"] = '%12.10f' % (sbarg[2])
AnisRecG["anisotropy_s4"] = '%12.10f' % (sbarg[3])
AnisRecG["anisotropy_s5"] = '%12.10f' % (sbarg[4])
AnisRecG["anisotropy_s6"] = '%12.10f' % (sbarg[5])
AnisRecG[aniso_tilt_corr_col] = '0'
ResRecG[aniso_tilt_corr_col] = '0'
if data_model_num == 3:
                        aniso_v1 = ':'.join([str(i) for i in (
                            hparsg['t1'], hparsg['v1_dec'], hparsg['v1_inc'],
                            hparsg['v2_dec'], hparsg['v2_inc'], hparsg['e12'],
                            hparsg['v3_dec'], hparsg['v3_inc'], hparsg['e13'])])
                        aniso_v2 = ':'.join([str(i) for i in (
                            hparsg['t2'], hparsg['v2_dec'], hparsg['v2_inc'],
                            hparsg['v1_dec'], hparsg['v1_inc'], hparsg['e12'],
                            hparsg['v3_dec'], hparsg['v3_inc'], hparsg['e23'])])
                        aniso_v3 = ':'.join([str(i) for i in (
                            hparsg['t3'], hparsg['v3_dec'], hparsg['v3_inc'],
                            hparsg['v1_dec'], hparsg['v1_inc'], hparsg['e13'],
                            hparsg['v2_dec'], hparsg['v2_inc'], hparsg['e23'])])
ResRecG['aniso_v1'] = aniso_v1
ResRecG['aniso_v2'] = aniso_v2
ResRecG['aniso_v3'] = aniso_v3
#
if data_model_num == 2:
ResRecG["anisotropy_v1_dec"] = '%7.1f' % (hparsg['v1_dec'])
ResRecG["anisotropy_v2_dec"] = '%7.1f' % (hparsg['v2_dec'])
ResRecG["anisotropy_v3_dec"] = '%7.1f' % (hparsg['v3_dec'])
ResRecG["anisotropy_v1_inc"] = '%7.1f' % (hparsg['v1_inc'])
ResRecG["anisotropy_v2_inc"] = '%7.1f' % (hparsg['v2_inc'])
ResRecG["anisotropy_v3_inc"] = '%7.1f' % (hparsg['v3_inc'])
ResRecG['anisotropy_v1_eta_dec'] = ResRecG['anisotropy_v2_dec']
ResRecG['anisotropy_v1_eta_inc'] = ResRecG['anisotropy_v2_inc']
ResRecG['anisotropy_v1_zeta_dec'] = ResRecG['anisotropy_v3_dec']
ResRecG['anisotropy_v1_zeta_inc'] = ResRecG['anisotropy_v3_inc']
ResRecG['anisotropy_v2_eta_dec'] = ResRecG['anisotropy_v1_dec']
ResRecG['anisotropy_v2_eta_inc'] = ResRecG['anisotropy_v1_inc']
ResRecG['anisotropy_v2_zeta_dec'] = ResRecG['anisotropy_v3_dec']
ResRecG['anisotropy_v2_zeta_inc'] = ResRecG['anisotropy_v3_inc']
ResRecG['anisotropy_v3_eta_dec'] = ResRecG['anisotropy_v1_dec']
ResRecG['anisotropy_v3_eta_inc'] = ResRecG['anisotropy_v1_inc']
ResRecG['anisotropy_v3_zeta_dec'] = ResRecG['anisotropy_v2_dec']
ResRecG['anisotropy_v3_zeta_inc'] = ResRecG['anisotropy_v2_inc']
#
                ResRecG[result_description_col] = 'Critical F: ' + hpars["F_crit"] + ';Critical F12/F13: ' + hpars["F12_crit"]
ResRecs.append(ResRecG)
AnisRecs.append(AnisRecG)
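                    # if bedding attitude was also given, express the tensor in tilt-corrected coordinates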
if itilt == 1:
sbart = pmag.dostilt(sbarg, bed_az, bed_dip)
hparst = pmag.dohext(9, sigma, sbart)
AnisRecT = copy.copy(AnisRec)
ResRecT = copy.copy(ResRec)
if data_model_num == 3:
                            aniso_v1 = ':'.join([str(i) for i in (
                                hparst['t1'], hparst['v1_dec'], hparst['v1_inc'],
                                hparst['v2_dec'], hparst['v2_inc'], hparst['e12'],
                                hparst['v3_dec'], hparst['v3_inc'], hparst['e13'])])
                            aniso_v2 = ':'.join([str(i) for i in (
                                hparst['t2'], hparst['v2_dec'], hparst['v2_inc'],
                                hparst['v1_dec'], hparst['v1_inc'], hparst['e12'],
                                hparst['v3_dec'], hparst['v3_inc'], hparst['e23'])])
                            aniso_v3 = ':'.join([str(i) for i in (
                                hparst['t3'], hparst['v3_dec'], hparst['v3_inc'],
                                hparst['v1_dec'], hparst['v1_inc'], hparst['e13'],
                                hparst['v2_dec'], hparst['v2_inc'], hparst['e23'])])
ResRecT['aniso_v1'] = aniso_v1
ResRecT['aniso_v2'] = aniso_v2
ResRecT['aniso_v3'] = aniso_v3
#
if data_model_num == 2:
AnisRecT["anisotropy_s1"] = '%12.10f' % (sbart[0])
AnisRecT["anisotropy_s2"] = '%12.10f' % (sbart[1])
AnisRecT["anisotropy_s3"] = '%12.10f' % (sbart[2])
AnisRecT["anisotropy_s4"] = '%12.10f' % (sbart[3])
AnisRecT["anisotropy_s5"] = '%12.10f' % (sbart[4])
AnisRecT["anisotropy_s6"] = '%12.10f' % (sbart[5])
AnisRecT["anisotropy_tilt_correction"] = '100'
ResRecT["anisotropy_v1_dec"] = '%7.1f' % (hparst['v1_dec'])
ResRecT["anisotropy_v2_dec"] = '%7.1f' % (hparst['v2_dec'])
ResRecT["anisotropy_v3_dec"] = '%7.1f' % (hparst['v3_dec'])
ResRecT["anisotropy_v1_inc"] = '%7.1f' % (hparst['v1_inc'])
ResRecT["anisotropy_v2_inc"] = '%7.1f' % (hparst['v2_inc'])
ResRecT["anisotropy_v3_inc"] = '%7.1f' % (hparst['v3_inc'])
ResRecT['anisotropy_v1_eta_dec'] = ResRecT['anisotropy_v2_dec']
ResRecT['anisotropy_v1_eta_inc'] = ResRecT['anisotropy_v2_inc']
ResRecT['anisotropy_v1_zeta_dec'] = ResRecT['anisotropy_v3_dec']
ResRecT['anisotropy_v1_zeta_inc'] = ResRecT['anisotropy_v3_inc']
ResRecT['anisotropy_v2_eta_dec'] = ResRecT['anisotropy_v1_dec']
ResRecT['anisotropy_v2_eta_inc'] = ResRecT['anisotropy_v1_inc']
ResRecT['anisotropy_v2_zeta_dec'] = ResRecT['anisotropy_v3_dec']
ResRecT['anisotropy_v2_zeta_inc'] = ResRecT['anisotropy_v3_inc']
ResRecT['anisotropy_v3_eta_dec'] = ResRecT['anisotropy_v1_dec']
ResRecT['anisotropy_v3_eta_inc'] = ResRecT['anisotropy_v1_inc']
ResRecT['anisotropy_v3_zeta_dec'] = ResRecT['anisotropy_v2_dec']
ResRecT['anisotropy_v3_zeta_inc'] = ResRecT['anisotropy_v2_inc']
#
ResRecT[aniso_tilt_corr_col] = '100'
                    ResRecT[result_description_col] = 'Critical F: ' + hparst["F_crit"] + ';Critical F12/F13: ' + hparst["F12_crit"]
ResRecs.append(ResRecT)
AnisRecs.append(AnisRecT)
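                # reset the susceptibility buffer and per-specimen records for the next specimen block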
k15, linecnt = [], 0
MeasRec, SpecRec, SampRec, SiteRec, AnisRec = {}, {}, {}, {}, {}
# samples
pmag.magic_write(samp_file, SampRecs, samp_table_name)
# specimens / rmag_anisotropy / rmag_results
if data_model_num == 3:
AnisRecs.extend(ResRecs)
SpecRecs = AnisRecs.copy()
SpecRecs, keys = pmag.fillkeys(SpecRecs)
pmag.magic_write(aniso_outfile, SpecRecs, 'specimens')
flist = [meas_file, aniso_outfile, samp_file]
else:
pmag.magic_write(aniso_outfile, AnisRecs, 'rmag_anisotropy') # add to specimens?
pmag.magic_write(result_file, ResRecs, 'rmag_results') # added to specimens (NOT sites)
flist = [meas_file, samp_file, aniso_outfile, result_file]
# measurements
pmag.magic_write(meas_file, MeasRecs, meas_table_name)
print("Data saved to: " + ", ".join(flist))
return True, meas_file | def function[k15, parameter[k15file, dir_path, input_dir_path, meas_file, aniso_outfile, samp_file, result_file, specnum, sample_naming_con, location, data_model_num]]:
constant[
converts .k15 format data to MagIC format.
assumes Jelinek Kappabridge measurement scheme.
Parameters
----------
k15file : str
input file name
dir_path : str
output file directory, default "."
input_dir_path : str
input file directory IF different from dir_path, default ""
meas_file : str
output measurement file name, default "measurements.txt"
aniso_outfile : str
output specimen file name, default "specimens.txt"
    samp_file : str
output sample file name, default "samples.txt"
    result_file : str
        output result file name, default "rmag_anisotropy.txt", data model 2 only
specnum : int
number of characters to designate a specimen, default 0
    sample_naming_con : str
sample/site naming convention, default '1', see info below
location : str
location name, default "unknown"
data_model_num : int
MagIC data model [2, 3], default 3
Returns
--------
    type - Tuple : (True or False indicating if conversion was successful, meas_file name written)
Info
--------
Infile format:
name [az,pl,strike,dip], followed by
3 rows of 5 measurements for each specimen
Sample naming convention:
[1] XXXXY: where XXXX is an arbitrary length site designation and Y
is the single character sample designation. e.g., TG001a is the
first sample from site TG001. [default]
        [2] XXXX-YY: YY sample from site XXXX (XXXX, YY of arbitrary length)
        [3] XXXX.YY: YY sample from site XXXX (XXXX, YY of arbitrary length)
        [4-Z] XXXXYYY: YYY is sample designation with Z characters from site XXXX
[5] site name same as sample
[6] site name entered in site_name column in the orient.txt format input file -- NOT CURRENTLY SUPPORTED
[7-Z] [XXXX]YYY: XXXX is site designation with Z characters with sample name XXXXYYYY
        NB: all others you will have to customize yourself
            or e-mail [email protected] for help.
]
<ast.Tuple object at 0x7da1b0237a90> assign[=] call[name[pmag].fix_directories, parameter[name[input_dir_path], name[dir_path]]]
variable[version_num] assign[=] call[name[pmag].get_version, parameter[]]
variable[syn] assign[=] constant[0]
<ast.Tuple object at 0x7da1b0237760> assign[=] tuple[[<ast.Constant object at 0x7da1b0237640>, <ast.Constant object at 0x7da1b0237610>, <ast.Constant object at 0x7da1b02375e0>, <ast.Constant object at 0x7da1b02375b0>]]
variable[first_save] assign[=] constant[1]
variable[k15] assign[=] list[[]]
variable[citation] assign[=] constant[This study]
variable[data_model_num] assign[=] call[name[int], parameter[call[name[float], parameter[name[data_model_num]]]]]
variable[spec_name_col] assign[=] constant[specimen]
variable[samp_name_col] assign[=] constant[sample]
variable[site_name_col] assign[=] constant[site]
variable[loc_name_col] assign[=] constant[location]
variable[citation_col] assign[=] constant[citations]
variable[method_col] assign[=] constant[method_codes]
variable[site_description_col] assign[=] constant[description]
variable[expedition_col] assign[=] constant[expedition_name]
variable[instrument_col] assign[=] constant[instrument_codes]
variable[experiment_col] assign[=] constant[experiments]
variable[analyst_col] assign[=] constant[analysts]
variable[quality_col] assign[=] constant[quality]
variable[aniso_quality_col] assign[=] constant[result_quality]
variable[meas_standard_col] assign[=] constant[standard]
variable[meas_description_col] assign[=] constant[description]
variable[aniso_type_col] assign[=] constant[aniso_type]
variable[aniso_unit_col] assign[=] constant[aniso_s_unit]
variable[aniso_n_col] assign[=] constant[aniso_s_n_measurements]
variable[azimuth_col] assign[=] constant[azimuth]
variable[spec_volume_col] assign[=] constant[volume]
variable[samp_dip_col] assign[=] constant[dip]
variable[bed_dip_col] assign[=] constant[bed_dip]
variable[bed_dip_direction_col] assign[=] constant[bed_dip_direction]
variable[chi_vol_col] assign[=] constant[susc_chi_volume]
variable[aniso_sigma_col] assign[=] constant[aniso_s_sigma]
variable[aniso_unit_col] assign[=] constant[aniso_s_unit]
variable[aniso_tilt_corr_col] assign[=] constant[aniso_tilt_correction]
variable[meas_table_name] assign[=] constant[measurements]
variable[spec_table_name] assign[=] constant[specimens]
variable[samp_table_name] assign[=] constant[samples]
variable[site_table_name] assign[=] constant[sites]
variable[meas_name_col] assign[=] constant[measurement]
variable[meas_time_col] assign[=] constant[timestamp]
variable[meas_ac_col] assign[=] constant[meas_field_ac]
variable[meas_temp_col] assign[=] constant[meas_temp]
variable[software_col] assign[=] constant[software_packages]
variable[description_col] assign[=] constant[description]
variable[treat_temp_col] assign[=] constant[treat_temp]
variable[meas_orient_phi_col] assign[=] constant[meas_orient_phi]
variable[meas_orient_theta_col] assign[=] constant[meas_orient_theta]
variable[aniso_mean_col] assign[=] constant[aniso_s_mean]
variable[result_description_col] assign[=] constant[description]
if compare[name[data_model_num] equal[==] constant[2]] begin[:]
if compare[name[meas_file] equal[==] constant[measurements.txt]] begin[:]
variable[meas_file] assign[=] constant[magic_measurements.txt]
if compare[name[samp_file] equal[==] constant[samples.txt]] begin[:]
variable[samp_file] assign[=] constant[er_samples.txt]
if compare[name[aniso_outfile] equal[==] constant[specimens.txt]] begin[:]
variable[aniso_outfile] assign[=] constant[rmag_anisotropy.txt]
if compare[name[data_model_num] equal[==] constant[2]] begin[:]
variable[spec_name_col] assign[=] constant[er_specimen_name]
variable[samp_name_col] assign[=] constant[er_sample_name]
variable[site_name_col] assign[=] constant[er_site_name]
variable[loc_name_col] assign[=] constant[er_location_name]
variable[citation_col] assign[=] constant[er_citation_names]
variable[method_col] assign[=] constant[magic_method_codes]
variable[site_description_col] assign[=] constant[site_description]
variable[expedition_col] assign[=] constant[er_expedition_name]
variable[instrument_col] assign[=] constant[magic_instrument_codes]
variable[experiment_col] assign[=] constant[magic_experiment_names]
variable[analyst_col] assign[=] constant[er_analyst_mail_names]
variable[quality_col] assign[=] constant[measurement_flag]
variable[aniso_quality_col] assign[=] constant[anisotropy_flag]
variable[meas_standard_col] assign[=] constant[measurement_standard]
variable[meas_description_col] assign[=] constant[measurement_description]
variable[aniso_type_col] assign[=] constant[anisotropy_type]
variable[aniso_unit_col] assign[=] constant[anisotropy_unit]
variable[aniso_n_col] assign[=] constant[anisotropy_n]
variable[azimuth_col] assign[=] constant[sample_azimuth]
variable[spec_volume_col] assign[=] constant[specimen_volume]
variable[samp_dip_col] assign[=] constant[sample_dip]
variable[bed_dip_col] assign[=] constant[sample_bed_dip]
variable[bed_dip_direction_col] assign[=] constant[sample_bed_dip_direction]
variable[chi_vol_col] assign[=] constant[measurement_chi_volume]
variable[aniso_sigma_col] assign[=] constant[anisotropy_sigma]
variable[aniso_unit_col] assign[=] constant[anisotropy_unit]
variable[aniso_tilt_corr_col] assign[=] constant[anisotropy_tilt_correction]
variable[meas_table_name] assign[=] constant[magic_measurements]
variable[spec_table_name] assign[=] constant[er_specimens]
variable[samp_table_name] assign[=] constant[er_samples]
variable[site_table_name] assign[=] constant[er_sites]
variable[meas_name_col] assign[=] constant[measurement_number]
variable[meas_time_col] assign[=] constant[measurement_date]
variable[meas_ac_col] assign[=] constant[measurement_lab_field_ac]
variable[meas_temp_col] assign[=] constant[measurement_temp]
variable[software_col] assign[=] constant[magic_software_packages]
variable[description_col] assign[=] constant[rmag_result_name]
variable[treat_temp_col] assign[=] constant[treatment_temp]
variable[meas_temp_col] assign[=] constant[measurement_temp]
variable[meas_orient_phi_col] assign[=] constant[measurement_orient_phi]
variable[meas_orient_theta_col] assign[=] constant[measurement_orient_theta]
variable[aniso_mean_col] assign[=] constant[anisotropy_mean]
variable[result_description_col] assign[=] constant[result_description]
variable[Z] assign[=] constant[]
if compare[constant[4] in name[sample_naming_con]] begin[:]
if compare[constant[-] <ast.NotIn object at 0x7da2590d7190> name[sample_naming_con]] begin[:]
call[name[print], parameter[constant[option [4] must be in form 4-Z where Z is an integer]]]
return[tuple[[<ast.Constant object at 0x7da1b0476d10>, <ast.Constant object at 0x7da1b0476da0>]]]
if compare[name[sample_naming_con] equal[==] constant[6]] begin[:]
<ast.Tuple object at 0x7da1b0474040> assign[=] call[name[pmag].magic_read, parameter[call[name[os].path.join, parameter[name[input_dir_path], binary_operation[name[samp_table_name] + constant[.txt]]]]]]
variable[samp_file] assign[=] call[name[pmag].resolve_file_name, parameter[name[samp_file], name[output_dir_path]]]
variable[meas_file] assign[=] call[name[pmag].resolve_file_name, parameter[name[meas_file], name[output_dir_path]]]
variable[aniso_outfile] assign[=] call[name[pmag].resolve_file_name, parameter[name[aniso_outfile], name[output_dir_path]]]
variable[result_file] assign[=] call[name[pmag].resolve_file_name, parameter[name[result_file], name[output_dir_path]]]
variable[k15file] assign[=] call[name[pmag].resolve_file_name, parameter[name[k15file], name[input_dir_path]]]
if <ast.UnaryOp object at 0x7da1b0474790> begin[:]
call[name[print], parameter[name[k15file]]]
return[tuple[[<ast.Constant object at 0x7da1b0474be0>, <ast.Constant object at 0x7da1b0474c40>]]]
<ast.Try object at 0x7da1b0474c70>
variable[Decs] assign[=] list[[<ast.Constant object at 0x7da1b0475390>, <ast.Constant object at 0x7da1b0475540>, <ast.Constant object at 0x7da1b0475570>, <ast.Constant object at 0x7da1b04754b0>, <ast.Constant object at 0x7da1b0475480>, <ast.Constant object at 0x7da1b04754e0>, <ast.Constant object at 0x7da1b0475420>, <ast.Constant object at 0x7da1b04753f0>, <ast.Constant object at 0x7da1b0475450>, <ast.Constant object at 0x7da1b0475510>, <ast.Constant object at 0x7da1b04755a0>, <ast.Constant object at 0x7da1b04753c0>, <ast.Constant object at 0x7da1b04755d0>, <ast.Constant object at 0x7da1b04757b0>, <ast.Constant object at 0x7da1b0475720>]]
variable[Incs] assign[=] list[[<ast.Constant object at 0x7da1b0475750>, <ast.Constant object at 0x7da1b0475630>, <ast.Constant object at 0x7da1b0475600>, <ast.Constant object at 0x7da1b0475660>, <ast.Constant object at 0x7da1b0475780>, <ast.UnaryOp object at 0x7da1b04757e0>, <ast.UnaryOp object at 0x7da1b0475900>, <ast.Constant object at 0x7da1b0475840>, <ast.Constant object at 0x7da1b04758a0>, <ast.Constant object at 0x7da1b0475930>, <ast.Constant object at 0x7da1b0475810>, <ast.UnaryOp object at 0x7da1b0475960>, <ast.UnaryOp object at 0x7da1b0475a80>, <ast.UnaryOp object at 0x7da1b04759c0>, <ast.Constant object at 0x7da1b0475ab0>]]
with call[name[open], parameter[name[k15file], constant[r]]] begin[:]
variable[lines] assign[=] call[name[finput].readlines, parameter[]]
<ast.Tuple object at 0x7da1b0475d20> assign[=] tuple[[<ast.List object at 0x7da1b0475e70>, <ast.List object at 0x7da1b0476590>, <ast.List object at 0x7da1b0476500>, <ast.List object at 0x7da1b0476530>]]
<ast.Tuple object at 0x7da1b04760b0> assign[=] tuple[[<ast.Dict object at 0x7da1b0475f90>, <ast.Dict object at 0x7da1b0475fc0>, <ast.Dict object at 0x7da1b0475ff0>, <ast.Dict object at 0x7da1b0476050>, <ast.Dict object at 0x7da1b0475ea0>, <ast.Dict object at 0x7da1b0476080>]]
for taget[name[line]] in starred[name[lines]] begin[:]
<ast.AugAssign object at 0x7da1b0476410>
variable[rec] assign[=] call[name[line].split, parameter[]]
if compare[name[linecnt] equal[==] constant[1]] begin[:]
call[name[MeasRec]][name[method_col]] assign[=] constant[]
call[name[SpecRec]][name[method_col]] assign[=] constant[]
call[name[SampRec]][name[method_col]] assign[=] constant[]
call[name[AnisRec]][name[method_col]] assign[=] constant[]
call[name[SiteRec]][name[method_col]] assign[=] constant[]
call[name[ResRec]][name[method_col]] assign[=] constant[]
call[name[MeasRec]][name[software_col]] assign[=] name[version_num]
call[name[SpecRec]][name[software_col]] assign[=] name[version_num]
call[name[SampRec]][name[software_col]] assign[=] name[version_num]
call[name[AnisRec]][name[software_col]] assign[=] name[version_num]
call[name[SiteRec]][name[software_col]] assign[=] name[version_num]
call[name[ResRec]][name[software_col]] assign[=] name[version_num]
call[name[MeasRec]][name[method_col]] assign[=] constant[LP-X]
call[name[MeasRec]][name[quality_col]] assign[=] constant[g]
call[name[MeasRec]][name[meas_standard_col]] assign[=] constant[u]
call[name[MeasRec]][name[citation_col]] assign[=] constant[This study]
call[name[SpecRec]][name[citation_col]] assign[=] constant[This study]
call[name[SampRec]][name[citation_col]] assign[=] constant[This study]
call[name[AnisRec]][name[citation_col]] assign[=] constant[This study]
call[name[ResRec]][name[citation_col]] assign[=] constant[This study]
call[name[MeasRec]][name[spec_name_col]] assign[=] call[name[rec]][constant[0]]
call[name[MeasRec]][name[experiment_col]] assign[=] binary_operation[call[name[rec]][constant[0]] + constant[:LP-AN-MS]]
call[name[AnisRec]][name[experiment_col]] assign[=] binary_operation[call[name[rec]][constant[0]] + constant[:AMS]]
call[name[ResRec]][name[experiment_col]] assign[=] binary_operation[call[name[rec]][constant[0]] + constant[:AMS]]
call[name[SpecRec]][name[spec_name_col]] assign[=] call[name[rec]][constant[0]]
call[name[AnisRec]][name[spec_name_col]] assign[=] call[name[rec]][constant[0]]
call[name[SampRec]][name[spec_name_col]] assign[=] call[name[rec]][constant[0]]
if compare[name[data_model_num] equal[==] constant[2]] begin[:]
call[name[ResRec]][name[description_col]] assign[=] call[name[rec]][constant[0]]
if compare[name[data_model_num] equal[==] constant[3]] begin[:]
call[name[ResRec]][name[spec_name_col]] assign[=] call[name[rec]][constant[0]]
variable[specnum] assign[=] call[name[int], parameter[name[specnum]]]
if compare[name[specnum] not_equal[!=] constant[0]] begin[:]
call[name[MeasRec]][name[samp_name_col]] assign[=] call[call[name[rec]][constant[0]]][<ast.Slice object at 0x7da1b03fd960>]
if compare[name[specnum] equal[==] constant[0]] begin[:]
call[name[MeasRec]][name[samp_name_col]] assign[=] call[name[rec]][constant[0]]
call[name[SampRec]][name[samp_name_col]] assign[=] call[name[MeasRec]][name[samp_name_col]]
call[name[SpecRec]][name[samp_name_col]] assign[=] call[name[MeasRec]][name[samp_name_col]]
call[name[AnisRec]][name[samp_name_col]] assign[=] call[name[MeasRec]][name[samp_name_col]]
if compare[name[data_model_num] equal[==] constant[3]] begin[:]
call[name[ResRec]][name[samp_name_col]] assign[=] call[name[MeasRec]][name[samp_name_col]]
if compare[name[sample_naming_con] equal[==] constant[6]] begin[:]
for taget[name[samp]] in starred[name[Samps]] begin[:]
if compare[call[name[samp]][name[samp_name_col]] equal[==] call[name[AnisRec]][name[samp_name_col]]] begin[:]
variable[sitename] assign[=] call[name[samp]][name[site_name_col]]
variable[location] assign[=] call[name[samp]][name[loc_name_col]]
call[name[MeasRec]][name[site_name_col]] assign[=] name[sitename]
call[name[MeasRec]][name[loc_name_col]] assign[=] name[location]
call[name[SampRec]][name[site_name_col]] assign[=] call[name[MeasRec]][name[site_name_col]]
call[name[SpecRec]][name[site_name_col]] assign[=] call[name[MeasRec]][name[site_name_col]]
call[name[AnisRec]][name[site_name_col]] assign[=] call[name[MeasRec]][name[site_name_col]]
call[name[ResRec]][name[loc_name_col]] assign[=] name[location]
call[name[ResRec]][name[site_name_col]] assign[=] call[name[MeasRec]][name[site_name_col]]
if compare[name[data_model_num] equal[==] constant[2]] begin[:]
call[name[ResRec]][binary_operation[name[site_name_col] + constant[s]]] assign[=] call[name[MeasRec]][name[site_name_col]]
call[name[SampRec]][name[loc_name_col]] assign[=] call[name[MeasRec]][name[loc_name_col]]
call[name[SpecRec]][name[loc_name_col]] assign[=] call[name[MeasRec]][name[loc_name_col]]
call[name[AnisRec]][name[loc_name_col]] assign[=] call[name[MeasRec]][name[loc_name_col]]
if compare[name[data_model_num] equal[==] constant[2]] begin[:]
call[name[ResRec]][binary_operation[name[loc_name_col] + constant[s]]] assign[=] call[name[MeasRec]][name[loc_name_col]]
if compare[call[name[len], parameter[name[rec]]] greater_or_equal[>=] constant[3]] begin[:]
<ast.Tuple object at 0x7da2045676a0> assign[=] tuple[[<ast.Subscript object at 0x7da204564520>, <ast.Subscript object at 0x7da204564550>]]
<ast.Tuple object at 0x7da204565b40> assign[=] tuple[[<ast.Call object at 0x7da2045666b0>, <ast.Call object at 0x7da2045640d0>, <ast.Constant object at 0x7da204565000>]]
if compare[call[name[len], parameter[name[rec]]] equal[==] constant[5]] begin[:]
<ast.Tuple object at 0x7da204564a90> assign[=] tuple[[<ast.BinOp object at 0x7da204567310>, <ast.Subscript object at 0x7da204564610>]]
<ast.Tuple object at 0x7da204565810> assign[=] tuple[[<ast.BinOp object at 0x7da204565480>, <ast.Call object at 0x7da204564400>, <ast.Constant object at 0x7da204567730>, <ast.Constant object at 0x7da204567e20>]]
call[name[pmag].magic_write, parameter[name[samp_file], name[SampRecs], name[samp_table_name]]]
if compare[name[data_model_num] equal[==] constant[3]] begin[:]
call[name[AnisRecs].extend, parameter[name[ResRecs]]]
variable[SpecRecs] assign[=] call[name[AnisRecs].copy, parameter[]]
<ast.Tuple object at 0x7da18bc719c0> assign[=] call[name[pmag].fillkeys, parameter[name[SpecRecs]]]
call[name[pmag].magic_write, parameter[name[aniso_outfile], name[SpecRecs], constant[specimens]]]
variable[flist] assign[=] list[[<ast.Name object at 0x7da18bc73dc0>, <ast.Name object at 0x7da18bc72050>, <ast.Name object at 0x7da18bc709d0>]]
call[name[pmag].magic_write, parameter[name[meas_file], name[MeasRecs], name[meas_table_name]]]
call[name[print], parameter[binary_operation[constant[Data saved to: ] + call[constant[, ].join, parameter[name[flist]]]]]]
return[tuple[[<ast.Constant object at 0x7da18bc73250>, <ast.Name object at 0x7da18bc73610>]]] | keyword[def] identifier[k15] ( identifier[k15file] , identifier[dir_path] = literal[string] , identifier[input_dir_path] = literal[string] ,
identifier[meas_file] = literal[string] , identifier[aniso_outfile] = literal[string] ,
identifier[samp_file] = literal[string] , identifier[result_file] = literal[string] ,
identifier[specnum] = literal[int] , identifier[sample_naming_con] = literal[string] , identifier[location] = literal[string] ,
identifier[data_model_num] = literal[int] ):
literal[string]
identifier[input_dir_path] , identifier[output_dir_path] = identifier[pmag] . identifier[fix_directories] ( identifier[input_dir_path] , identifier[dir_path] )
identifier[version_num] = identifier[pmag] . identifier[get_version] ()
identifier[syn] = literal[int]
identifier[itilt] , identifier[igeo] , identifier[linecnt] , identifier[key] = literal[int] , literal[int] , literal[int] , literal[string]
identifier[first_save] = literal[int]
identifier[k15] =[]
identifier[citation] = literal[string]
identifier[data_model_num] = identifier[int] ( identifier[float] ( identifier[data_model_num] ))
identifier[spec_name_col] = literal[string]
identifier[samp_name_col] = literal[string]
identifier[site_name_col] = literal[string]
identifier[loc_name_col] = literal[string]
identifier[citation_col] = literal[string]
identifier[method_col] = literal[string]
identifier[site_description_col] = literal[string]
identifier[expedition_col] = literal[string]
identifier[instrument_col] = literal[string]
identifier[experiment_col] = literal[string]
identifier[analyst_col] = literal[string]
identifier[quality_col] = literal[string]
identifier[aniso_quality_col] = literal[string]
identifier[meas_standard_col] = literal[string]
identifier[meas_description_col] = literal[string]
identifier[aniso_type_col] = literal[string]
identifier[aniso_unit_col] = literal[string]
identifier[aniso_n_col] = literal[string]
identifier[azimuth_col] = literal[string]
identifier[spec_volume_col] = literal[string]
identifier[samp_dip_col] = literal[string]
identifier[bed_dip_col] = literal[string]
identifier[bed_dip_direction_col] = literal[string]
identifier[chi_vol_col] = literal[string]
identifier[aniso_sigma_col] = literal[string]
identifier[aniso_unit_col] = literal[string]
identifier[aniso_tilt_corr_col] = literal[string]
identifier[meas_table_name] = literal[string]
identifier[spec_table_name] = literal[string]
identifier[samp_table_name] = literal[string]
identifier[site_table_name] = literal[string]
identifier[meas_name_col] = literal[string]
identifier[meas_time_col] = literal[string]
identifier[meas_ac_col] = literal[string]
identifier[meas_temp_col] = literal[string]
identifier[software_col] = literal[string]
identifier[description_col] = literal[string]
identifier[treat_temp_col] = literal[string]
identifier[meas_orient_phi_col] = literal[string]
identifier[meas_orient_theta_col] = literal[string]
identifier[aniso_mean_col] = literal[string]
identifier[result_description_col] = literal[string]
keyword[if] identifier[data_model_num] == literal[int] :
keyword[if] identifier[meas_file] == literal[string] :
identifier[meas_file] = literal[string]
keyword[if] identifier[samp_file] == literal[string] :
identifier[samp_file] = literal[string]
keyword[if] identifier[aniso_outfile] == literal[string] :
identifier[aniso_outfile] = literal[string]
keyword[if] identifier[data_model_num] == literal[int] :
identifier[spec_name_col] = literal[string]
identifier[samp_name_col] = literal[string]
identifier[site_name_col] = literal[string]
identifier[loc_name_col] = literal[string]
identifier[citation_col] = literal[string]
identifier[method_col] = literal[string]
identifier[site_description_col] = literal[string]
identifier[expedition_col] = literal[string]
identifier[instrument_col] = literal[string]
identifier[experiment_col] = literal[string]
identifier[analyst_col] = literal[string]
identifier[quality_col] = literal[string]
identifier[aniso_quality_col] = literal[string]
identifier[meas_standard_col] = literal[string]
identifier[meas_description_col] = literal[string]
identifier[aniso_type_col] = literal[string]
identifier[aniso_unit_col] = literal[string]
identifier[aniso_n_col] = literal[string]
identifier[azimuth_col] = literal[string]
identifier[spec_volume_col] = literal[string]
identifier[samp_dip_col] = literal[string]
identifier[bed_dip_col] = literal[string]
identifier[bed_dip_direction_col] = literal[string]
identifier[chi_vol_col] = literal[string]
identifier[aniso_sigma_col] = literal[string]
identifier[aniso_unit_col] = literal[string]
identifier[aniso_tilt_corr_col] = literal[string]
identifier[meas_table_name] = literal[string]
identifier[spec_table_name] = literal[string]
identifier[samp_table_name] = literal[string]
identifier[site_table_name] = literal[string]
identifier[meas_name_col] = literal[string]
identifier[meas_time_col] = literal[string]
identifier[meas_ac_col] = literal[string]
identifier[meas_temp_col] = literal[string]
identifier[software_col] = literal[string]
identifier[description_col] = literal[string]
identifier[treat_temp_col] = literal[string]
identifier[meas_temp_col] = literal[string]
identifier[meas_orient_phi_col] = literal[string]
identifier[meas_orient_theta_col] = literal[string]
identifier[aniso_mean_col] = literal[string]
identifier[result_description_col] = literal[string]
identifier[Z] = literal[string]
keyword[if] literal[string] keyword[in] identifier[sample_naming_con] :
keyword[if] literal[string] keyword[not] keyword[in] identifier[sample_naming_con] :
identifier[print] ( literal[string] )
keyword[return] keyword[False] , literal[string]
keyword[else] :
identifier[Z] = identifier[sample_naming_con] . identifier[split] ( literal[string] )[ literal[int] ]
identifier[sample_naming_con] = literal[string]
keyword[if] identifier[sample_naming_con] == literal[string] :
identifier[Samps] , identifier[filetype] = identifier[pmag] . identifier[magic_read] (
identifier[os] . identifier[path] . identifier[join] ( identifier[input_dir_path] , identifier[samp_table_name] + literal[string] ))
identifier[samp_file] = identifier[pmag] . identifier[resolve_file_name] ( identifier[samp_file] , identifier[output_dir_path] )
identifier[meas_file] = identifier[pmag] . identifier[resolve_file_name] ( identifier[meas_file] , identifier[output_dir_path] )
identifier[aniso_outfile] = identifier[pmag] . identifier[resolve_file_name] ( identifier[aniso_outfile] , identifier[output_dir_path] )
identifier[result_file] = identifier[pmag] . identifier[resolve_file_name] ( identifier[result_file] , identifier[output_dir_path] )
identifier[k15file] = identifier[pmag] . identifier[resolve_file_name] ( identifier[k15file] , identifier[input_dir_path] )
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[k15file] ):
identifier[print] ( identifier[k15file] )
keyword[return] keyword[False] , literal[string]
keyword[try] :
identifier[SampRecs] , identifier[filetype] = identifier[pmag] . identifier[magic_read] (
identifier[samp_file] )
identifier[samplist] =[]
keyword[for] identifier[samp] keyword[in] identifier[SampRecs] :
keyword[if] identifier[samp] [ identifier[samp_name_col] ] keyword[not] keyword[in] identifier[samplist] :
identifier[samplist] . identifier[append] ( identifier[samp] [ identifier[samp_name_col] ])
keyword[except] identifier[IOError] :
identifier[SampRecs] =[]
identifier[Decs] =[ literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] ]
identifier[Incs] =[ literal[int] , literal[int] , literal[int] , literal[int] , literal[int] ,- literal[int] ,- literal[int] , literal[int] , literal[int] , literal[int] , literal[int] ,- literal[int] ,- literal[int] ,- literal[int] , literal[int] ]
keyword[with] identifier[open] ( identifier[k15file] , literal[string] ) keyword[as] identifier[finput] :
identifier[lines] = identifier[finput] . identifier[readlines] ()
identifier[MeasRecs] , identifier[SpecRecs] , identifier[AnisRecs] , identifier[ResRecs] =[],[],[],[]
identifier[MeasRec] , identifier[SpecRec] , identifier[SampRec] , identifier[SiteRec] , identifier[AnisRec] , identifier[ResRec] ={},{},{},{},{},{}
keyword[for] identifier[line] keyword[in] identifier[lines] :
identifier[linecnt] += literal[int]
identifier[rec] = identifier[line] . identifier[split] ()
keyword[if] identifier[linecnt] == literal[int] :
identifier[MeasRec] [ identifier[method_col] ]= literal[string]
identifier[SpecRec] [ identifier[method_col] ]= literal[string]
identifier[SampRec] [ identifier[method_col] ]= literal[string]
identifier[AnisRec] [ identifier[method_col] ]= literal[string]
identifier[SiteRec] [ identifier[method_col] ]= literal[string]
identifier[ResRec] [ identifier[method_col] ]= literal[string]
identifier[MeasRec] [ identifier[software_col] ]= identifier[version_num]
identifier[SpecRec] [ identifier[software_col] ]= identifier[version_num]
identifier[SampRec] [ identifier[software_col] ]= identifier[version_num]
identifier[AnisRec] [ identifier[software_col] ]= identifier[version_num]
identifier[SiteRec] [ identifier[software_col] ]= identifier[version_num]
identifier[ResRec] [ identifier[software_col] ]= identifier[version_num]
identifier[MeasRec] [ identifier[method_col] ]= literal[string]
identifier[MeasRec] [ identifier[quality_col] ]= literal[string]
identifier[MeasRec] [ identifier[meas_standard_col] ]= literal[string]
identifier[MeasRec] [ identifier[citation_col] ]= literal[string]
identifier[SpecRec] [ identifier[citation_col] ]= literal[string]
identifier[SampRec] [ identifier[citation_col] ]= literal[string]
identifier[AnisRec] [ identifier[citation_col] ]= literal[string]
identifier[ResRec] [ identifier[citation_col] ]= literal[string]
identifier[MeasRec] [ identifier[spec_name_col] ]= identifier[rec] [ literal[int] ]
identifier[MeasRec] [ identifier[experiment_col] ]= identifier[rec] [ literal[int] ]+ literal[string]
identifier[AnisRec] [ identifier[experiment_col] ]= identifier[rec] [ literal[int] ]+ literal[string]
identifier[ResRec] [ identifier[experiment_col] ]= identifier[rec] [ literal[int] ]+ literal[string]
identifier[SpecRec] [ identifier[spec_name_col] ]= identifier[rec] [ literal[int] ]
identifier[AnisRec] [ identifier[spec_name_col] ]= identifier[rec] [ literal[int] ]
identifier[SampRec] [ identifier[spec_name_col] ]= identifier[rec] [ literal[int] ]
keyword[if] identifier[data_model_num] == literal[int] :
identifier[ResRec] [ identifier[description_col] ]= identifier[rec] [ literal[int] ]
keyword[if] identifier[data_model_num] == literal[int] :
identifier[ResRec] [ identifier[spec_name_col] ]= identifier[rec] [ literal[int] ]
identifier[specnum] = identifier[int] ( identifier[specnum] )
keyword[if] identifier[specnum] != literal[int] :
identifier[MeasRec] [ identifier[samp_name_col] ]= identifier[rec] [ literal[int] ][:- identifier[specnum] ]
keyword[if] identifier[specnum] == literal[int] :
identifier[MeasRec] [ identifier[samp_name_col] ]= identifier[rec] [ literal[int] ]
identifier[SampRec] [ identifier[samp_name_col] ]= identifier[MeasRec] [ identifier[samp_name_col] ]
identifier[SpecRec] [ identifier[samp_name_col] ]= identifier[MeasRec] [ identifier[samp_name_col] ]
identifier[AnisRec] [ identifier[samp_name_col] ]= identifier[MeasRec] [ identifier[samp_name_col] ]
keyword[if] identifier[data_model_num] == literal[int] :
identifier[ResRec] [ identifier[samp_name_col] ]= identifier[MeasRec] [ identifier[samp_name_col] ]
keyword[else] :
identifier[ResRec] [ identifier[samp_name_col] + literal[string] ]= identifier[MeasRec] [ identifier[samp_name_col] ]
keyword[if] identifier[sample_naming_con] == literal[string] :
keyword[for] identifier[samp] keyword[in] identifier[Samps] :
keyword[if] identifier[samp] [ identifier[samp_name_col] ]== identifier[AnisRec] [ identifier[samp_name_col] ]:
identifier[sitename] = identifier[samp] [ identifier[site_name_col] ]
identifier[location] = identifier[samp] [ identifier[loc_name_col] ]
keyword[elif] identifier[sample_naming_con] != literal[string] :
identifier[sitename] = identifier[pmag] . identifier[parse_site] (
identifier[AnisRec] [ identifier[samp_name_col] ], identifier[sample_naming_con] , identifier[Z] )
identifier[MeasRec] [ identifier[site_name_col] ]= identifier[sitename]
identifier[MeasRec] [ identifier[loc_name_col] ]= identifier[location]
identifier[SampRec] [ identifier[site_name_col] ]= identifier[MeasRec] [ identifier[site_name_col] ]
identifier[SpecRec] [ identifier[site_name_col] ]= identifier[MeasRec] [ identifier[site_name_col] ]
identifier[AnisRec] [ identifier[site_name_col] ]= identifier[MeasRec] [ identifier[site_name_col] ]
identifier[ResRec] [ identifier[loc_name_col] ]= identifier[location]
identifier[ResRec] [ identifier[site_name_col] ]= identifier[MeasRec] [ identifier[site_name_col] ]
keyword[if] identifier[data_model_num] == literal[int] :
identifier[ResRec] [ identifier[site_name_col] + literal[string] ]= identifier[MeasRec] [ identifier[site_name_col] ]
identifier[SampRec] [ identifier[loc_name_col] ]= identifier[MeasRec] [ identifier[loc_name_col] ]
identifier[SpecRec] [ identifier[loc_name_col] ]= identifier[MeasRec] [ identifier[loc_name_col] ]
identifier[AnisRec] [ identifier[loc_name_col] ]= identifier[MeasRec] [ identifier[loc_name_col] ]
keyword[if] identifier[data_model_num] == literal[int] :
identifier[ResRec] [ identifier[loc_name_col] + literal[string] ]= identifier[MeasRec] [ identifier[loc_name_col] ]
keyword[if] identifier[len] ( identifier[rec] )>= literal[int] :
identifier[SampRec] [ identifier[azimuth_col] ], identifier[SampRec] [ identifier[samp_dip_col] ]= identifier[rec] [ literal[int] ], identifier[rec] [ literal[int] ]
identifier[az] , identifier[pl] , identifier[igeo] = identifier[float] ( identifier[rec] [ literal[int] ]), identifier[float] ( identifier[rec] [ literal[int] ]), literal[int]
keyword[if] identifier[len] ( identifier[rec] )== literal[int] :
identifier[SampRec] [ identifier[bed_dip_direction_col] ], identifier[SampRec] [ identifier[bed_dip_col] ]= literal[string] %(
literal[int] + identifier[float] ( identifier[rec] [ literal[int] ])),( identifier[rec] [ literal[int] ])
identifier[bed_az] , identifier[bed_dip] , identifier[itilt] , identifier[igeo] = literal[int] + identifier[float] ( identifier[rec] [ literal[int] ]), identifier[float] ( identifier[rec] [ literal[int] ]), literal[int] , literal[int]
keyword[else] :
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] ):
identifier[k15] . identifier[append] ( literal[int] * identifier[float] ( identifier[rec] [ identifier[i] ]))
keyword[if] identifier[linecnt] == literal[int] :
identifier[sbar] , identifier[sigma] , identifier[bulk] = identifier[pmag] . identifier[dok15_s] ( identifier[k15] )
identifier[hpars] = identifier[pmag] . identifier[dohext] ( literal[int] , identifier[sigma] , identifier[sbar] )
identifier[MeasRec] [ identifier[treat_temp_col] ]= literal[string] %(
literal[int] )
identifier[MeasRec] [ identifier[meas_temp_col] ]= literal[string] %(
literal[int] )
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] ):
identifier[NewMeas] = identifier[copy] . identifier[deepcopy] ( identifier[MeasRec] )
identifier[NewMeas] [ identifier[meas_orient_phi_col] ]= literal[string] %( identifier[Decs] [ identifier[i] ])
identifier[NewMeas] [ identifier[meas_orient_theta_col] ]= literal[string] %( identifier[Incs] [ identifier[i] ])
identifier[NewMeas] [ identifier[chi_vol_col] ]= literal[string] %( identifier[k15] [ identifier[i] ])
identifier[NewMeas] [ identifier[meas_name_col] ]= literal[string] %( identifier[i] + literal[int] )
keyword[if] identifier[data_model_num] == literal[int] :
identifier[NewMeas] [ literal[string] ]= identifier[rec] [ literal[int] ]+ literal[string]
keyword[else] :
identifier[NewMeas] [ literal[string] ]= identifier[rec] [ literal[int] ]+ literal[string]
identifier[MeasRecs] . identifier[append] ( identifier[NewMeas] )
keyword[if] identifier[SampRec] [ identifier[samp_name_col] ] keyword[not] keyword[in] identifier[samplist] :
identifier[SampRecs] . identifier[append] ( identifier[SampRec] )
identifier[samplist] . identifier[append] ( identifier[SampRec] [ identifier[samp_name_col] ])
identifier[SpecRecs] . identifier[append] ( identifier[SpecRec] )
identifier[AnisRec] [ identifier[aniso_type_col] ]= literal[string]
identifier[ResRec] [ identifier[aniso_type_col] ]= literal[string]
identifier[s1_val] = literal[string] . identifier[format] ( identifier[sbar] [ literal[int] ])
identifier[s2_val] = literal[string] . identifier[format] ( identifier[sbar] [ literal[int] ])
identifier[s3_val] = literal[string] . identifier[format] ( identifier[sbar] [ literal[int] ])
identifier[s4_val] = literal[string] . identifier[format] ( identifier[sbar] [ literal[int] ])
identifier[s5_val] = literal[string] . identifier[format] ( identifier[sbar] [ literal[int] ])
identifier[s6_val] = literal[string] . identifier[format] ( identifier[sbar] [ literal[int] ])
keyword[if] identifier[data_model_num] == literal[int] :
identifier[AnisRec] [ literal[string] ]= identifier[s1_val]
identifier[AnisRec] [ literal[string] ]= identifier[s2_val]
identifier[AnisRec] [ literal[string] ]= identifier[s3_val]
identifier[AnisRec] [ literal[string] ]= identifier[s4_val]
identifier[AnisRec] [ literal[string] ]= identifier[s5_val]
identifier[AnisRec] [ literal[string] ]= identifier[s6_val]
keyword[else] :
identifier[vals] =[ identifier[s1_val] , identifier[s2_val] , identifier[s3_val] , identifier[s4_val] , identifier[s5_val] , identifier[s6_val] ]
identifier[AnisRec] [ literal[string] ]= literal[string] . identifier[join] ([ identifier[str] ( identifier[v] ). identifier[strip] () keyword[for] identifier[v] keyword[in] identifier[vals] ])
identifier[AnisRec] [ identifier[aniso_mean_col] ]= literal[string] %( identifier[bulk] )
identifier[AnisRec] [ identifier[aniso_sigma_col] ]= literal[string] %( identifier[sigma] )
identifier[AnisRec] [ identifier[aniso_mean_col] ]= literal[string] . identifier[format] ( identifier[bulk] )
identifier[AnisRec] [ identifier[aniso_sigma_col] ]= literal[string] . identifier[format] ( identifier[sigma] )
identifier[AnisRec] [ identifier[aniso_unit_col] ]= literal[string]
identifier[AnisRec] [ identifier[aniso_n_col] ]= literal[string]
identifier[AnisRec] [ identifier[aniso_tilt_corr_col] ]= literal[string]
identifier[AnisRec] [ identifier[method_col] ]= literal[string]
identifier[AnisRecs] . identifier[append] ( identifier[AnisRec] )
identifier[ResRec] [ identifier[method_col] ]= literal[string]
identifier[ResRec] [ identifier[aniso_tilt_corr_col] ]= literal[string]
keyword[if] identifier[data_model_num] == literal[int] :
identifier[aniso_v1] = literal[string] . identifier[join] ([ identifier[str] ( identifier[i] ) keyword[for] identifier[i] keyword[in] ( identifier[hpars] [ literal[string] ], identifier[hpars] [ literal[string] ], identifier[hpars] [ literal[string] ], identifier[hpars] [ literal[string] ], identifier[hpars] [ literal[string] ], identifier[hpars] [ literal[string] ], identifier[hpars] [ literal[string] ], identifier[hpars] [ literal[string] ], identifier[hpars] [ literal[string] ])])
identifier[aniso_v2] = literal[string] . identifier[join] ([ identifier[str] ( identifier[i] ) keyword[for] identifier[i] keyword[in] ( identifier[hpars] [ literal[string] ], identifier[hpars] [ literal[string] ], identifier[hpars] [ literal[string] ], identifier[hpars] [ literal[string] ], identifier[hpars] [ literal[string] ], identifier[hpars] [ literal[string] ], identifier[hpars] [ literal[string] ], identifier[hpars] [ literal[string] ], identifier[hpars] [ literal[string] ])])
identifier[aniso_v3] = literal[string] . identifier[join] ([ identifier[str] ( identifier[i] ) keyword[for] identifier[i] keyword[in] ( identifier[hpars] [ literal[string] ], identifier[hpars] [ literal[string] ], identifier[hpars] [ literal[string] ], identifier[hpars] [ literal[string] ], identifier[hpars] [ literal[string] ], identifier[hpars] [ literal[string] ], identifier[hpars] [ literal[string] ], identifier[hpars] [ literal[string] ], identifier[hpars] [ literal[string] ])])
identifier[ResRec] [ literal[string] ]= identifier[aniso_v1]
identifier[ResRec] [ literal[string] ]= identifier[aniso_v2]
identifier[ResRec] [ literal[string] ]= identifier[aniso_v3]
keyword[else] :
identifier[ResRec] [ literal[string] ]= literal[string] %( identifier[hpars] [ literal[string] ])
identifier[ResRec] [ literal[string] ]= literal[string] %( identifier[hpars] [ literal[string] ])
identifier[ResRec] [ literal[string] ]= literal[string] %( identifier[hpars] [ literal[string] ])
identifier[ResRec] [ literal[string] ]= literal[string] %( identifier[hpars] [ literal[string] ])
identifier[ResRec] [ literal[string] ]= literal[string] %( identifier[hpars] [ literal[string] ])
identifier[ResRec] [ literal[string] ]= literal[string] %( identifier[hpars] [ literal[string] ])
identifier[ResRec] [ literal[string] ]= literal[string] %( identifier[hpars] [ literal[string] ])
identifier[ResRec] [ literal[string] ]= literal[string] %( identifier[hpars] [ literal[string] ])
identifier[ResRec] [ literal[string] ]= literal[string] %( identifier[hpars] [ literal[string] ])
identifier[ResRec] [ literal[string] ]= literal[string] %( identifier[hpars] [ literal[string] ])
identifier[ResRec] [ literal[string] ]= literal[string] %( identifier[hpars] [ literal[string] ])
identifier[ResRec] [ literal[string] ]= literal[string] %( identifier[hpars] [ literal[string] ])
identifier[ResRec] [ literal[string] ]= identifier[ResRec] [ literal[string] ]
identifier[ResRec] [ literal[string] ]= identifier[ResRec] [ literal[string] ]
identifier[ResRec] [ literal[string] ]= identifier[ResRec] [ literal[string] ]
identifier[ResRec] [ literal[string] ]= identifier[ResRec] [ literal[string] ]
identifier[ResRec] [ literal[string] ]= identifier[ResRec] [ literal[string] ]
identifier[ResRec] [ literal[string] ]= identifier[ResRec] [ literal[string] ]
identifier[ResRec] [ literal[string] ]= identifier[ResRec] [ literal[string] ]
identifier[ResRec] [ literal[string] ]= identifier[ResRec] [ literal[string] ]
identifier[ResRec] [ literal[string] ]= identifier[ResRec] [ literal[string] ]
identifier[ResRec] [ literal[string] ]= identifier[ResRec] [ literal[string] ]
identifier[ResRec] [ literal[string] ]= identifier[ResRec] [ literal[string] ]
identifier[ResRec] [ literal[string] ]= identifier[ResRec] [ literal[string] ]
identifier[ResRec] [ literal[string] ]= literal[string] %(
identifier[hpars] [ literal[string] ])
identifier[ResRec] [ literal[string] ]= literal[string] %(
identifier[hpars] [ literal[string] ])
identifier[ResRec] [ literal[string] ]= literal[string] %(
identifier[hpars] [ literal[string] ])
identifier[ResRec] [ literal[string] ]= literal[string] %(
identifier[hpars] [ literal[string] ])
identifier[ResRec] [ literal[string] ]= literal[string] %(
identifier[hpars] [ literal[string] ])
identifier[ResRec] [ literal[string] ]= literal[string] %(
identifier[hpars] [ literal[string] ])
identifier[ResRec] [ identifier[result_description_col] ]= literal[string] + identifier[hpars] [ literal[string] ]+ literal[string] + identifier[hpars] [ literal[string] ]
identifier[ResRecs] . identifier[append] ( identifier[ResRec] )
keyword[if] identifier[igeo] == literal[int] :
identifier[sbarg] = identifier[pmag] . identifier[dosgeo] ( identifier[sbar] , identifier[az] , identifier[pl] )
identifier[hparsg] = identifier[pmag] . identifier[dohext] ( literal[int] , identifier[sigma] , identifier[sbarg] )
identifier[AnisRecG] = identifier[copy] . identifier[copy] ( identifier[AnisRec] )
identifier[ResRecG] = identifier[copy] . identifier[copy] ( identifier[ResRec] )
keyword[if] identifier[data_model_num] == literal[int] :
identifier[AnisRecG] [ literal[string] ]= literal[string] . identifier[join] ( literal[string] . identifier[format] ( identifier[i] ) keyword[for] identifier[i] keyword[in] identifier[sbarg] )
keyword[if] identifier[data_model_num] == literal[int] :
identifier[AnisRecG] [ literal[string] ]= literal[string] %( identifier[sbarg] [ literal[int] ])
identifier[AnisRecG] [ literal[string] ]= literal[string] %( identifier[sbarg] [ literal[int] ])
identifier[AnisRecG] [ literal[string] ]= literal[string] %( identifier[sbarg] [ literal[int] ])
identifier[AnisRecG] [ literal[string] ]= literal[string] %( identifier[sbarg] [ literal[int] ])
identifier[AnisRecG] [ literal[string] ]= literal[string] %( identifier[sbarg] [ literal[int] ])
identifier[AnisRecG] [ literal[string] ]= literal[string] %( identifier[sbarg] [ literal[int] ])
identifier[AnisRecG] [ identifier[aniso_tilt_corr_col] ]= literal[string]
identifier[ResRecG] [ identifier[aniso_tilt_corr_col] ]= literal[string]
keyword[if] identifier[data_model_num] == literal[int] :
identifier[aniso_v1] = literal[string] . identifier[join] ([ identifier[str] ( identifier[i] ) keyword[for] identifier[i] keyword[in] ( identifier[hparsg] [ literal[string] ], identifier[hparsg] [ literal[string] ], identifier[hparsg] [ literal[string] ], identifier[hparsg] [ literal[string] ], identifier[hparsg] [ literal[string] ], identifier[hparsg] [ literal[string] ], identifier[hparsg] [ literal[string] ], identifier[hparsg] [ literal[string] ], identifier[hparsg] [ literal[string] ])])
identifier[aniso_v2] = literal[string] . identifier[join] ([ identifier[str] ( identifier[i] ) keyword[for] identifier[i] keyword[in] ( identifier[hparsg] [ literal[string] ], identifier[hparsg] [ literal[string] ], identifier[hparsg] [ literal[string] ], identifier[hparsg] [ literal[string] ], identifier[hparsg] [ literal[string] ], identifier[hparsg] [ literal[string] ], identifier[hparsg] [ literal[string] ], identifier[hparsg] [ literal[string] ], identifier[hparsg] [ literal[string] ])])
identifier[aniso_v3] = literal[string] . identifier[join] ([ identifier[str] ( identifier[i] ) keyword[for] identifier[i] keyword[in] ( identifier[hparsg] [ literal[string] ], identifier[hparsg] [ literal[string] ], identifier[hparsg] [ literal[string] ], identifier[hparsg] [ literal[string] ], identifier[hparsg] [ literal[string] ], identifier[hparsg] [ literal[string] ], identifier[hparsg] [ literal[string] ], identifier[hparsg] [ literal[string] ], identifier[hparsg] [ literal[string] ])])
identifier[ResRecG] [ literal[string] ]= identifier[aniso_v1]
identifier[ResRecG] [ literal[string] ]= identifier[aniso_v2]
identifier[ResRecG] [ literal[string] ]= identifier[aniso_v3]
keyword[if] identifier[data_model_num] == literal[int] :
identifier[ResRecG] [ literal[string] ]= literal[string] %( identifier[hparsg] [ literal[string] ])
identifier[ResRecG] [ literal[string] ]= literal[string] %( identifier[hparsg] [ literal[string] ])
identifier[ResRecG] [ literal[string] ]= literal[string] %( identifier[hparsg] [ literal[string] ])
identifier[ResRecG] [ literal[string] ]= literal[string] %( identifier[hparsg] [ literal[string] ])
identifier[ResRecG] [ literal[string] ]= literal[string] %( identifier[hparsg] [ literal[string] ])
identifier[ResRecG] [ literal[string] ]= literal[string] %( identifier[hparsg] [ literal[string] ])
identifier[ResRecG] [ literal[string] ]= identifier[ResRecG] [ literal[string] ]
identifier[ResRecG] [ literal[string] ]= identifier[ResRecG] [ literal[string] ]
identifier[ResRecG] [ literal[string] ]= identifier[ResRecG] [ literal[string] ]
identifier[ResRecG] [ literal[string] ]= identifier[ResRecG] [ literal[string] ]
identifier[ResRecG] [ literal[string] ]= identifier[ResRecG] [ literal[string] ]
identifier[ResRecG] [ literal[string] ]= identifier[ResRecG] [ literal[string] ]
identifier[ResRecG] [ literal[string] ]= identifier[ResRecG] [ literal[string] ]
identifier[ResRecG] [ literal[string] ]= identifier[ResRecG] [ literal[string] ]
identifier[ResRecG] [ literal[string] ]= identifier[ResRecG] [ literal[string] ]
identifier[ResRecG] [ literal[string] ]= identifier[ResRecG] [ literal[string] ]
identifier[ResRecG] [ literal[string] ]= identifier[ResRecG] [ literal[string] ]
identifier[ResRecG] [ literal[string] ]= identifier[ResRecG] [ literal[string] ]
identifier[ResRecG] [ identifier[result_description_col] ]= literal[string] + identifier[hpars] [ literal[string] ]+ literal[string] + identifier[hpars] [ literal[string] ]
identifier[ResRecs] . identifier[append] ( identifier[ResRecG] )
identifier[AnisRecs] . identifier[append] ( identifier[AnisRecG] )
keyword[if] identifier[itilt] == literal[int] :
identifier[sbart] = identifier[pmag] . identifier[dostilt] ( identifier[sbarg] , identifier[bed_az] , identifier[bed_dip] )
identifier[hparst] = identifier[pmag] . identifier[dohext] ( literal[int] , identifier[sigma] , identifier[sbart] )
identifier[AnisRecT] = identifier[copy] . identifier[copy] ( identifier[AnisRec] )
identifier[ResRecT] = identifier[copy] . identifier[copy] ( identifier[ResRec] )
keyword[if] identifier[data_model_num] == literal[int] :
identifier[aniso_v1] = literal[string] . identifier[join] ([ identifier[str] ( identifier[i] ) keyword[for] identifier[i] keyword[in] ( identifier[hparst] [ literal[string] ], identifier[hparst] [ literal[string] ], identifier[hparst] [ literal[string] ], identifier[hparst] [ literal[string] ], identifier[hparst] [ literal[string] ], identifier[hparst] [ literal[string] ], identifier[hparst] [ literal[string] ], identifier[hparst] [ literal[string] ], identifier[hparst] [ literal[string] ])])
identifier[aniso_v2] = literal[string] . identifier[join] ([ identifier[str] ( identifier[i] ) keyword[for] identifier[i] keyword[in] ( identifier[hparst] [ literal[string] ], identifier[hparst] [ literal[string] ], identifier[hparst] [ literal[string] ], identifier[hparst] [ literal[string] ], identifier[hparst] [ literal[string] ], identifier[hparst] [ literal[string] ], identifier[hparst] [ literal[string] ], identifier[hparst] [ literal[string] ], identifier[hparst] [ literal[string] ])])
identifier[aniso_v3] = literal[string] . identifier[join] ([ identifier[str] ( identifier[i] ) keyword[for] identifier[i] keyword[in] ( identifier[hparst] [ literal[string] ], identifier[hparst] [ literal[string] ], identifier[hparst] [ literal[string] ], identifier[hparst] [ literal[string] ], identifier[hparst] [ literal[string] ], identifier[hparst] [ literal[string] ], identifier[hparst] [ literal[string] ], identifier[hparst] [ literal[string] ], identifier[hparst] [ literal[string] ])])
identifier[ResRecT] [ literal[string] ]= identifier[aniso_v1]
identifier[ResRecT] [ literal[string] ]= identifier[aniso_v2]
identifier[ResRecT] [ literal[string] ]= identifier[aniso_v3]
keyword[if] identifier[data_model_num] == literal[int] :
identifier[AnisRecT] [ literal[string] ]= literal[string] %( identifier[sbart] [ literal[int] ])
identifier[AnisRecT] [ literal[string] ]= literal[string] %( identifier[sbart] [ literal[int] ])
identifier[AnisRecT] [ literal[string] ]= literal[string] %( identifier[sbart] [ literal[int] ])
identifier[AnisRecT] [ literal[string] ]= literal[string] %( identifier[sbart] [ literal[int] ])
identifier[AnisRecT] [ literal[string] ]= literal[string] %( identifier[sbart] [ literal[int] ])
identifier[AnisRecT] [ literal[string] ]= literal[string] %( identifier[sbart] [ literal[int] ])
identifier[AnisRecT] [ literal[string] ]= literal[string]
identifier[ResRecT] [ literal[string] ]= literal[string] %( identifier[hparst] [ literal[string] ])
identifier[ResRecT] [ literal[string] ]= literal[string] %( identifier[hparst] [ literal[string] ])
identifier[ResRecT] [ literal[string] ]= literal[string] %( identifier[hparst] [ literal[string] ])
identifier[ResRecT] [ literal[string] ]= literal[string] %( identifier[hparst] [ literal[string] ])
identifier[ResRecT] [ literal[string] ]= literal[string] %( identifier[hparst] [ literal[string] ])
identifier[ResRecT] [ literal[string] ]= literal[string] %( identifier[hparst] [ literal[string] ])
identifier[ResRecT] [ literal[string] ]= identifier[ResRecT] [ literal[string] ]
identifier[ResRecT] [ literal[string] ]= identifier[ResRecT] [ literal[string] ]
identifier[ResRecT] [ literal[string] ]= identifier[ResRecT] [ literal[string] ]
identifier[ResRecT] [ literal[string] ]= identifier[ResRecT] [ literal[string] ]
identifier[ResRecT] [ literal[string] ]= identifier[ResRecT] [ literal[string] ]
identifier[ResRecT] [ literal[string] ]= identifier[ResRecT] [ literal[string] ]
identifier[ResRecT] [ literal[string] ]= identifier[ResRecT] [ literal[string] ]
identifier[ResRecT] [ literal[string] ]= identifier[ResRecT] [ literal[string] ]
identifier[ResRecT] [ literal[string] ]= identifier[ResRecT] [ literal[string] ]
identifier[ResRecT] [ literal[string] ]= identifier[ResRecT] [ literal[string] ]
identifier[ResRecT] [ literal[string] ]= identifier[ResRecT] [ literal[string] ]
identifier[ResRecT] [ literal[string] ]= identifier[ResRecT] [ literal[string] ]
identifier[ResRecT] [ identifier[aniso_tilt_corr_col] ]= literal[string]
identifier[ResRecT] [ identifier[result_description_col] ]= literal[string] + identifier[hparst] [ literal[string] ]+ literal[string] + identifier[hparst] [ literal[string] ]
identifier[ResRecs] . identifier[append] ( identifier[ResRecT] )
identifier[AnisRecs] . identifier[append] ( identifier[AnisRecT] )
identifier[k15] , identifier[linecnt] =[], literal[int]
identifier[MeasRec] , identifier[SpecRec] , identifier[SampRec] , identifier[SiteRec] , identifier[AnisRec] ={},{},{},{},{}
identifier[pmag] . identifier[magic_write] ( identifier[samp_file] , identifier[SampRecs] , identifier[samp_table_name] )
keyword[if] identifier[data_model_num] == literal[int] :
identifier[AnisRecs] . identifier[extend] ( identifier[ResRecs] )
identifier[SpecRecs] = identifier[AnisRecs] . identifier[copy] ()
identifier[SpecRecs] , identifier[keys] = identifier[pmag] . identifier[fillkeys] ( identifier[SpecRecs] )
identifier[pmag] . identifier[magic_write] ( identifier[aniso_outfile] , identifier[SpecRecs] , literal[string] )
identifier[flist] =[ identifier[meas_file] , identifier[aniso_outfile] , identifier[samp_file] ]
keyword[else] :
identifier[pmag] . identifier[magic_write] ( identifier[aniso_outfile] , identifier[AnisRecs] , literal[string] )
identifier[pmag] . identifier[magic_write] ( identifier[result_file] , identifier[ResRecs] , literal[string] )
identifier[flist] =[ identifier[meas_file] , identifier[samp_file] , identifier[aniso_outfile] , identifier[result_file] ]
identifier[pmag] . identifier[magic_write] ( identifier[meas_file] , identifier[MeasRecs] , identifier[meas_table_name] )
identifier[print] ( literal[string] + literal[string] . identifier[join] ( identifier[flist] ))
keyword[return] keyword[True] , identifier[meas_file] | def k15(k15file, dir_path='.', input_dir_path='', meas_file='measurements.txt', aniso_outfile='specimens.txt', samp_file='samples.txt', result_file='rmag_results.txt', specnum=0, sample_naming_con='1', location='unknown', data_model_num=3):
"""
converts .k15 format data to MagIC format.
assumes Jelinek Kappabridge measurement scheme.
Parameters
----------
k15file : str
input file name
dir_path : str
output file directory, default "."
input_dir_path : str
input file directory IF different from dir_path, default ""
meas_file : str
output measurement file name, default "measurements.txt"
aniso_outfile : str
output specimen file name, default "specimens.txt"
samp_file: str
output sample file name, default "samples.txt"
    result_file : str
output result file name, default "rmag_results.txt", data model 2 only
specnum : int
number of characters to designate a specimen, default 0
    sample_naming_con : str
sample/site naming convention, default '1', see info below
location : str
location name, default "unknown"
data_model_num : int
MagIC data model [2, 3], default 3
Returns
--------
        type - Tuple : (True or False indicating if conversion was successful, meas_file name written)
Info
--------
Infile format:
name [az,pl,strike,dip], followed by
3 rows of 5 measurements for each specimen
Sample naming convention:
[1] XXXXY: where XXXX is an arbitrary length site designation and Y
is the single character sample designation. e.g., TG001a is the
first sample from site TG001. [default]
        [2] XXXX-YY: YY sample from site XXXX (XXXX, YY of arbitrary length)
        [3] XXXX.YY: YY sample from site XXXX (XXXX, YY of arbitrary length)
[4-Z] XXXXYYY: YYY is sample designation with Z characters from site XXX
[5] site name same as sample
[6] site name entered in site_name column in the orient.txt format input file -- NOT CURRENTLY SUPPORTED
[7-Z] [XXXX]YYY: XXXX is site designation with Z characters with sample name XXXXYYYY
NB: all others you will have to customize your self
or e-mail [email protected] for help.
"""
#
# initialize some variables
#
(input_dir_path, output_dir_path) = pmag.fix_directories(input_dir_path, dir_path)
version_num = pmag.get_version()
syn = 0
(itilt, igeo, linecnt, key) = (0, 0, 0, '')
first_save = 1
k15 = []
citation = 'This study'
data_model_num = int(float(data_model_num))
# set column names for MagIC 3
spec_name_col = 'specimen' #
samp_name_col = 'sample' #
site_name_col = 'site' #
loc_name_col = 'location' #
citation_col = 'citations'
method_col = 'method_codes'
site_description_col = 'description'
expedition_col = 'expedition_name'
instrument_col = 'instrument_codes'
experiment_col = 'experiments'
analyst_col = 'analysts'
quality_col = 'quality'
aniso_quality_col = 'result_quality'
meas_standard_col = 'standard'
meas_description_col = 'description'
aniso_type_col = 'aniso_type'
aniso_unit_col = 'aniso_s_unit'
aniso_n_col = 'aniso_s_n_measurements'
azimuth_col = 'azimuth'
spec_volume_col = 'volume'
samp_dip_col = 'dip'
bed_dip_col = 'bed_dip'
bed_dip_direction_col = 'bed_dip_direction'
chi_vol_col = 'susc_chi_volume'
aniso_sigma_col = 'aniso_s_sigma'
aniso_unit_col = 'aniso_s_unit'
aniso_tilt_corr_col = 'aniso_tilt_correction'
meas_table_name = 'measurements'
spec_table_name = 'specimens'
samp_table_name = 'samples'
site_table_name = 'sites'
meas_name_col = 'measurement'
meas_time_col = 'timestamp'
meas_ac_col = 'meas_field_ac'
meas_temp_col = 'meas_temp'
#
software_col = 'software_packages'
description_col = 'description' # sites.description
treat_temp_col = 'treat_temp'
meas_orient_phi_col = 'meas_orient_phi'
meas_orient_theta_col = 'meas_orient_theta'
aniso_mean_col = 'aniso_s_mean'
result_description_col = 'description'
# set defaults correctly for MagIC 2
if data_model_num == 2:
if meas_file == 'measurements.txt':
meas_file = 'magic_measurements.txt' # depends on [control=['if'], data=['meas_file']]
if samp_file == 'samples.txt':
samp_file = 'er_samples.txt' # depends on [control=['if'], data=['samp_file']]
if aniso_outfile == 'specimens.txt':
aniso_outfile = 'rmag_anisotropy.txt' # depends on [control=['if'], data=['aniso_outfile']] # depends on [control=['if'], data=[]]
# set column names for MagIC 2
if data_model_num == 2:
spec_name_col = 'er_specimen_name'
samp_name_col = 'er_sample_name'
site_name_col = 'er_site_name'
loc_name_col = 'er_location_name'
citation_col = 'er_citation_names'
method_col = 'magic_method_codes'
site_description_col = 'site_description'
expedition_col = 'er_expedition_name'
instrument_col = 'magic_instrument_codes'
experiment_col = 'magic_experiment_names'
analyst_col = 'er_analyst_mail_names'
quality_col = 'measurement_flag'
aniso_quality_col = 'anisotropy_flag'
meas_standard_col = 'measurement_standard'
meas_description_col = 'measurement_description'
aniso_type_col = 'anisotropy_type'
aniso_unit_col = 'anisotropy_unit'
aniso_n_col = 'anisotropy_n'
azimuth_col = 'sample_azimuth'
spec_volume_col = 'specimen_volume'
samp_dip_col = 'sample_dip'
bed_dip_col = 'sample_bed_dip'
bed_dip_direction_col = 'sample_bed_dip_direction'
chi_vol_col = 'measurement_chi_volume'
aniso_sigma_col = 'anisotropy_sigma'
aniso_unit_col = 'anisotropy_unit'
aniso_tilt_corr_col = 'anisotropy_tilt_correction'
meas_table_name = 'magic_measurements'
spec_table_name = 'er_specimens'
samp_table_name = 'er_samples'
site_table_name = 'er_sites'
meas_name_col = 'measurement_number'
meas_time_col = 'measurement_date'
meas_ac_col = 'measurement_lab_field_ac'
meas_temp_col = 'measurement_temp'
#
software_col = 'magic_software_packages'
description_col = 'rmag_result_name'
treat_temp_col = 'treatment_temp'
meas_temp_col = 'measurement_temp'
meas_orient_phi_col = 'measurement_orient_phi'
meas_orient_theta_col = 'measurement_orient_theta'
aniso_mean_col = 'anisotropy_mean'
result_description_col = 'result_description' # depends on [control=['if'], data=[]]
# pick off stuff from command line
Z = ''
if '4' in sample_naming_con:
if '-' not in sample_naming_con:
print('option [4] must be in form 4-Z where Z is an integer')
return (False, 'option [4] must be in form 4-Z where Z is an integer') # depends on [control=['if'], data=[]]
else:
Z = sample_naming_con.split('-')[1]
sample_naming_con = '4' # depends on [control=['if'], data=['sample_naming_con']]
if sample_naming_con == '6':
(Samps, filetype) = pmag.magic_read(os.path.join(input_dir_path, samp_table_name + '.txt')) # depends on [control=['if'], data=[]]
samp_file = pmag.resolve_file_name(samp_file, output_dir_path)
meas_file = pmag.resolve_file_name(meas_file, output_dir_path)
aniso_outfile = pmag.resolve_file_name(aniso_outfile, output_dir_path)
result_file = pmag.resolve_file_name(result_file, output_dir_path)
k15file = pmag.resolve_file_name(k15file, input_dir_path)
if not os.path.exists(k15file):
print(k15file)
return (False, 'You must provide a valid k15 format file') # depends on [control=['if'], data=[]]
try:
(SampRecs, filetype) = pmag.magic_read(samp_file) # append new records to existing
samplist = []
for samp in SampRecs:
if samp[samp_name_col] not in samplist:
samplist.append(samp[samp_name_col]) # depends on [control=['if'], data=['samplist']] # depends on [control=['for'], data=['samp']] # depends on [control=['try'], data=[]]
except IOError:
SampRecs = [] # depends on [control=['except'], data=[]]
# measurement directions for Jelinek 1977 protocol:
Decs = [315, 225, 180, 135, 45, 90, 270, 270, 270, 90, 180, 180, 0, 0, 0]
Incs = [0, 0, 0, 0, 0, -45, -45, 0, 45, 45, 45, -45, -90, -45, 45]
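    # Each (Decs[i], Incs[i]) pair is the orientation of the i-th susceptibility
    # reading in the 15-position Jelinek (1977) scheme; pmag.dok15_s() later
    # inverts these readings into the six independent tensor elements.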
# some defaults to read in .k15 file format
# list of measurements and default number of characters for specimen ID
# some magic default definitions
#
# read in data
with open(k15file, 'r') as finput:
lines = finput.readlines() # depends on [control=['with'], data=['finput']]
(MeasRecs, SpecRecs, AnisRecs, ResRecs) = ([], [], [], [])
# read in data
(MeasRec, SpecRec, SampRec, SiteRec, AnisRec, ResRec) = ({}, {}, {}, {}, {}, {})
for line in lines:
linecnt += 1
rec = line.split()
if linecnt == 1:
MeasRec[method_col] = ''
SpecRec[method_col] = ''
SampRec[method_col] = ''
AnisRec[method_col] = ''
SiteRec[method_col] = ''
ResRec[method_col] = ''
MeasRec[software_col] = version_num
SpecRec[software_col] = version_num
SampRec[software_col] = version_num
AnisRec[software_col] = version_num
SiteRec[software_col] = version_num
ResRec[software_col] = version_num
MeasRec[method_col] = 'LP-X'
MeasRec[quality_col] = 'g'
MeasRec[meas_standard_col] = 'u'
MeasRec[citation_col] = 'This study'
SpecRec[citation_col] = 'This study'
SampRec[citation_col] = 'This study'
AnisRec[citation_col] = 'This study'
ResRec[citation_col] = 'This study'
MeasRec[spec_name_col] = rec[0]
MeasRec[experiment_col] = rec[0] + ':LP-AN-MS'
AnisRec[experiment_col] = rec[0] + ':AMS'
ResRec[experiment_col] = rec[0] + ':AMS'
SpecRec[spec_name_col] = rec[0]
AnisRec[spec_name_col] = rec[0]
SampRec[spec_name_col] = rec[0]
if data_model_num == 2:
ResRec[description_col] = rec[0] # depends on [control=['if'], data=[]]
if data_model_num == 3:
ResRec[spec_name_col] = rec[0] # depends on [control=['if'], data=[]]
specnum = int(specnum)
if specnum != 0:
MeasRec[samp_name_col] = rec[0][:-specnum] # depends on [control=['if'], data=['specnum']]
if specnum == 0:
MeasRec[samp_name_col] = rec[0] # depends on [control=['if'], data=[]]
SampRec[samp_name_col] = MeasRec[samp_name_col]
SpecRec[samp_name_col] = MeasRec[samp_name_col]
AnisRec[samp_name_col] = MeasRec[samp_name_col]
if data_model_num == 3:
ResRec[samp_name_col] = MeasRec[samp_name_col] # depends on [control=['if'], data=[]]
else:
ResRec[samp_name_col + 's'] = MeasRec[samp_name_col]
if sample_naming_con == '6':
for samp in Samps:
if samp[samp_name_col] == AnisRec[samp_name_col]:
sitename = samp[site_name_col]
location = samp[loc_name_col] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['samp']] # depends on [control=['if'], data=[]]
elif sample_naming_con != '':
sitename = pmag.parse_site(AnisRec[samp_name_col], sample_naming_con, Z) # depends on [control=['if'], data=['sample_naming_con']]
MeasRec[site_name_col] = sitename
MeasRec[loc_name_col] = location
SampRec[site_name_col] = MeasRec[site_name_col]
SpecRec[site_name_col] = MeasRec[site_name_col]
AnisRec[site_name_col] = MeasRec[site_name_col]
ResRec[loc_name_col] = location
ResRec[site_name_col] = MeasRec[site_name_col]
if data_model_num == 2:
ResRec[site_name_col + 's'] = MeasRec[site_name_col] # depends on [control=['if'], data=[]]
SampRec[loc_name_col] = MeasRec[loc_name_col]
SpecRec[loc_name_col] = MeasRec[loc_name_col]
AnisRec[loc_name_col] = MeasRec[loc_name_col]
if data_model_num == 2:
ResRec[loc_name_col + 's'] = MeasRec[loc_name_col] # depends on [control=['if'], data=[]]
if len(rec) >= 3:
(SampRec[azimuth_col], SampRec[samp_dip_col]) = (rec[1], rec[2])
(az, pl, igeo) = (float(rec[1]), float(rec[2]), 1) # depends on [control=['if'], data=[]]
if len(rec) == 5:
(SampRec[bed_dip_direction_col], SampRec[bed_dip_col]) = ('%7.1f' % (90.0 + float(rec[3])), rec[4])
(bed_az, bed_dip, itilt, igeo) = (90.0 + float(rec[3]), float(rec[4]), 1, 1) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
for i in range(5):
# assume measurements in micro SI
k15.append(1e-06 * float(rec[i])) # depends on [control=['for'], data=['i']]
if linecnt == 4:
(sbar, sigma, bulk) = pmag.dok15_s(k15)
hpars = pmag.dohext(9, sigma, sbar)
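            # dok15_s() reduces the 15 readings to the six-element mean tensor sbar
            # plus its sigma and the bulk susceptibility; dohext() then derives
            # Hext-style statistics from them (eigenvalues t1-t3, eigenvector
            # directions, F tests).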
MeasRec[treat_temp_col] = '%8.3e' % 273 # room temp in kelvin
MeasRec[meas_temp_col] = '%8.3e' % 273 # room temp in kelvin
for i in range(15):
NewMeas = copy.deepcopy(MeasRec)
NewMeas[meas_orient_phi_col] = '%7.1f' % Decs[i]
NewMeas[meas_orient_theta_col] = '%7.1f' % Incs[i]
NewMeas[chi_vol_col] = '%12.10f' % k15[i]
NewMeas[meas_name_col] = '%i' % (i + 1)
if data_model_num == 2:
NewMeas['magic_experiment_name'] = rec[0] + ':LP-AN-MS' # depends on [control=['if'], data=[]]
else:
NewMeas['experiment'] = rec[0] + ':LP-AN-MS'
MeasRecs.append(NewMeas) # depends on [control=['for'], data=['i']]
if SampRec[samp_name_col] not in samplist:
SampRecs.append(SampRec)
samplist.append(SampRec[samp_name_col]) # depends on [control=['if'], data=['samplist']]
SpecRecs.append(SpecRec)
AnisRec[aniso_type_col] = 'AMS'
ResRec[aniso_type_col] = 'AMS'
s1_val = '{:12.10f}'.format(sbar[0])
s2_val = '{:12.10f}'.format(sbar[1])
s3_val = '{:12.10f}'.format(sbar[2])
s4_val = '{:12.10f}'.format(sbar[3])
s5_val = '{:12.10f}'.format(sbar[4])
s6_val = '{:12.10f}'.format(sbar[5])
            # MagIC 2
if data_model_num == 2:
AnisRec['anisotropy_s1'] = s1_val
AnisRec['anisotropy_s2'] = s2_val
AnisRec['anisotropy_s3'] = s3_val
AnisRec['anisotropy_s4'] = s4_val
AnisRec['anisotropy_s5'] = s5_val
AnisRec['anisotropy_s6'] = s6_val # depends on [control=['if'], data=[]]
else:
# MagIC 3
vals = [s1_val, s2_val, s3_val, s4_val, s5_val, s6_val]
AnisRec['aniso_s'] = ':'.join([str(v).strip() for v in vals])
            AnisRec[aniso_mean_col] = '{:12.10f}'.format(bulk)
            AnisRec[aniso_sigma_col] = '{:12.10f}'.format(sigma)
AnisRec[aniso_unit_col] = 'SI'
AnisRec[aniso_n_col] = '15'
AnisRec[aniso_tilt_corr_col] = '-1'
AnisRec[method_col] = 'LP-X:AE-H:LP-AN-MS'
AnisRecs.append(AnisRec)
ResRec[method_col] = 'LP-X:AE-H:LP-AN-MS'
ResRec[aniso_tilt_corr_col] = '-1'
if data_model_num == 3:
aniso_v1 = ':'.join([str(i) for i in (hpars['t1'], hpars['v1_dec'], hpars['v1_inc'], hpars['v2_dec'], hpars['v2_inc'], hpars['e12'], hpars['v3_dec'], hpars['v3_inc'], hpars['e13'])])
aniso_v2 = ':'.join([str(i) for i in (hpars['t2'], hpars['v2_dec'], hpars['v2_inc'], hpars['v1_dec'], hpars['v1_inc'], hpars['e12'], hpars['v3_dec'], hpars['v3_inc'], hpars['e23'])])
aniso_v3 = ':'.join([str(i) for i in (hpars['t3'], hpars['v3_dec'], hpars['v3_inc'], hpars['v1_dec'], hpars['v1_inc'], hpars['e13'], hpars['v2_dec'], hpars['v2_inc'], hpars['e23'])])
ResRec['aniso_v1'] = aniso_v1
ResRec['aniso_v2'] = aniso_v2
ResRec['aniso_v3'] = aniso_v3 # depends on [control=['if'], data=[]]
else: # data model 2
ResRec['anisotropy_t1'] = '%12.10f' % hpars['t1']
ResRec['anisotropy_t2'] = '%12.10f' % hpars['t2']
ResRec['anisotropy_t3'] = '%12.10f' % hpars['t3']
ResRec['anisotropy_fest'] = '%12.10f' % hpars['F']
ResRec['anisotropy_ftest12'] = '%12.10f' % hpars['F12']
ResRec['anisotropy_ftest23'] = '%12.10f' % hpars['F23']
ResRec['anisotropy_v1_dec'] = '%7.1f' % hpars['v1_dec']
ResRec['anisotropy_v2_dec'] = '%7.1f' % hpars['v2_dec']
ResRec['anisotropy_v3_dec'] = '%7.1f' % hpars['v3_dec']
ResRec['anisotropy_v1_inc'] = '%7.1f' % hpars['v1_inc']
ResRec['anisotropy_v2_inc'] = '%7.1f' % hpars['v2_inc']
ResRec['anisotropy_v3_inc'] = '%7.1f' % hpars['v3_inc']
ResRec['anisotropy_v1_eta_dec'] = ResRec['anisotropy_v2_dec']
ResRec['anisotropy_v1_eta_inc'] = ResRec['anisotropy_v2_inc']
ResRec['anisotropy_v1_zeta_dec'] = ResRec['anisotropy_v3_dec']
ResRec['anisotropy_v1_zeta_inc'] = ResRec['anisotropy_v3_inc']
ResRec['anisotropy_v2_eta_dec'] = ResRec['anisotropy_v1_dec']
ResRec['anisotropy_v2_eta_inc'] = ResRec['anisotropy_v1_inc']
ResRec['anisotropy_v2_zeta_dec'] = ResRec['anisotropy_v3_dec']
ResRec['anisotropy_v2_zeta_inc'] = ResRec['anisotropy_v3_inc']
ResRec['anisotropy_v3_eta_dec'] = ResRec['anisotropy_v1_dec']
ResRec['anisotropy_v3_eta_inc'] = ResRec['anisotropy_v1_inc']
ResRec['anisotropy_v3_zeta_dec'] = ResRec['anisotropy_v2_dec']
ResRec['anisotropy_v3_zeta_inc'] = ResRec['anisotropy_v2_inc']
ResRec['anisotropy_v1_eta_semi_angle'] = '%7.1f' % hpars['e12']
ResRec['anisotropy_v1_zeta_semi_angle'] = '%7.1f' % hpars['e13']
ResRec['anisotropy_v2_eta_semi_angle'] = '%7.1f' % hpars['e12']
ResRec['anisotropy_v2_zeta_semi_angle'] = '%7.1f' % hpars['e23']
ResRec['anisotropy_v3_eta_semi_angle'] = '%7.1f' % hpars['e13']
ResRec['anisotropy_v3_zeta_semi_angle'] = '%7.1f' % hpars['e23']
ResRec[result_description_col] = 'Critical F: ' + hpars['F_crit'] + ';Critical F12/F13: ' + hpars['F12_crit']
#
ResRecs.append(ResRec)
if igeo == 1:
sbarg = pmag.dosgeo(sbar, az, pl)
hparsg = pmag.dohext(9, sigma, sbarg)
AnisRecG = copy.copy(AnisRec)
ResRecG = copy.copy(ResRec)
if data_model_num == 3:
AnisRecG['aniso_s'] = ':'.join(('{:12.10f}'.format(i) for i in sbarg)) # depends on [control=['if'], data=[]]
if data_model_num == 2:
AnisRecG['anisotropy_s1'] = '%12.10f' % sbarg[0]
AnisRecG['anisotropy_s2'] = '%12.10f' % sbarg[1]
AnisRecG['anisotropy_s3'] = '%12.10f' % sbarg[2]
AnisRecG['anisotropy_s4'] = '%12.10f' % sbarg[3]
AnisRecG['anisotropy_s5'] = '%12.10f' % sbarg[4]
AnisRecG['anisotropy_s6'] = '%12.10f' % sbarg[5] # depends on [control=['if'], data=[]]
AnisRecG[aniso_tilt_corr_col] = '0'
ResRecG[aniso_tilt_corr_col] = '0'
if data_model_num == 3:
aniso_v1 = ':'.join([str(i) for i in (hparsg['t1'], hparsg['v1_dec'], hparsg['v1_inc'], hparsg['v2_dec'], hparsg['v2_inc'], hparsg['e12'], hparsg['v3_dec'], hparsg['v3_inc'], hparsg['e13'])])
aniso_v2 = ':'.join([str(i) for i in (hparsg['t2'], hparsg['v2_dec'], hparsg['v2_inc'], hparsg['v1_dec'], hparsg['v1_inc'], hparsg['e12'], hparsg['v3_dec'], hparsg['v3_inc'], hparsg['e23'])])
aniso_v3 = ':'.join([str(i) for i in (hparsg['t3'], hparsg['v3_dec'], hparsg['v3_inc'], hparsg['v1_dec'], hparsg['v1_inc'], hparsg['e13'], hparsg['v2_dec'], hparsg['v2_inc'], hparsg['e23'])])
ResRecG['aniso_v1'] = aniso_v1
ResRecG['aniso_v2'] = aniso_v2
ResRecG['aniso_v3'] = aniso_v3 # depends on [control=['if'], data=[]]
#
if data_model_num == 2:
ResRecG['anisotropy_v1_dec'] = '%7.1f' % hparsg['v1_dec']
ResRecG['anisotropy_v2_dec'] = '%7.1f' % hparsg['v2_dec']
ResRecG['anisotropy_v3_dec'] = '%7.1f' % hparsg['v3_dec']
ResRecG['anisotropy_v1_inc'] = '%7.1f' % hparsg['v1_inc']
ResRecG['anisotropy_v2_inc'] = '%7.1f' % hparsg['v2_inc']
ResRecG['anisotropy_v3_inc'] = '%7.1f' % hparsg['v3_inc']
ResRecG['anisotropy_v1_eta_dec'] = ResRecG['anisotropy_v2_dec']
ResRecG['anisotropy_v1_eta_inc'] = ResRecG['anisotropy_v2_inc']
ResRecG['anisotropy_v1_zeta_dec'] = ResRecG['anisotropy_v3_dec']
ResRecG['anisotropy_v1_zeta_inc'] = ResRecG['anisotropy_v3_inc']
ResRecG['anisotropy_v2_eta_dec'] = ResRecG['anisotropy_v1_dec']
ResRecG['anisotropy_v2_eta_inc'] = ResRecG['anisotropy_v1_inc']
ResRecG['anisotropy_v2_zeta_dec'] = ResRecG['anisotropy_v3_dec']
ResRecG['anisotropy_v2_zeta_inc'] = ResRecG['anisotropy_v3_inc']
ResRecG['anisotropy_v3_eta_dec'] = ResRecG['anisotropy_v1_dec']
ResRecG['anisotropy_v3_eta_inc'] = ResRecG['anisotropy_v1_inc']
ResRecG['anisotropy_v3_zeta_dec'] = ResRecG['anisotropy_v2_dec']
ResRecG['anisotropy_v3_zeta_inc'] = ResRecG['anisotropy_v2_inc'] # depends on [control=['if'], data=[]]
#
ResRecG[result_description_col] = 'Critical F: ' + hpars['F_crit'] + ';Critical F12/F13: ' + hpars['F12_crit']
ResRecs.append(ResRecG)
AnisRecs.append(AnisRecG) # depends on [control=['if'], data=[]]
if itilt == 1:
sbart = pmag.dostilt(sbarg, bed_az, bed_dip)
hparst = pmag.dohext(9, sigma, sbart)
AnisRecT = copy.copy(AnisRec)
ResRecT = copy.copy(ResRec)
if data_model_num == 3:
aniso_v1 = ':'.join([str(i) for i in (hparst['t1'], hparst['v1_dec'], hparst['v1_inc'], hparst['v2_dec'], hparst['v2_inc'], hparst['e12'], hparst['v3_dec'], hparst['v3_inc'], hparst['e13'])])
aniso_v2 = ':'.join([str(i) for i in (hparst['t2'], hparst['v2_dec'], hparst['v2_inc'], hparst['v1_dec'], hparst['v1_inc'], hparst['e12'], hparst['v3_dec'], hparst['v3_inc'], hparst['e23'])])
aniso_v3 = ':'.join([str(i) for i in (hparst['t3'], hparst['v3_dec'], hparst['v3_inc'], hparst['v1_dec'], hparst['v1_inc'], hparst['e13'], hparst['v2_dec'], hparst['v2_inc'], hparst['e23'])])
ResRecT['aniso_v1'] = aniso_v1
ResRecT['aniso_v2'] = aniso_v2
ResRecT['aniso_v3'] = aniso_v3 # depends on [control=['if'], data=[]]
#
if data_model_num == 2:
AnisRecT['anisotropy_s1'] = '%12.10f' % sbart[0]
AnisRecT['anisotropy_s2'] = '%12.10f' % sbart[1]
AnisRecT['anisotropy_s3'] = '%12.10f' % sbart[2]
AnisRecT['anisotropy_s4'] = '%12.10f' % sbart[3]
AnisRecT['anisotropy_s5'] = '%12.10f' % sbart[4]
AnisRecT['anisotropy_s6'] = '%12.10f' % sbart[5]
AnisRecT['anisotropy_tilt_correction'] = '100'
ResRecT['anisotropy_v1_dec'] = '%7.1f' % hparst['v1_dec']
ResRecT['anisotropy_v2_dec'] = '%7.1f' % hparst['v2_dec']
ResRecT['anisotropy_v3_dec'] = '%7.1f' % hparst['v3_dec']
ResRecT['anisotropy_v1_inc'] = '%7.1f' % hparst['v1_inc']
ResRecT['anisotropy_v2_inc'] = '%7.1f' % hparst['v2_inc']
ResRecT['anisotropy_v3_inc'] = '%7.1f' % hparst['v3_inc']
ResRecT['anisotropy_v1_eta_dec'] = ResRecT['anisotropy_v2_dec']
ResRecT['anisotropy_v1_eta_inc'] = ResRecT['anisotropy_v2_inc']
ResRecT['anisotropy_v1_zeta_dec'] = ResRecT['anisotropy_v3_dec']
ResRecT['anisotropy_v1_zeta_inc'] = ResRecT['anisotropy_v3_inc']
ResRecT['anisotropy_v2_eta_dec'] = ResRecT['anisotropy_v1_dec']
ResRecT['anisotropy_v2_eta_inc'] = ResRecT['anisotropy_v1_inc']
ResRecT['anisotropy_v2_zeta_dec'] = ResRecT['anisotropy_v3_dec']
ResRecT['anisotropy_v2_zeta_inc'] = ResRecT['anisotropy_v3_inc']
ResRecT['anisotropy_v3_eta_dec'] = ResRecT['anisotropy_v1_dec']
ResRecT['anisotropy_v3_eta_inc'] = ResRecT['anisotropy_v1_inc']
ResRecT['anisotropy_v3_zeta_dec'] = ResRecT['anisotropy_v2_dec']
ResRecT['anisotropy_v3_zeta_inc'] = ResRecT['anisotropy_v2_inc'] # depends on [control=['if'], data=[]]
#
ResRecT[aniso_tilt_corr_col] = '100'
ResRecT[result_description_col] = 'Critical F: ' + hparst['F_crit'] + ';Critical F12/F13: ' + hparst['F12_crit']
ResRecs.append(ResRecT)
AnisRecs.append(AnisRecT) # depends on [control=['if'], data=[]]
(k15, linecnt) = ([], 0)
(MeasRec, SpecRec, SampRec, SiteRec, AnisRec) = ({}, {}, {}, {}, {}) # depends on [control=['if'], data=['linecnt']] # depends on [control=['for'], data=['line']]
# samples
pmag.magic_write(samp_file, SampRecs, samp_table_name)
# specimens / rmag_anisotropy / rmag_results
if data_model_num == 3:
AnisRecs.extend(ResRecs)
SpecRecs = AnisRecs.copy()
(SpecRecs, keys) = pmag.fillkeys(SpecRecs)
pmag.magic_write(aniso_outfile, SpecRecs, 'specimens')
flist = [meas_file, aniso_outfile, samp_file] # depends on [control=['if'], data=[]]
else:
pmag.magic_write(aniso_outfile, AnisRecs, 'rmag_anisotropy') # add to specimens?
pmag.magic_write(result_file, ResRecs, 'rmag_results') # added to specimens (NOT sites)
flist = [meas_file, samp_file, aniso_outfile, result_file]
# measurements
pmag.magic_write(meas_file, MeasRecs, meas_table_name)
print('Data saved to: ' + ', '.join(flist))
return (True, meas_file) |
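A minimal call sketch for the k15 converter above; the file, directory, and
location names are hypothetical, and it assumes the function is importable
(in pmagpy it is shipped with the conversion routines):

# requires a Jelinek-scheme .k15 file at the given path (hypothetical here)
ok, meas_path = k15('example.k15', dir_path='magic_project', specnum=1,
                    sample_naming_con='1', location='Example Site',
                    data_model_num=3)
if ok:
    print('measurements written to', meas_path)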
def delete_folder(self, folder_id, recursive=True):
"""Delete an existing folder
Args:
folder_id (int): ID of the folder to delete.
recursive (bool): Delete all subfolder if True.
Returns:
dict. Response from Box.
Raises:
BoxError: An error response is returned from Box (status_code >= 400).
BoxHttpResponseError: Response from Box is malformed.
requests.exceptions.*: Any connection related problem.
"""
return self.__request("DELETE", "folders/%s" % (folder_id, ),
                              querystring={'recursive': str(recursive).lower()}) | def function[delete_folder, parameter[self, folder_id, recursive]]:
constant[Delete an existing folder
Args:
folder_id (int): ID of the folder to delete.
recursive (bool): Delete all subfolder if True.
Returns:
dict. Response from Box.
Raises:
BoxError: An error response is returned from Box (status_code >= 400).
BoxHttpResponseError: Response from Box is malformed.
requests.exceptions.*: Any connection related problem.
]
return[call[name[self].__request, parameter[constant[DELETE], binary_operation[constant[folders/%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da207f9ba30>]]]]]] | keyword[def] identifier[delete_folder] ( identifier[self] , identifier[folder_id] , identifier[recursive] = keyword[True] ):
literal[string]
keyword[return] identifier[self] . identifier[__request] ( literal[string] , literal[string] %( identifier[folder_id] ,),
identifier[querystring] ={ literal[string] : identifier[str] ( identifier[recursive] ). identifier[lower] ()}) | def delete_folder(self, folder_id, recursive=True):
"""Delete an existing folder
Args:
folder_id (int): ID of the folder to delete.
recursive (bool): Delete all subfolder if True.
Returns:
dict. Response from Box.
Raises:
BoxError: An error response is returned from Box (status_code >= 400).
BoxHttpResponseError: Response from Box is malformed.
requests.exceptions.*: Any connection related problem.
"""
    return self.__request('DELETE', 'folders/%s' % (folder_id,), querystring={'recursive': str(recursive).lower()}) |
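A note on the querystring value above: Box expects the literal strings 'true'
and 'false' in the query string rather than Python's 'True'/'False', which is
why the flag is stringified and lowercased. A trivial self-contained check:

# str() then .lower() turns the Python bool into the form the API accepts
assert str(True).lower() == 'true'
assert str(False).lower() == 'false'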
def gen_rand_str(*size, use=None, keyspace=None):
""" Generates a random string using random module specified in @use within
the @keyspace
@*size: #int size range for the length of the string
@use: the random module to use
@keyspace: #str chars allowed in the random string
..
from vital.debug import gen_rand_str
gen_rand_str()
# -> 'PRCpAq'
gen_rand_str(1, 2)
# -> 'Y'
gen_rand_str(12, keyspace="abcdefg")
# -> 'gaaacffbedf'
..
"""
keyspace = keyspace or (string.ascii_letters + string.digits)
keyspace = [char for char in keyspace]
use = use or _random
use.seed()
if size:
size = size if len(size) == 2 else (size[0], size[0])
else:
size = (10, 12)
return ''.join(
use.choice(keyspace)
for _ in range(use.randint(*size))) | def function[gen_rand_str, parameter[]]:
constant[ Generates a random string using random module specified in @use within
the @keyspace
@*size: #int size range for the length of the string
@use: the random module to use
@keyspace: #str chars allowed in the random string
..
from vital.debug import gen_rand_str
gen_rand_str()
# -> 'PRCpAq'
gen_rand_str(1, 2)
# -> 'Y'
gen_rand_str(12, keyspace="abcdefg")
# -> 'gaaacffbedf'
..
]
variable[keyspace] assign[=] <ast.BoolOp object at 0x7da1b1075150>
variable[keyspace] assign[=] <ast.ListComp object at 0x7da1b1075420>
variable[use] assign[=] <ast.BoolOp object at 0x7da1b1076f50>
call[name[use].seed, parameter[]]
if name[size] begin[:]
variable[size] assign[=] <ast.IfExp object at 0x7da1b1075030>
return[call[constant[].join, parameter[<ast.GeneratorExp object at 0x7da1b1075510>]]] | keyword[def] identifier[gen_rand_str] (* identifier[size] , identifier[use] = keyword[None] , identifier[keyspace] = keyword[None] ):
literal[string]
identifier[keyspace] = identifier[keyspace] keyword[or] ( identifier[string] . identifier[ascii_letters] + identifier[string] . identifier[digits] )
identifier[keyspace] =[ identifier[char] keyword[for] identifier[char] keyword[in] identifier[keyspace] ]
identifier[use] = identifier[use] keyword[or] identifier[_random]
identifier[use] . identifier[seed] ()
keyword[if] identifier[size] :
identifier[size] = identifier[size] keyword[if] identifier[len] ( identifier[size] )== literal[int] keyword[else] ( identifier[size] [ literal[int] ], identifier[size] [ literal[int] ])
keyword[else] :
identifier[size] =( literal[int] , literal[int] )
keyword[return] literal[string] . identifier[join] (
identifier[use] . identifier[choice] ( identifier[keyspace] )
keyword[for] identifier[_] keyword[in] identifier[range] ( identifier[use] . identifier[randint] (* identifier[size] ))) | def gen_rand_str(*size, use=None, keyspace=None):
""" Generates a random string using random module specified in @use within
the @keyspace
@*size: #int size range for the length of the string
@use: the random module to use
@keyspace: #str chars allowed in the random string
..
from vital.debug import gen_rand_str
gen_rand_str()
# -> 'PRCpAq'
gen_rand_str(1, 2)
# -> 'Y'
gen_rand_str(12, keyspace="abcdefg")
# -> 'gaaacffbedf'
..
"""
keyspace = keyspace or string.ascii_letters + string.digits
keyspace = [char for char in keyspace]
use = use or _random
use.seed()
if size:
size = size if len(size) == 2 else (size[0], size[0]) # depends on [control=['if'], data=[]]
else:
size = (10, 12)
return ''.join((use.choice(keyspace) for _ in range(use.randint(*size)))) |
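A stand-alone re-creation of the sampling loop above, showing how the
two-argument size form bounds the length; `rng` is a seeded stand-in for the
module-level `_random` used by default:

import random
import string

rng = random.Random(0)                    # deterministic for the example
keyspace = string.ascii_letters + string.digits
length = rng.randint(4, 8)                # mirrors use.randint(*size) for gen_rand_str(4, 8)
print(''.join(rng.choice(keyspace) for _ in range(length)))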
def do_photometry(self, image, init_guesses=None):
"""
Perform PSF photometry in ``image``.
This method assumes that ``psf_model`` has centroids and flux
parameters which will be fitted to the data provided in
``image``. A compound model, in fact a sum of ``psf_model``,
will be fitted to groups of stars automatically identified by
``group_maker``. Also, ``image`` is not assumed to be background
subtracted. If ``init_guesses`` are not ``None`` then this
method uses ``init_guesses`` as initial guesses for the
centroids. If the centroid positions are set as ``fixed`` in the
PSF model ``psf_model``, then the optimizer will only consider
the flux as a variable.
Parameters
----------
image : 2D array-like, `~astropy.io.fits.ImageHDU`, `~astropy.io.fits.HDUList`
Image to perform photometry.
init_guesses: `~astropy.table.Table`
Table which contains the initial guesses (estimates) for the
set of parameters. Columns 'x_0' and 'y_0' which represent
the positions (in pixel coordinates) for each object must be
present. 'flux_0' can also be provided to set initial
fluxes. If 'flux_0' is not provided, aperture photometry is
used to estimate initial values for the fluxes. Additional
columns of the form '<parametername>_0' will be used to set
the initial guess for any parameters of the ``psf_model``
model that are not fixed.
Returns
-------
output_tab : `~astropy.table.Table` or None
Table with the photometry results, i.e., centroids and
fluxes estimations and the initial estimates used to start
the fitting process. Uncertainties on the fitted parameters
are reported as columns called ``<paramname>_unc`` provided
that the fitter object contains a dictionary called
``fit_info`` with the key ``param_cov``, which contains the
covariance matrix. If ``param_cov`` is not present,
            uncertainties are not reported.
"""
if self.bkg_estimator is not None:
image = image - self.bkg_estimator(image)
if self.aperture_radius is None:
if hasattr(self.psf_model, 'fwhm'):
self.aperture_radius = self.psf_model.fwhm.value
elif hasattr(self.psf_model, 'sigma'):
self.aperture_radius = (self.psf_model.sigma.value *
gaussian_sigma_to_fwhm)
if init_guesses is not None:
# make sure the code does not modify user's input
init_guesses = init_guesses.copy()
if self.aperture_radius is None:
if 'flux_0' not in init_guesses.colnames:
raise ValueError('aperture_radius is None and could not '
'be determined by psf_model. Please, '
                             'either provide a value for '
'aperture_radius or define fwhm/sigma '
'at psf_model.')
if self.finder is not None:
warnings.warn('Both init_guesses and finder are different '
'than None, which is ambiguous. finder is '
'going to be ignored.', AstropyUserWarning)
if 'flux_0' not in init_guesses.colnames:
apertures = CircularAperture((init_guesses['x_0'],
init_guesses['y_0']),
r=self.aperture_radius)
init_guesses['flux_0'] = aperture_photometry(
image, apertures)['aperture_sum']
else:
if self.finder is None:
raise ValueError('Finder cannot be None if init_guesses are '
'not given.')
sources = self.finder(image)
if len(sources) > 0:
apertures = CircularAperture((sources['xcentroid'],
sources['ycentroid']),
r=self.aperture_radius)
sources['aperture_flux'] = aperture_photometry(
image, apertures)['aperture_sum']
init_guesses = Table(names=['x_0', 'y_0', 'flux_0'],
data=[sources['xcentroid'],
sources['ycentroid'],
sources['aperture_flux']])
self._define_fit_param_names()
for p0, param in self._pars_to_set.items():
if p0 not in init_guesses.colnames:
init_guesses[p0] = (len(init_guesses) *
[getattr(self.psf_model, param).value])
star_groups = self.group_maker(init_guesses)
output_tab, self._residual_image = self.nstar(image, star_groups)
star_groups = star_groups.group_by('group_id')
output_tab = hstack([star_groups, output_tab])
return output_tab | def function[do_photometry, parameter[self, image, init_guesses]]:
constant[
Perform PSF photometry in ``image``.
This method assumes that ``psf_model`` has centroids and flux
parameters which will be fitted to the data provided in
``image``. A compound model, in fact a sum of ``psf_model``,
will be fitted to groups of stars automatically identified by
``group_maker``. Also, ``image`` is not assumed to be background
subtracted. If ``init_guesses`` are not ``None`` then this
method uses ``init_guesses`` as initial guesses for the
centroids. If the centroid positions are set as ``fixed`` in the
PSF model ``psf_model``, then the optimizer will only consider
the flux as a variable.
Parameters
----------
image : 2D array-like, `~astropy.io.fits.ImageHDU`, `~astropy.io.fits.HDUList`
Image to perform photometry.
init_guesses: `~astropy.table.Table`
Table which contains the initial guesses (estimates) for the
set of parameters. Columns 'x_0' and 'y_0' which represent
the positions (in pixel coordinates) for each object must be
present. 'flux_0' can also be provided to set initial
fluxes. If 'flux_0' is not provided, aperture photometry is
used to estimate initial values for the fluxes. Additional
columns of the form '<parametername>_0' will be used to set
the initial guess for any parameters of the ``psf_model``
model that are not fixed.
Returns
-------
output_tab : `~astropy.table.Table` or None
Table with the photometry results, i.e., centroids and
fluxes estimations and the initial estimates used to start
the fitting process. Uncertainties on the fitted parameters
are reported as columns called ``<paramname>_unc`` provided
that the fitter object contains a dictionary called
``fit_info`` with the key ``param_cov``, which contains the
covariance matrix. If ``param_cov`` is not present,
            uncertainties are not reported.
]
if compare[name[self].bkg_estimator is_not constant[None]] begin[:]
variable[image] assign[=] binary_operation[name[image] - call[name[self].bkg_estimator, parameter[name[image]]]]
if compare[name[self].aperture_radius is constant[None]] begin[:]
if call[name[hasattr], parameter[name[self].psf_model, constant[fwhm]]] begin[:]
name[self].aperture_radius assign[=] name[self].psf_model.fwhm.value
if compare[name[init_guesses] is_not constant[None]] begin[:]
variable[init_guesses] assign[=] call[name[init_guesses].copy, parameter[]]
if compare[name[self].aperture_radius is constant[None]] begin[:]
if compare[constant[flux_0] <ast.NotIn object at 0x7da2590d7190> name[init_guesses].colnames] begin[:]
<ast.Raise object at 0x7da1b11aadd0>
if compare[name[self].finder is_not constant[None]] begin[:]
call[name[warnings].warn, parameter[constant[Both init_guesses and finder are different than None, which is ambiguous. finder is going to be ignored.], name[AstropyUserWarning]]]
if compare[constant[flux_0] <ast.NotIn object at 0x7da2590d7190> name[init_guesses].colnames] begin[:]
variable[apertures] assign[=] call[name[CircularAperture], parameter[tuple[[<ast.Subscript object at 0x7da1b11a9a50>, <ast.Subscript object at 0x7da1b11a9150>]]]]
call[name[init_guesses]][constant[flux_0]] assign[=] call[call[name[aperture_photometry], parameter[name[image], name[apertures]]]][constant[aperture_sum]]
call[name[self]._define_fit_param_names, parameter[]]
for taget[tuple[[<ast.Name object at 0x7da1b11aa770>, <ast.Name object at 0x7da1b11a8dc0>]]] in starred[call[name[self]._pars_to_set.items, parameter[]]] begin[:]
if compare[name[p0] <ast.NotIn object at 0x7da2590d7190> name[init_guesses].colnames] begin[:]
call[name[init_guesses]][name[p0]] assign[=] binary_operation[call[name[len], parameter[name[init_guesses]]] * list[[<ast.Attribute object at 0x7da1b11aaa40>]]]
variable[star_groups] assign[=] call[name[self].group_maker, parameter[name[init_guesses]]]
<ast.Tuple object at 0x7da1b11ab1f0> assign[=] call[name[self].nstar, parameter[name[image], name[star_groups]]]
variable[star_groups] assign[=] call[name[star_groups].group_by, parameter[constant[group_id]]]
variable[output_tab] assign[=] call[name[hstack], parameter[list[[<ast.Name object at 0x7da1b11aba60>, <ast.Name object at 0x7da1b11aac20>]]]]
return[name[output_tab]] | keyword[def] identifier[do_photometry] ( identifier[self] , identifier[image] , identifier[init_guesses] = keyword[None] ):
literal[string]
keyword[if] identifier[self] . identifier[bkg_estimator] keyword[is] keyword[not] keyword[None] :
identifier[image] = identifier[image] - identifier[self] . identifier[bkg_estimator] ( identifier[image] )
keyword[if] identifier[self] . identifier[aperture_radius] keyword[is] keyword[None] :
keyword[if] identifier[hasattr] ( identifier[self] . identifier[psf_model] , literal[string] ):
identifier[self] . identifier[aperture_radius] = identifier[self] . identifier[psf_model] . identifier[fwhm] . identifier[value]
keyword[elif] identifier[hasattr] ( identifier[self] . identifier[psf_model] , literal[string] ):
identifier[self] . identifier[aperture_radius] =( identifier[self] . identifier[psf_model] . identifier[sigma] . identifier[value] *
identifier[gaussian_sigma_to_fwhm] )
keyword[if] identifier[init_guesses] keyword[is] keyword[not] keyword[None] :
identifier[init_guesses] = identifier[init_guesses] . identifier[copy] ()
keyword[if] identifier[self] . identifier[aperture_radius] keyword[is] keyword[None] :
keyword[if] literal[string] keyword[not] keyword[in] identifier[init_guesses] . identifier[colnames] :
keyword[raise] identifier[ValueError] ( literal[string]
literal[string]
literal[string]
literal[string]
literal[string] )
keyword[if] identifier[self] . identifier[finder] keyword[is] keyword[not] keyword[None] :
identifier[warnings] . identifier[warn] ( literal[string]
literal[string]
literal[string] , identifier[AstropyUserWarning] )
keyword[if] literal[string] keyword[not] keyword[in] identifier[init_guesses] . identifier[colnames] :
identifier[apertures] = identifier[CircularAperture] (( identifier[init_guesses] [ literal[string] ],
identifier[init_guesses] [ literal[string] ]),
identifier[r] = identifier[self] . identifier[aperture_radius] )
identifier[init_guesses] [ literal[string] ]= identifier[aperture_photometry] (
identifier[image] , identifier[apertures] )[ literal[string] ]
keyword[else] :
keyword[if] identifier[self] . identifier[finder] keyword[is] keyword[None] :
keyword[raise] identifier[ValueError] ( literal[string]
literal[string] )
identifier[sources] = identifier[self] . identifier[finder] ( identifier[image] )
keyword[if] identifier[len] ( identifier[sources] )> literal[int] :
identifier[apertures] = identifier[CircularAperture] (( identifier[sources] [ literal[string] ],
identifier[sources] [ literal[string] ]),
identifier[r] = identifier[self] . identifier[aperture_radius] )
identifier[sources] [ literal[string] ]= identifier[aperture_photometry] (
identifier[image] , identifier[apertures] )[ literal[string] ]
identifier[init_guesses] = identifier[Table] ( identifier[names] =[ literal[string] , literal[string] , literal[string] ],
identifier[data] =[ identifier[sources] [ literal[string] ],
identifier[sources] [ literal[string] ],
identifier[sources] [ literal[string] ]])
identifier[self] . identifier[_define_fit_param_names] ()
keyword[for] identifier[p0] , identifier[param] keyword[in] identifier[self] . identifier[_pars_to_set] . identifier[items] ():
keyword[if] identifier[p0] keyword[not] keyword[in] identifier[init_guesses] . identifier[colnames] :
identifier[init_guesses] [ identifier[p0] ]=( identifier[len] ( identifier[init_guesses] )*
[ identifier[getattr] ( identifier[self] . identifier[psf_model] , identifier[param] ). identifier[value] ])
identifier[star_groups] = identifier[self] . identifier[group_maker] ( identifier[init_guesses] )
identifier[output_tab] , identifier[self] . identifier[_residual_image] = identifier[self] . identifier[nstar] ( identifier[image] , identifier[star_groups] )
identifier[star_groups] = identifier[star_groups] . identifier[group_by] ( literal[string] )
identifier[output_tab] = identifier[hstack] ([ identifier[star_groups] , identifier[output_tab] ])
keyword[return] identifier[output_tab] | def do_photometry(self, image, init_guesses=None):
"""
Perform PSF photometry in ``image``.
This method assumes that ``psf_model`` has centroids and flux
parameters which will be fitted to the data provided in
``image``. A compound model, in fact a sum of ``psf_model``,
will be fitted to groups of stars automatically identified by
``group_maker``. Also, ``image`` is not assumed to be background
subtracted. If ``init_guesses`` are not ``None`` then this
method uses ``init_guesses`` as initial guesses for the
centroids. If the centroid positions are set as ``fixed`` in the
PSF model ``psf_model``, then the optimizer will only consider
the flux as a variable.
Parameters
----------
image : 2D array-like, `~astropy.io.fits.ImageHDU`, `~astropy.io.fits.HDUList`
Image to perform photometry.
init_guesses: `~astropy.table.Table`
Table which contains the initial guesses (estimates) for the
set of parameters. Columns 'x_0' and 'y_0' which represent
the positions (in pixel coordinates) for each object must be
present. 'flux_0' can also be provided to set initial
fluxes. If 'flux_0' is not provided, aperture photometry is
used to estimate initial values for the fluxes. Additional
columns of the form '<parametername>_0' will be used to set
the initial guess for any parameters of the ``psf_model``
model that are not fixed.
Returns
-------
output_tab : `~astropy.table.Table` or None
Table with the photometry results, i.e., centroids and
fluxes estimations and the initial estimates used to start
the fitting process. Uncertainties on the fitted parameters
are reported as columns called ``<paramname>_unc`` provided
that the fitter object contains a dictionary called
``fit_info`` with the key ``param_cov``, which contains the
covariance matrix. If ``param_cov`` is not present,
            uncertainties are not reported.
"""
if self.bkg_estimator is not None:
image = image - self.bkg_estimator(image) # depends on [control=['if'], data=[]]
if self.aperture_radius is None:
if hasattr(self.psf_model, 'fwhm'):
self.aperture_radius = self.psf_model.fwhm.value # depends on [control=['if'], data=[]]
elif hasattr(self.psf_model, 'sigma'):
self.aperture_radius = self.psf_model.sigma.value * gaussian_sigma_to_fwhm # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if init_guesses is not None:
# make sure the code does not modify user's input
init_guesses = init_guesses.copy()
if self.aperture_radius is None:
if 'flux_0' not in init_guesses.colnames:
                raise ValueError('aperture_radius is None and could not be determined by psf_model. Please, either provide a value for aperture_radius or define fwhm/sigma at psf_model.') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if self.finder is not None:
warnings.warn('Both init_guesses and finder are different than None, which is ambiguous. finder is going to be ignored.', AstropyUserWarning) # depends on [control=['if'], data=[]]
if 'flux_0' not in init_guesses.colnames:
apertures = CircularAperture((init_guesses['x_0'], init_guesses['y_0']), r=self.aperture_radius)
init_guesses['flux_0'] = aperture_photometry(image, apertures)['aperture_sum'] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['init_guesses']]
else:
if self.finder is None:
raise ValueError('Finder cannot be None if init_guesses are not given.') # depends on [control=['if'], data=[]]
sources = self.finder(image)
if len(sources) > 0:
apertures = CircularAperture((sources['xcentroid'], sources['ycentroid']), r=self.aperture_radius)
sources['aperture_flux'] = aperture_photometry(image, apertures)['aperture_sum']
init_guesses = Table(names=['x_0', 'y_0', 'flux_0'], data=[sources['xcentroid'], sources['ycentroid'], sources['aperture_flux']]) # depends on [control=['if'], data=[]]
self._define_fit_param_names()
for (p0, param) in self._pars_to_set.items():
if p0 not in init_guesses.colnames:
init_guesses[p0] = len(init_guesses) * [getattr(self.psf_model, param).value] # depends on [control=['if'], data=['p0']] # depends on [control=['for'], data=[]]
star_groups = self.group_maker(init_guesses)
(output_tab, self._residual_image) = self.nstar(image, star_groups)
star_groups = star_groups.group_by('group_id')
output_tab = hstack([star_groups, output_tab])
return output_tab |
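A sketch of building the `init_guesses` table this method consumes; the pixel
positions and fluxes are invented, and `phot`/`image` stand for an assumed
photutils-style photometry object and its input frame:

from astropy.table import Table

init_guesses = Table(names=['x_0', 'y_0', 'flux_0'],
                     data=[[12.3, 45.6],      # x pixel positions
                           [7.8, 90.1],       # y pixel positions
                           [1500.0, 980.0]])  # initial flux estimates
# results = phot.do_photometry(image, init_guesses=init_guesses)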
def f_store(self, recursive=True, store_data=pypetconstants.STORE_DATA,
max_depth=None):
"""Stores a group node to disk
:param recursive:
Whether recursively all children should be stored too. Default is ``True``.
:param store_data:
For how to choose 'store_data' see :ref:`more-on-storing`.
:param max_depth:
In case `recursive` is `True`, you can specify the maximum depth to store
data relative from current node. Leave `None` if you don't want to limit
the depth.
"""
traj = self._nn_interface._root_instance
storage_service = traj.v_storage_service
storage_service.store(pypetconstants.GROUP, self,
trajectory_name=traj.v_name,
recursive=recursive,
store_data=store_data,
max_depth=max_depth) | def function[f_store, parameter[self, recursive, store_data, max_depth]]:
constant[Stores a group node to disk
:param recursive:
Whether recursively all children should be stored too. Default is ``True``.
:param store_data:
For how to choose 'store_data' see :ref:`more-on-storing`.
:param max_depth:
In case `recursive` is `True`, you can specify the maximum depth to store
data relative from current node. Leave `None` if you don't want to limit
the depth.
]
variable[traj] assign[=] name[self]._nn_interface._root_instance
variable[storage_service] assign[=] name[traj].v_storage_service
call[name[storage_service].store, parameter[name[pypetconstants].GROUP, name[self]]] | keyword[def] identifier[f_store] ( identifier[self] , identifier[recursive] = keyword[True] , identifier[store_data] = identifier[pypetconstants] . identifier[STORE_DATA] ,
identifier[max_depth] = keyword[None] ):
literal[string]
identifier[traj] = identifier[self] . identifier[_nn_interface] . identifier[_root_instance]
identifier[storage_service] = identifier[traj] . identifier[v_storage_service]
identifier[storage_service] . identifier[store] ( identifier[pypetconstants] . identifier[GROUP] , identifier[self] ,
identifier[trajectory_name] = identifier[traj] . identifier[v_name] ,
identifier[recursive] = identifier[recursive] ,
identifier[store_data] = identifier[store_data] ,
identifier[max_depth] = identifier[max_depth] ) | def f_store(self, recursive=True, store_data=pypetconstants.STORE_DATA, max_depth=None):
"""Stores a group node to disk
:param recursive:
Whether recursively all children should be stored too. Default is ``True``.
:param store_data:
For how to choose 'store_data' see :ref:`more-on-storing`.
:param max_depth:
In case `recursive` is `True`, you can specify the maximum depth to store
data relative from current node. Leave `None` if you don't want to limit
the depth.
"""
traj = self._nn_interface._root_instance
storage_service = traj.v_storage_service
storage_service.store(pypetconstants.GROUP, self, trajectory_name=traj.v_name, recursive=recursive, store_data=store_data, max_depth=max_depth) |
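A usage sketch under the assumption that this method hangs off a pypet group
node; the trajectory name, file name, and result path are illustrative:

from pypet import Trajectory

traj = Trajectory('demo', filename='demo.hdf5')
traj.f_add_result('analysis.mean_value', 0.5)
# store only the 'analysis' subtree, at most one level below it
traj.analysis.f_store(recursive=True, max_depth=1)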
def OPTIONS(self, *args, **kwargs):
"""Handles CORS requests for this controller
if self.cors is False then this will raise a 405, otherwise it sets everything
necessary to satisfy the request in self.response
"""
if not self.cors:
raise CallError(405)
req = self.request
origin = req.get_header('origin')
if not origin:
raise CallError(400, 'Need Origin header')
call_headers = [
('Access-Control-Request-Headers', 'Access-Control-Allow-Headers'),
('Access-Control-Request-Method', 'Access-Control-Allow-Methods')
]
for req_header, res_header in call_headers:
v = req.get_header(req_header)
if v:
self.response.set_header(res_header, v)
else:
raise CallError(400, 'Need {} header'.format(req_header))
other_headers = {
'Access-Control-Allow-Credentials': 'true',
'Access-Control-Max-Age': 3600
}
self.response.add_headers(other_headers) | def function[OPTIONS, parameter[self]]:
constant[Handles CORS requests for this controller
if self.cors is False then this will raise a 405, otherwise it sets everything
necessary to satisfy the request in self.response
]
if <ast.UnaryOp object at 0x7da20c992fe0> begin[:]
<ast.Raise object at 0x7da20c992080>
variable[req] assign[=] name[self].request
variable[origin] assign[=] call[name[req].get_header, parameter[constant[origin]]]
if <ast.UnaryOp object at 0x7da20c993d90> begin[:]
<ast.Raise object at 0x7da18bccba00>
variable[call_headers] assign[=] list[[<ast.Tuple object at 0x7da204346ce0>, <ast.Tuple object at 0x7da204347b20>]]
for taget[tuple[[<ast.Name object at 0x7da204344ee0>, <ast.Name object at 0x7da204344b50>]]] in starred[name[call_headers]] begin[:]
variable[v] assign[=] call[name[req].get_header, parameter[name[req_header]]]
if name[v] begin[:]
call[name[self].response.set_header, parameter[name[res_header], name[v]]]
variable[other_headers] assign[=] dictionary[[<ast.Constant object at 0x7da204347bb0>, <ast.Constant object at 0x7da204346b60>], [<ast.Constant object at 0x7da2043447c0>, <ast.Constant object at 0x7da204347400>]]
call[name[self].response.add_headers, parameter[name[other_headers]]] | keyword[def] identifier[OPTIONS] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[cors] :
keyword[raise] identifier[CallError] ( literal[int] )
identifier[req] = identifier[self] . identifier[request]
identifier[origin] = identifier[req] . identifier[get_header] ( literal[string] )
keyword[if] keyword[not] identifier[origin] :
keyword[raise] identifier[CallError] ( literal[int] , literal[string] )
identifier[call_headers] =[
( literal[string] , literal[string] ),
( literal[string] , literal[string] )
]
keyword[for] identifier[req_header] , identifier[res_header] keyword[in] identifier[call_headers] :
identifier[v] = identifier[req] . identifier[get_header] ( identifier[req_header] )
keyword[if] identifier[v] :
identifier[self] . identifier[response] . identifier[set_header] ( identifier[res_header] , identifier[v] )
keyword[else] :
keyword[raise] identifier[CallError] ( literal[int] , literal[string] . identifier[format] ( identifier[req_header] ))
identifier[other_headers] ={
literal[string] : literal[string] ,
literal[string] : literal[int]
}
identifier[self] . identifier[response] . identifier[add_headers] ( identifier[other_headers] ) | def OPTIONS(self, *args, **kwargs):
"""Handles CORS requests for this controller
if self.cors is False then this will raise a 405, otherwise it sets everything
necessary to satisfy the request in self.response
"""
if not self.cors:
raise CallError(405) # depends on [control=['if'], data=[]]
req = self.request
origin = req.get_header('origin')
if not origin:
raise CallError(400, 'Need Origin header') # depends on [control=['if'], data=[]]
call_headers = [('Access-Control-Request-Headers', 'Access-Control-Allow-Headers'), ('Access-Control-Request-Method', 'Access-Control-Allow-Methods')]
for (req_header, res_header) in call_headers:
v = req.get_header(req_header)
if v:
self.response.set_header(res_header, v) # depends on [control=['if'], data=[]]
else:
raise CallError(400, 'Need {} header'.format(req_header)) # depends on [control=['for'], data=[]]
other_headers = {'Access-Control-Allow-Credentials': 'true', 'Access-Control-Max-Age': 3600}
self.response.add_headers(other_headers) |
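A minimal stand-alone illustration of the mirroring rule the handler applies;
plain dicts are used here, so no framework objects are required and the origin
is hypothetical:

request_headers = {
    'Origin': 'https://app.example.com',
    'Access-Control-Request-Method': 'POST',
    'Access-Control-Request-Headers': 'Content-Type',
}
response_headers = {
    # each Request-* header is echoed back as the matching Allow-* header
    'Access-Control-Allow-Methods': request_headers['Access-Control-Request-Method'],
    'Access-Control-Allow-Headers': request_headers['Access-Control-Request-Headers'],
    'Access-Control-Allow-Credentials': 'true',
    'Access-Control-Max-Age': 3600,
}
print(response_headers)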
def _perform_call(self, query_url, default_response=None, timeout=10):
"""Returns the raw results from the API"""
try:
response = requests.get(query_url, timeout=timeout)
except socket.timeout:
current_plugin.logger.warning("Timeout contacting Piwik server")
return default_response
except Exception:
current_plugin.logger.exception("Unable to connect")
return default_response
return response.content | def function[_perform_call, parameter[self, query_url, default_response, timeout]]:
constant[Returns the raw results from the API]
<ast.Try object at 0x7da1b0383fa0>
return[name[response].content] | keyword[def] identifier[_perform_call] ( identifier[self] , identifier[query_url] , identifier[default_response] = keyword[None] , identifier[timeout] = literal[int] ):
literal[string]
keyword[try] :
identifier[response] = identifier[requests] . identifier[get] ( identifier[query_url] , identifier[timeout] = identifier[timeout] )
keyword[except] identifier[socket] . identifier[timeout] :
identifier[current_plugin] . identifier[logger] . identifier[warning] ( literal[string] )
keyword[return] identifier[default_response]
keyword[except] identifier[Exception] :
identifier[current_plugin] . identifier[logger] . identifier[exception] ( literal[string] )
keyword[return] identifier[default_response]
keyword[return] identifier[response] . identifier[content] | def _perform_call(self, query_url, default_response=None, timeout=10):
"""Returns the raw results from the API"""
try:
response = requests.get(query_url, timeout=timeout) # depends on [control=['try'], data=[]]
except socket.timeout:
current_plugin.logger.warning('Timeout contacting Piwik server')
return default_response # depends on [control=['except'], data=[]]
except Exception:
current_plugin.logger.exception('Unable to connect')
return default_response # depends on [control=['except'], data=[]]
return response.content |
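The same guard-and-fallback pattern as a free function, for reuse outside the
class; the default value stands in for "empty analytics data" and the URL is
the caller's responsibility:

import socket

import requests

def fetch_or_default(url, default=b'[]', timeout=10):
    """Return response bytes, or `default` on timeout or connection trouble."""
    try:
        return requests.get(url, timeout=timeout).content
    except socket.timeout:
        return default                           # server too slow
    except requests.exceptions.RequestException:
        return default                           # DNS failure, refused connection, etc.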
def import_locations(self, zone_file):
"""Parse zoneinfo zone description data files.
``import_locations()`` returns a list of :class:`Zone` objects.
It expects data files in one of the following formats::
AN +1211-06900 America/Curacao
AO -0848+01314 Africa/Luanda
AQ -7750+16636 Antarctica/McMurdo McMurdo Station, Ross Island
Files containing the data in this format can be found in the
:file:`zone.tab` file that is normally found in
:file:`/usr/share/zoneinfo` on UNIX-like systems, or from the `standard
distribution site`_.
When processed by ``import_locations()`` a ``list`` object of the
following style will be returned::
[Zone(None, None, "AN", "America/Curacao", None),
Zone(None, None, "AO", "Africa/Luanda", None),
Zone(None, None, "AO", "Antartica/McMurdo",
["McMurdo Station", "Ross Island"])]
Args:
zone_file (iter): ``zone.tab`` data to read
Returns:
list: Locations as :class:`Zone` objects
Raises:
FileFormatError: Unknown file format
.. _standard distribution site: ftp://elsie.nci.nih.gov/pub/
"""
self._zone_file = zone_file
field_names = ('country', 'location', 'zone', 'comments')
data = utils.prepare_csv_read(zone_file, field_names, delimiter=r" ")
for row in (x for x in data if not x['country'].startswith('#')):
if row['comments']:
row['comments'] = row['comments'].split(', ')
self.append(Zone(**row)) | def function[import_locations, parameter[self, zone_file]]:
constant[Parse zoneinfo zone description data files.
``import_locations()`` returns a list of :class:`Zone` objects.
It expects data files in one of the following formats::
AN +1211-06900 America/Curacao
AO -0848+01314 Africa/Luanda
AQ -7750+16636 Antarctica/McMurdo McMurdo Station, Ross Island
Files containing the data in this format can be found in the
:file:`zone.tab` file that is normally found in
:file:`/usr/share/zoneinfo` on UNIX-like systems, or from the `standard
distribution site`_.
When processed by ``import_locations()`` a ``list`` object of the
following style will be returned::
[Zone(None, None, "AN", "America/Curacao", None),
Zone(None, None, "AO", "Africa/Luanda", None),
Zone(None, None, "AO", "Antartica/McMurdo",
["McMurdo Station", "Ross Island"])]
Args:
zone_file (iter): ``zone.tab`` data to read
Returns:
list: Locations as :class:`Zone` objects
Raises:
FileFormatError: Unknown file format
.. _standard distribution site: ftp://elsie.nci.nih.gov/pub/
]
name[self]._zone_file assign[=] name[zone_file]
variable[field_names] assign[=] tuple[[<ast.Constant object at 0x7da2047eb880>, <ast.Constant object at 0x7da2047ead40>, <ast.Constant object at 0x7da2047eb1c0>, <ast.Constant object at 0x7da2047ebfd0>]]
variable[data] assign[=] call[name[utils].prepare_csv_read, parameter[name[zone_file], name[field_names]]]
for taget[name[row]] in starred[<ast.GeneratorExp object at 0x7da2047e97b0>] begin[:]
if call[name[row]][constant[comments]] begin[:]
call[name[row]][constant[comments]] assign[=] call[call[name[row]][constant[comments]].split, parameter[constant[, ]]]
call[name[self].append, parameter[call[name[Zone], parameter[]]]] | keyword[def] identifier[import_locations] ( identifier[self] , identifier[zone_file] ):
literal[string]
identifier[self] . identifier[_zone_file] = identifier[zone_file]
identifier[field_names] =( literal[string] , literal[string] , literal[string] , literal[string] )
identifier[data] = identifier[utils] . identifier[prepare_csv_read] ( identifier[zone_file] , identifier[field_names] , identifier[delimiter] = literal[string] )
keyword[for] identifier[row] keyword[in] ( identifier[x] keyword[for] identifier[x] keyword[in] identifier[data] keyword[if] keyword[not] identifier[x] [ literal[string] ]. identifier[startswith] ( literal[string] )):
keyword[if] identifier[row] [ literal[string] ]:
identifier[row] [ literal[string] ]= identifier[row] [ literal[string] ]. identifier[split] ( literal[string] )
identifier[self] . identifier[append] ( identifier[Zone] (** identifier[row] )) | def import_locations(self, zone_file):
"""Parse zoneinfo zone description data files.
``import_locations()`` returns a list of :class:`Zone` objects.
It expects data files in one of the following formats::
AN +1211-06900 America/Curacao
AO -0848+01314 Africa/Luanda
AQ -7750+16636 Antarctica/McMurdo McMurdo Station, Ross Island
Files containing the data in this format can be found in the
:file:`zone.tab` file that is normally found in
:file:`/usr/share/zoneinfo` on UNIX-like systems, or from the `standard
distribution site`_.
When processed by ``import_locations()`` a ``list`` object of the
following style will be returned::
[Zone(None, None, "AN", "America/Curacao", None),
Zone(None, None, "AO", "Africa/Luanda", None),
Zone(None, None, "AO", "Antartica/McMurdo",
["McMurdo Station", "Ross Island"])]
Args:
zone_file (iter): ``zone.tab`` data to read
Returns:
list: Locations as :class:`Zone` objects
Raises:
FileFormatError: Unknown file format
.. _standard distribution site: ftp://elsie.nci.nih.gov/pub/
"""
self._zone_file = zone_file
field_names = ('country', 'location', 'zone', 'comments')
data = utils.prepare_csv_read(zone_file, field_names, delimiter='\t')
for row in (x for x in data if not x['country'].startswith('#')):
if row['comments']:
row['comments'] = row['comments'].split(', ') # depends on [control=['if'], data=[]]
self.append(Zone(**row)) # depends on [control=['for'], data=['row']] |
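A small round-trip check of the parser above, assuming tab-separated zone.tab lines and a hypothetical `ZoneLookup` list subclass that carries the `import_locations` method.

import io

zone_tab = io.StringIO(
    "AN\t+1211-06900\tAmerica/Curacao\n"
    "AQ\t-7750+16636\tAntarctica/McMurdo\tMcMurdo Station, Ross Island\n"
)
zones = ZoneLookup()             # hypothetical container defining import_locations
zones.import_locations(zone_tab)
# zones[-1] -> Zone(None, None, "AQ", "Antarctica/McMurdo",
#                   ["McMurdo Station", "Ross Island"])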
def build_state(
cls,
db: BaseAtomicDB,
header: BlockHeader,
previous_hashes: Iterable[Hash32] = ()) -> BaseState:
"""
You probably want `VM().state` instead of this.
Occasionally, you want to build custom state against a particular header and DB,
even if you don't have the VM initialized. This is a convenience method to do that.
"""
execution_context = header.create_execution_context(previous_hashes)
return cls.get_state_class()(db, execution_context, header.state_root) | def function[build_state, parameter[cls, db, header, previous_hashes]]:
constant[
You probably want `VM().state` instead of this.
Occasionally, you want to build custom state against a particular header and DB,
even if you don't have the VM initialized. This is a convenience method to do that.
]
variable[execution_context] assign[=] call[name[header].create_execution_context, parameter[name[previous_hashes]]]
return[call[call[name[cls].get_state_class, parameter[]], parameter[name[db], name[execution_context], name[header].state_root]]] | keyword[def] identifier[build_state] (
identifier[cls] ,
identifier[db] : identifier[BaseAtomicDB] ,
identifier[header] : identifier[BlockHeader] ,
identifier[previous_hashes] : identifier[Iterable] [ identifier[Hash32] ]=())-> identifier[BaseState] :
literal[string]
identifier[execution_context] = identifier[header] . identifier[create_execution_context] ( identifier[previous_hashes] )
keyword[return] identifier[cls] . identifier[get_state_class] ()( identifier[db] , identifier[execution_context] , identifier[header] . identifier[state_root] ) | def build_state(cls, db: BaseAtomicDB, header: BlockHeader, previous_hashes: Iterable[Hash32]=()) -> BaseState:
"""
You probably want `VM().state` instead of this.
Occasionally, you want to build custom state against a particular header and DB,
even if you don't have the VM initialized. This is a convenience method to do that.
"""
execution_context = header.create_execution_context(previous_hashes)
return cls.get_state_class()(db, execution_context, header.state_root) |
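A hedged sketch of the convenience call described in the docstring; `MyVM`, `db`, `header`, and `parent_hash` stand in for a concrete VM class and objects you already hold.

# Build custom state against a known header without instantiating the VM.
state = MyVM.build_state(db, header, previous_hashes=(parent_hash,))
# The returned state is rooted at header.state_root per the constructor call above.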
def remove_api_key(self, api_id, stage_name):
"""
Remove a generated API key for api_id and stage_name
"""
response = self.apigateway_client.get_api_keys(
limit=1,
nameQuery='{}_{}'.format(stage_name, api_id)
)
for api_key in response.get('items'):
self.apigateway_client.delete_api_key(
apiKey="{}".format(api_key['id'])
) | def function[remove_api_key, parameter[self, api_id, stage_name]]:
constant[
Remove a generated API key for api_id and stage_name
]
variable[response] assign[=] call[name[self].apigateway_client.get_api_keys, parameter[]]
for taget[name[api_key]] in starred[call[name[response].get, parameter[constant[items]]]] begin[:]
call[name[self].apigateway_client.delete_api_key, parameter[]] | keyword[def] identifier[remove_api_key] ( identifier[self] , identifier[api_id] , identifier[stage_name] ):
literal[string]
identifier[response] = identifier[self] . identifier[apigateway_client] . identifier[get_api_keys] (
identifier[limit] = literal[int] ,
identifier[nameQuery] = literal[string] . identifier[format] ( identifier[stage_name] , identifier[api_id] )
)
keyword[for] identifier[api_key] keyword[in] identifier[response] . identifier[get] ( literal[string] ):
identifier[self] . identifier[apigateway_client] . identifier[delete_api_key] (
identifier[apiKey] = literal[string] . identifier[format] ( identifier[api_key] [ literal[string] ])
) | def remove_api_key(self, api_id, stage_name):
"""
Remove a generated API key for api_id and stage_name
"""
response = self.apigateway_client.get_api_keys(limit=1, nameQuery='{}_{}'.format(stage_name, api_id))
for api_key in response.get('items'):
self.apigateway_client.delete_api_key(apiKey='{}'.format(api_key['id'])) # depends on [control=['for'], data=['api_key']] |
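A usage sketch with placeholder identifiers. One reading worth noting: `get_api_keys` is called with `limit=1`, so even though the loop is written for many items, at most one matching key per call is deleted.

# Hypothetical deployment cleanup.
deployer.remove_api_key(api_id="abc123def4", stage_name="production")
# Internally this looks up keys named "production_abc123def4" and deletes them.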
def dpss(N, NW=None, k=None):
r"""Discrete prolate spheroidal (Slepian) sequences
Calculation of the Discrete Prolate Spheroidal Sequences also known as the
slepian sequences, and the corresponding eigenvalues.
:param int N: desired window length
:param float NW: The time half bandwidth parameter (typical values are
2.5,3,3.5,4).
:param int k: returns the first k Slepian sequences. If *k* is not
provided, *k* is set to *NW*2*.
:return:
* tapers, a matrix of tapering windows. Matrix is a N by *k* (k
is the number of windows)
* eigen, a vector of eigenvalues of length *k*
The discrete prolate spheroidal or Slepian sequences derive from the following
time-frequency concentration problem. For all finite-energy sequences
index-limited to some set, which sequence maximizes the following ratio:
.. math::
\lambda = \frac{\int_{-W}^{W}\left| X(f) \right|^2 df}
{\int_{-F_s/2}^{F_s/2}\left| X(f) \right|^2 df}
where :math:`F_s` is the sampling frequency and :math:`|W| < F_s/2`.
This ratio determines which index-limited sequence has the largest proportion of its
energy in the band :math:`[-W,W]` with :math:`0 < \lambda < 1`.
The sequence maximizing the ratio is the first
discrete prolate spheroidal or Slepian sequence. The second Slepian sequence
maximizes the ratio and is orthogonal to the first Slepian sequence. The third
Slepian sequence maximizes the ratio of integrals and is orthogonal to both
the first and second Slepian sequences and so on.
.. note:: Note about the implementation. Since the slepian generation is
computationally expensive, we use a C implementation based on the C
code written by Lees as published in:
Lees, J. M. and J. Park (1995): Multiple-taper spectral analysis: A stand-alone
C-subroutine: Computers & Geology: 21, 199-236.
However, the original C code has been trimmed. Indeed, we only require the
multitap function (which depends only on the jtridib and jtinvit functions).
.. plot::
:width: 80%
:include-source:
from spectrum import *
from pylab import *
N = 512
[w, eigens] = dpss(N, 2.5, 4)
plot(w)
title('Slepian Sequences N=%s, NW=2.5' % N)
axis([0, N, -0.15, 0.15])
legend(['1st window','2nd window','3rd window','4th window'])
Windows are normalised:
.. math:: \sum_k h_k h_k = 1
:references: [Percival]_
Slepian, D. Prolate spheroidal wave functions, Fourier analysis, and
uncertainty V: The discrete case. Bell System Technical Journal,
Volume 57 (1978), 1371-1430
.. note:: the C code to create the slepian windows is extracted from original C code
from Lees and Park (1995) and uses the conventions of Percival and Walden (1993).
Functions that are not used here were removed.
"""
assert NW < N/2 , "NW ({}) must be stricly less than N/2 ({}/2)".format(NW, N)
if k is None:
k = min(round(2*NW),N)
k = int(max(k,1))
from numpy import dot, zeros, arange, sqrt
mtspeclib.multitap.restype = None
lam = zeros(k, dtype=float)
tapers = zeros(k*N, dtype=float)
tapsum = zeros(k, dtype=float)
res = mtspeclib.multitap(
c_int(N),
c_int(k),
lam.ctypes.data_as(c_void_p),
c_float(NW),
tapers.ctypes.data_as(c_void_p),
tapsum.ctypes.data_as(c_void_p),
)
# normalisation by sqrt(N). It is required to have normalised windows
tapers = tapers.reshape(k,N).transpose() / sqrt(N)
for i in range(k):
# By convention (Percival and Walden, 1993 pg 379)
# * symmetric tapers (k=0,2,4,...) should have a positive average.
# * antisymmetric tapers should begin with a positive lobe
if i%2 == 0:
if tapsum[i]<0:
tapsum[i] *= -1
tapers[:,i] *= -1
else:
if tapers[0,i] < 0:
tapsum[i] *= -1
tapers[:,i] *= -1
# Now find the eigenvalues of the original
# Use the autocovariance sequence technique from Percival and Walden, 1993
# pg 390 to get the eigenvalues more precisely (same as matlab output)
# The values returned in lam are not exactly the same as in the following methods.
acvs = _autocov(tapers.transpose(), debias=False) * N
nidx = arange(N)
W = float(NW)/N
r = 4*W*np.sinc(2*W*nidx)
r[0] = 2*W
eigvals = dot(acvs, r)
#return (tapers, lam)
return [tapers, eigvals] | def function[dpss, parameter[N, NW, k]]:
constant[Discrete prolate spheroidal (Slepian) sequences
Calculation of the Discrete Prolate Spheroidal Sequences also known as the
slepian sequences, and the corresponding eigenvalues.
:param int N: desired window length
:param float NW: The time half bandwidth parameter (typical values are
2.5,3,3.5,4).
:param int k: returns the first k Slepian sequences. If *k* is not
provided, *k* defaults to ``2*NW`` (capped at *N*).
:return:
* tapers, a matrix of tapering windows. Matrix is a N by *k* (k
is the number of windows)
* eigen, a vector of eigenvalues of length *k*
The discrete prolate spheroidal or Slepian sequences derive from the following
time-frequency concentration problem. For all finite-energy sequences
index-limited to some set, which sequence maximizes the following ratio:
.. math::
\lambda = \frac{\int_{-W}^{W}\left| X(f) \right|^2 df}
{\int_{-F_s/2}^{F_s/2}\left| X(f) \right|^2 df}
where :math:`F_s` is the sampling frequency and :math:`|W| < F_s/2`.
This ratio determines which index-limited sequence has the largest proportion of its
energy in the band :math:`[-W,W]` with :math:`0 < \lambda < 1`.
The sequence maximizing the ratio is the first
discrete prolate spheroidal or Slepian sequence. The second Slepian sequence
maximizes the ratio and is orthogonal to the first Slepian sequence. The third
Slepian sequence maximizes the ratio of integrals and is orthogonal to both
the first and second Slepian sequences and so on.
.. note:: Note about the implementation. Since the slepian generation is
computationally expensive, we use a C implementation based on the C
code written by Lees as published in:
Lees, J. M. and J. Park (1995): Multiple-taper spectral analysis: A stand-alone
C-subroutine: Computers & Geology: 21, 199-236.
However, the original C code has been trimmed. Indeed, we only require the
multitap function (which depends only on the jtridib and jtinvit functions).
.. plot::
:width: 80%
:include-source:
from spectrum import *
from pylab import *
N = 512
[w, eigens] = dpss(N, 2.5, 4)
plot(w)
title('Slepian Sequences N=%s, NW=2.5' % N)
axis([0, N, -0.15, 0.15])
legend(['1st window','2nd window','3rd window','4th window'])
Windows are normalised:
.. math:: \sum_k h_k h_k = 1
:references: [Percival]_
Slepian, D. Prolate spheroidal wave functions, Fourier analysis, and
uncertainty V: The discrete case. Bell System Technical Journal,
Volume 57 (1978), 1371-1430
.. note:: the C code to create the slepian windows is extracted from original C code
from Lees and Park (1995) and uses the conventions of Percival and Walden (1993).
Functions that are not used here were removed.
]
assert[compare[name[NW] less[<] binary_operation[name[N] / constant[2]]]]
if compare[name[k] is constant[None]] begin[:]
variable[k] assign[=] call[name[min], parameter[call[name[round], parameter[binary_operation[constant[2] * name[NW]]]], name[N]]]
variable[k] assign[=] call[name[int], parameter[call[name[max], parameter[name[k], constant[1]]]]]
from relative_module[numpy] import module[dot], module[zeros], module[arange], module[sqrt]
name[mtspeclib].multitap.restype assign[=] constant[None]
variable[lam] assign[=] call[name[zeros], parameter[name[k]]]
variable[tapers] assign[=] call[name[zeros], parameter[binary_operation[name[k] * name[N]]]]
variable[tapsum] assign[=] call[name[zeros], parameter[name[k]]]
variable[res] assign[=] call[name[mtspeclib].multitap, parameter[call[name[c_int], parameter[name[N]]], call[name[c_int], parameter[name[k]]], call[name[lam].ctypes.data_as, parameter[name[c_void_p]]], call[name[c_float], parameter[name[NW]]], call[name[tapers].ctypes.data_as, parameter[name[c_void_p]]], call[name[tapsum].ctypes.data_as, parameter[name[c_void_p]]]]]
variable[tapers] assign[=] binary_operation[call[call[name[tapers].reshape, parameter[name[k], name[N]]].transpose, parameter[]] / call[name[sqrt], parameter[name[N]]]]
for taget[name[i]] in starred[call[name[range], parameter[name[k]]]] begin[:]
if compare[binary_operation[name[i] <ast.Mod object at 0x7da2590d6920> constant[2]] equal[==] constant[0]] begin[:]
if compare[call[name[tapsum]][name[i]] less[<] constant[0]] begin[:]
<ast.AugAssign object at 0x7da18f09fb80>
<ast.AugAssign object at 0x7da18f09fb20>
variable[acvs] assign[=] binary_operation[call[name[_autocov], parameter[call[name[tapers].transpose, parameter[]]]] * name[N]]
variable[nidx] assign[=] call[name[arange], parameter[name[N]]]
variable[W] assign[=] binary_operation[call[name[float], parameter[name[NW]]] / name[N]]
variable[r] assign[=] binary_operation[binary_operation[constant[4] * name[W]] * call[name[np].sinc, parameter[binary_operation[binary_operation[constant[2] * name[W]] * name[nidx]]]]]
call[name[r]][constant[0]] assign[=] binary_operation[constant[2] * name[W]]
variable[eigvals] assign[=] call[name[dot], parameter[name[acvs], name[r]]]
return[list[[<ast.Name object at 0x7da18f09eb90>, <ast.Name object at 0x7da18f09e4d0>]]] | keyword[def] identifier[dpss] ( identifier[N] , identifier[NW] = keyword[None] , identifier[k] = keyword[None] ):
literal[string]
keyword[assert] identifier[NW] < identifier[N] / literal[int] , literal[string] . identifier[format] ( identifier[NW] , identifier[N] )
keyword[if] identifier[k] keyword[is] keyword[None] :
identifier[k] = identifier[min] ( identifier[round] ( literal[int] * identifier[NW] ), identifier[N] )
identifier[k] = identifier[int] ( identifier[max] ( identifier[k] , literal[int] ))
keyword[from] identifier[numpy] keyword[import] identifier[dot] , identifier[zeros] , identifier[arange] , identifier[sqrt]
identifier[mtspeclib] . identifier[multitap] . identifier[restype] = keyword[None]
identifier[lam] = identifier[zeros] ( identifier[k] , identifier[dtype] = identifier[float] )
identifier[tapers] = identifier[zeros] ( identifier[k] * identifier[N] , identifier[dtype] = identifier[float] )
identifier[tapsum] = identifier[zeros] ( identifier[k] , identifier[dtype] = identifier[float] )
identifier[res] = identifier[mtspeclib] . identifier[multitap] (
identifier[c_int] ( identifier[N] ),
identifier[c_int] ( identifier[k] ),
identifier[lam] . identifier[ctypes] . identifier[data_as] ( identifier[c_void_p] ),
identifier[c_float] ( identifier[NW] ),
identifier[tapers] . identifier[ctypes] . identifier[data_as] ( identifier[c_void_p] ),
identifier[tapsum] . identifier[ctypes] . identifier[data_as] ( identifier[c_void_p] ),
)
identifier[tapers] = identifier[tapers] . identifier[reshape] ( identifier[k] , identifier[N] ). identifier[transpose] ()/ identifier[sqrt] ( identifier[N] )
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[k] ):
keyword[if] identifier[i] % literal[int] == literal[int] :
keyword[if] identifier[tapsum] [ identifier[i] ]< literal[int] :
identifier[tapsum] [ identifier[i] ]*=- literal[int]
identifier[tapers] [:, identifier[i] ]*=- literal[int]
keyword[else] :
keyword[if] identifier[tapers] [ literal[int] , identifier[i] ]< literal[int] :
identifier[tapsum] [ identifier[i] ]*=- literal[int]
identifier[tapers] [:, identifier[i] ]*=- literal[int]
identifier[acvs] = identifier[_autocov] ( identifier[tapers] . identifier[transpose] (), identifier[debias] = keyword[False] )* identifier[N]
identifier[nidx] = identifier[arange] ( identifier[N] )
identifier[W] = identifier[float] ( identifier[NW] )/ identifier[N]
identifier[r] = literal[int] * identifier[W] * identifier[np] . identifier[sinc] ( literal[int] * identifier[W] * identifier[nidx] )
identifier[r] [ literal[int] ]= literal[int] * identifier[W]
identifier[eigvals] = identifier[dot] ( identifier[acvs] , identifier[r] )
keyword[return] [ identifier[tapers] , identifier[eigvals] ] | def dpss(N, NW=None, k=None):
"""Discrete prolate spheroidal (Slepian) sequences
Calculation of the Discrete Prolate Spheroidal Sequences also known as the
slepian sequences, and the corresponding eigenvalues.
:param int N: desired window length
:param float NW: The time half bandwidth parameter (typical values are
2.5,3,3.5,4).
:param int k: returns the first k Slepian sequences. If *k* is not
provided, *k* defaults to ``2*NW`` (capped at *N*).
:return:
* tapers, a matrix of tapering windows. Matrix is a N by *k* (k
is the number of windows)
* eigen, a vector of eigenvalues of length *k*
The discrete prolate spheroidal or Slepian sequences derive from the following
time-frequency concentration problem. For all finite-energy sequences
index-limited to some set, which sequence maximizes the following ratio:
.. math::
\\lambda = \\frac{\\int_{-W}^{W}\\left| X(f) \\right|^2 df}
{\\int_{-F_s/2}^{F_s/2}\\left| X(f) \\right|^2 df}
where :math:`F_s` is the sampling frequency and :math:`|W| < F_s/2`.
This ratio determines which index-limited sequence has the largest proportion of its
energy in the band :math:`[-W,W]` with :math:`0 < \\lambda < 1`.
The sequence maximizing the ratio is the first
discrete prolate spheroidal or Slepian sequence. The second Slepian sequence
maximizes the ratio and is orthogonal to the first Slepian sequence. The third
Slepian sequence maximizes the ratio of integrals and is orthogonal to both
the first and second Slepian sequences and so on.
.. note:: Note about the implementation. Since the slepian generation is
computationally expensive, we use a C implementation based on the C
code written by Lees as published in:
Lees, J. M. and J. Park (1995): Multiple-taper spectral analysis: A stand-alone
C-subroutine: Computers & Geology: 21, 199-236.
However, the original C code has been trimmed. Indeed, we only require the
multitap function (which depends only on the jtridib and jtinvit functions).
.. plot::
:width: 80%
:include-source:
from spectrum import *
from pylab import *
N = 512
[w, eigens] = dpss(N, 2.5, 4)
plot(w)
title('Slepian Sequences N=%s, NW=2.5' % N)
axis([0, N, -0.15, 0.15])
legend(['1st window','2nd window','3rd window','4th window'])
Windows are normalised:
.. math:: \\sum_k h_k h_k = 1
:references: [Percival]_
Slepian, D. Prolate spheroidal wave functions, Fourier analysis, and
uncertainty V: The discrete case. Bell System Technical Journal,
Volume 57 (1978), 1371-1430
.. note:: the C code to create the slepian windows is extracted from original C code
from Lees and Park (1995) and uses the conventions of Percival and Walden (1993).
Functions that are not used here were removed.
"""
assert NW < N / 2, 'NW ({}) must be stricly less than N/2 ({}/2)'.format(NW, N)
if k is None:
k = min(round(2 * NW), N)
k = int(max(k, 1)) # depends on [control=['if'], data=['k']]
from numpy import dot, zeros, arange, sqrt
mtspeclib.multitap.restype = None
lam = zeros(k, dtype=float)
tapers = zeros(k * N, dtype=float)
tapsum = zeros(k, dtype=float)
res = mtspeclib.multitap(c_int(N), c_int(k), lam.ctypes.data_as(c_void_p), c_float(NW), tapers.ctypes.data_as(c_void_p), tapsum.ctypes.data_as(c_void_p))
# normalisation by sqrt(N). It is required to have normalised windows
tapers = tapers.reshape(k, N).transpose() / sqrt(N)
for i in range(k):
# By convention (Percival and Walden, 1993 pg 379)
# * symmetric tapers (k=0,2,4,...) should have a positive average.
# * antisymmetric tapers should begin with a positive lobe
if i % 2 == 0:
if tapsum[i] < 0:
tapsum[i] *= -1
tapers[:, i] *= -1 # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif tapers[0, i] < 0:
tapsum[i] *= -1
tapers[:, i] *= -1 # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']]
# Now find the eigenvalues of the original
# Use the autocovariance sequence technique from Percival and Walden, 1993
# pg 390 to get the eigenvalues more precisely (same as matlab output)
# The values returned in lam are not exactly the same as in the following methods.
acvs = _autocov(tapers.transpose(), debias=False) * N
nidx = arange(N)
W = float(NW) / N
r = 4 * W * np.sinc(2 * W * nidx)
r[0] = 2 * W
eigvals = dot(acvs, r)
#return (tapers, lam)
return [tapers, eigvals] |
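A quick numerical check of the two properties the docstring promises: unit-normalised tapers and eigenvalues near one for the leading windows. This assumes the `dpss` above is importable; the 0.9 threshold is a loose sanity bound, not a published figure.

import numpy as np

tapers, eigvals = dpss(512, NW=2.5, k=4)
# Columns are tapers; normalisation means each column has unit energy.
assert np.allclose((tapers ** 2).sum(axis=0), 1.0)
# For k <= 2*NW - 1 the concentration ratios sit close to 1.
assert np.all(np.asarray(eigvals) > 0.9)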
def create_dns(self):
"""Create DNS for the defined app and environment."""
utils.banner("Creating DNS")
elb_subnet = self.configs[self.env]['elb']['subnet_purpose']
regions = self.configs[self.env]['regions']
failover = self.configs[self.env]['dns']['failover_dns']
primary_region = self.configs['pipeline']['primary_region']
regionspecific_dns = self.configs[self.env]['dns']['region_specific']
dnsobj = dns.SpinnakerDns(
app=self.app, env=self.env, region=self.region, prop_path=self.json_path, elb_subnet=elb_subnet)
if len(regions) > 1 and failover:
dnsobj.create_elb_dns(regionspecific=True)
dnsobj.create_failover_dns(primary_region=primary_region)
else:
if regionspecific_dns:
dnsobj.create_elb_dns(regionspecific=True)
if self.region == primary_region:
dnsobj.create_elb_dns(regionspecific=False) | def function[create_dns, parameter[self]]:
constant[Create DNS for the defined app and environment.]
call[name[utils].banner, parameter[constant[Creating DNS]]]
variable[elb_subnet] assign[=] call[call[call[name[self].configs][name[self].env]][constant[elb]]][constant[subnet_purpose]]
variable[regions] assign[=] call[call[name[self].configs][name[self].env]][constant[regions]]
variable[failover] assign[=] call[call[call[name[self].configs][name[self].env]][constant[dns]]][constant[failover_dns]]
variable[primary_region] assign[=] call[call[name[self].configs][constant[pipeline]]][constant[primary_region]]
variable[regionspecific_dns] assign[=] call[call[call[name[self].configs][name[self].env]][constant[dns]]][constant[region_specific]]
variable[dnsobj] assign[=] call[name[dns].SpinnakerDns, parameter[]]
if <ast.BoolOp object at 0x7da20c794610> begin[:]
call[name[dnsobj].create_elb_dns, parameter[]]
call[name[dnsobj].create_failover_dns, parameter[]] | keyword[def] identifier[create_dns] ( identifier[self] ):
literal[string]
identifier[utils] . identifier[banner] ( literal[string] )
identifier[elb_subnet] = identifier[self] . identifier[configs] [ identifier[self] . identifier[env] ][ literal[string] ][ literal[string] ]
identifier[regions] = identifier[self] . identifier[configs] [ identifier[self] . identifier[env] ][ literal[string] ]
identifier[failover] = identifier[self] . identifier[configs] [ identifier[self] . identifier[env] ][ literal[string] ][ literal[string] ]
identifier[primary_region] = identifier[self] . identifier[configs] [ literal[string] ][ literal[string] ]
identifier[regionspecific_dns] = identifier[self] . identifier[configs] [ identifier[self] . identifier[env] ][ literal[string] ][ literal[string] ]
identifier[dnsobj] = identifier[dns] . identifier[SpinnakerDns] (
identifier[app] = identifier[self] . identifier[app] , identifier[env] = identifier[self] . identifier[env] , identifier[region] = identifier[self] . identifier[region] , identifier[prop_path] = identifier[self] . identifier[json_path] , identifier[elb_subnet] = identifier[elb_subnet] )
keyword[if] identifier[len] ( identifier[regions] )> literal[int] keyword[and] identifier[failover] :
identifier[dnsobj] . identifier[create_elb_dns] ( identifier[regionspecific] = keyword[True] )
identifier[dnsobj] . identifier[create_failover_dns] ( identifier[primary_region] = identifier[primary_region] )
keyword[else] :
keyword[if] identifier[regionspecific_dns] :
identifier[dnsobj] . identifier[create_elb_dns] ( identifier[regionspecific] = keyword[True] )
keyword[if] identifier[self] . identifier[region] == identifier[primary_region] :
identifier[dnsobj] . identifier[create_elb_dns] ( identifier[regionspecific] = keyword[False] ) | def create_dns(self):
"""Create DNS for the defined app and environment."""
utils.banner('Creating DNS')
elb_subnet = self.configs[self.env]['elb']['subnet_purpose']
regions = self.configs[self.env]['regions']
failover = self.configs[self.env]['dns']['failover_dns']
primary_region = self.configs['pipeline']['primary_region']
regionspecific_dns = self.configs[self.env]['dns']['region_specific']
dnsobj = dns.SpinnakerDns(app=self.app, env=self.env, region=self.region, prop_path=self.json_path, elb_subnet=elb_subnet)
if len(regions) > 1 and failover:
dnsobj.create_elb_dns(regionspecific=True)
dnsobj.create_failover_dns(primary_region=primary_region) # depends on [control=['if'], data=[]]
else:
if regionspecific_dns:
dnsobj.create_elb_dns(regionspecific=True) # depends on [control=['if'], data=[]]
if self.region == primary_region:
dnsobj.create_elb_dns(regionspecific=False) # depends on [control=['if'], data=[]] |
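The branching above reduces to a small decision table; this standalone restatement (a hypothetical helper, not part of the original module) mirrors its control flow.

def dns_records_to_create(num_regions, failover, region_specific, is_primary):
    # Mirrors create_dns: multi-region failover wins, otherwise per-flag records.
    if num_regions > 1 and failover:
        return ["region-specific ELB DNS", "failover DNS"]
    records = []
    if region_specific:
        records.append("region-specific ELB DNS")
    if is_primary:
        records.append("plain ELB DNS")
    return records

assert dns_records_to_create(2, True, False, False) == ["region-specific ELB DNS", "failover DNS"]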
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
# Download images and annotations that come in separate archives.
# Note that the extension of the archives is .tar.gz even though the actual
# archive format is uncompressed tar.
dl_paths = dl_manager.download_and_extract({
"images": tfds.download.Resource(
url=os.path.join(_BASE_URL, "102flowers.tgz"),
extract_method=tfds.download.ExtractMethod.TAR),
"labels": os.path.join(_BASE_URL, "imagelabels.mat"),
"setid": os.path.join(_BASE_URL, "setid.mat"),
})
gen_kwargs = dict(
images_dir_path=os.path.join(dl_paths["images"], "jpg"),
labels_path=dl_paths["labels"],
setid_path=dl_paths["setid"],
)
return [
tfds.core.SplitGenerator(
name=tfds.Split.TRAIN,
num_shards=1,
gen_kwargs=dict(split_name="trnid", **gen_kwargs)),
tfds.core.SplitGenerator(
name=tfds.Split.TEST,
num_shards=1,
gen_kwargs=dict(split_name="tstid", **gen_kwargs)),
tfds.core.SplitGenerator(
name=tfds.Split.VALIDATION,
num_shards=1,
gen_kwargs=dict(split_name="valid", **gen_kwargs)),
] | def function[_split_generators, parameter[self, dl_manager]]:
constant[Returns SplitGenerators.]
variable[dl_paths] assign[=] call[name[dl_manager].download_and_extract, parameter[dictionary[[<ast.Constant object at 0x7da1b2006f80>, <ast.Constant object at 0x7da1b20060b0>, <ast.Constant object at 0x7da1b2006ec0>], [<ast.Call object at 0x7da1b2005a20>, <ast.Call object at 0x7da1b2006260>, <ast.Call object at 0x7da1b20070a0>]]]]
variable[gen_kwargs] assign[=] call[name[dict], parameter[]]
return[list[[<ast.Call object at 0x7da1b2005150>, <ast.Call object at 0x7da1b2006770>, <ast.Call object at 0x7da1b2006b90>]]] | keyword[def] identifier[_split_generators] ( identifier[self] , identifier[dl_manager] ):
literal[string]
identifier[dl_paths] = identifier[dl_manager] . identifier[download_and_extract] ({
literal[string] : identifier[tfds] . identifier[download] . identifier[Resource] (
identifier[url] = identifier[os] . identifier[path] . identifier[join] ( identifier[_BASE_URL] , literal[string] ),
identifier[extract_method] = identifier[tfds] . identifier[download] . identifier[ExtractMethod] . identifier[TAR] ),
literal[string] : identifier[os] . identifier[path] . identifier[join] ( identifier[_BASE_URL] , literal[string] ),
literal[string] : identifier[os] . identifier[path] . identifier[join] ( identifier[_BASE_URL] , literal[string] ),
})
identifier[gen_kwargs] = identifier[dict] (
identifier[images_dir_path] = identifier[os] . identifier[path] . identifier[join] ( identifier[dl_paths] [ literal[string] ], literal[string] ),
identifier[labels_path] = identifier[dl_paths] [ literal[string] ],
identifier[setid_path] = identifier[dl_paths] [ literal[string] ],
)
keyword[return] [
identifier[tfds] . identifier[core] . identifier[SplitGenerator] (
identifier[name] = identifier[tfds] . identifier[Split] . identifier[TRAIN] ,
identifier[num_shards] = literal[int] ,
identifier[gen_kwargs] = identifier[dict] ( identifier[split_name] = literal[string] ,** identifier[gen_kwargs] )),
identifier[tfds] . identifier[core] . identifier[SplitGenerator] (
identifier[name] = identifier[tfds] . identifier[Split] . identifier[TEST] ,
identifier[num_shards] = literal[int] ,
identifier[gen_kwargs] = identifier[dict] ( identifier[split_name] = literal[string] ,** identifier[gen_kwargs] )),
identifier[tfds] . identifier[core] . identifier[SplitGenerator] (
identifier[name] = identifier[tfds] . identifier[Split] . identifier[VALIDATION] ,
identifier[num_shards] = literal[int] ,
identifier[gen_kwargs] = identifier[dict] ( identifier[split_name] = literal[string] ,** identifier[gen_kwargs] )),
] | def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
# Download images and annotations that come in separate archives.
# Note that the extension of the archives is .tar.gz even though the actual
# archive format is uncompressed tar.
dl_paths = dl_manager.download_and_extract({'images': tfds.download.Resource(url=os.path.join(_BASE_URL, '102flowers.tgz'), extract_method=tfds.download.ExtractMethod.TAR), 'labels': os.path.join(_BASE_URL, 'imagelabels.mat'), 'setid': os.path.join(_BASE_URL, 'setid.mat')})
gen_kwargs = dict(images_dir_path=os.path.join(dl_paths['images'], 'jpg'), labels_path=dl_paths['labels'], setid_path=dl_paths['setid'])
return [tfds.core.SplitGenerator(name=tfds.Split.TRAIN, num_shards=1, gen_kwargs=dict(split_name='trnid', **gen_kwargs)), tfds.core.SplitGenerator(name=tfds.Split.TEST, num_shards=1, gen_kwargs=dict(split_name='tstid', **gen_kwargs)), tfds.core.SplitGenerator(name=tfds.Split.VALIDATION, num_shards=1, gen_kwargs=dict(split_name='valid', **gen_kwargs))] |
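The `dict(split_name=..., **gen_kwargs)` idiom above is plain keyword merging: every split generator shares the same resolved paths and differs only in `split_name`. A toy illustration with placeholder paths:

shared = dict(images_dir_path="jpg/", labels_path="imagelabels.mat", setid_path="setid.mat")
train_kwargs = dict(split_name="trnid", **shared)
assert train_kwargs["labels_path"] == shared["labels_path"]
assert train_kwargs["split_name"] == "trnid"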
def up(self):
''' Bring up the bridge interface. Equivalent to ifconfig [iface] up. '''
# Get existing device flags
ifreq = struct.pack('16sh', self.name, 0)
flags = struct.unpack('16sh', fcntl.ioctl(sockfd, SIOCGIFFLAGS, ifreq))[1]
# Set new flags
flags = flags | IFF_UP
ifreq = struct.pack('16sh', self.name, flags)
fcntl.ioctl(sockfd, SIOCSIFFLAGS, ifreq) | def function[up, parameter[self]]:
constant[ Bring up the bridge interface. Equivalent to ifconfig [iface] up. ]
variable[ifreq] assign[=] call[name[struct].pack, parameter[constant[16sh], name[self].name, constant[0]]]
variable[flags] assign[=] call[call[name[struct].unpack, parameter[constant[16sh], call[name[fcntl].ioctl, parameter[name[sockfd], name[SIOCGIFFLAGS], name[ifreq]]]]]][constant[1]]
variable[flags] assign[=] binary_operation[name[flags] <ast.BitOr object at 0x7da2590d6aa0> name[IFF_UP]]
variable[ifreq] assign[=] call[name[struct].pack, parameter[constant[16sh], name[self].name, name[flags]]]
call[name[fcntl].ioctl, parameter[name[sockfd], name[SIOCSIFFLAGS], name[ifreq]]] | keyword[def] identifier[up] ( identifier[self] ):
literal[string]
identifier[ifreq] = identifier[struct] . identifier[pack] ( literal[string] , identifier[self] . identifier[name] , literal[int] )
identifier[flags] = identifier[struct] . identifier[unpack] ( literal[string] , identifier[fcntl] . identifier[ioctl] ( identifier[sockfd] , identifier[SIOCGIFFLAGS] , identifier[ifreq] ))[ literal[int] ]
identifier[flags] = identifier[flags] | identifier[IFF_UP]
identifier[ifreq] = identifier[struct] . identifier[pack] ( literal[string] , identifier[self] . identifier[name] , identifier[flags] )
identifier[fcntl] . identifier[ioctl] ( identifier[sockfd] , identifier[SIOCSIFFLAGS] , identifier[ifreq] ) | def up(self):
""" Bring up the bridge interface. Equivalent to ifconfig [iface] up. """
# Get existing device flags
ifreq = struct.pack('16sh', self.name, 0)
flags = struct.unpack('16sh', fcntl.ioctl(sockfd, SIOCGIFFLAGS, ifreq))[1]
# Set new flags
flags = flags | IFF_UP
ifreq = struct.pack('16sh', self.name, flags)
fcntl.ioctl(sockfd, SIOCSIFFLAGS, ifreq) |
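A struct-only illustration of the read-modify-write above, with no ioctl so it runs unprivileged. `IFF_UP = 0x1` is the usual Linux value from <net/if.h>, and the sample flags word is an assumption for the demo.

import struct

IFF_UP = 0x1    # assumed Linux constant from <net/if.h>
name = b"br0"
flags = 0x1002  # e.g. IFF_BROADCAST | IFF_MULTICAST, as SIOCGIFFLAGS might return

# Pack name + flags exactly like the '16sh' ifreq above, with IFF_UP OR-ed in.
ifreq = struct.pack("16sh", name, flags | IFF_UP)
_, new_flags = struct.unpack("16sh", ifreq)
assert new_flags & IFF_UP  # the interface would now be marked up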
def bind_to_channel(self):
"""
Binds (subscribes) the user's private exchange to the channel exchange.
Automatically called at creation of subscription record.
"""
if self.channel.code_name != self.user.prv_exchange:
channel = self._connect_mq()
channel.exchange_bind(source=self.channel.code_name, destination=self.user.prv_exchange) | def function[bind_to_channel, parameter[self]]:
constant[
Binds (subscribes) the user's private exchange to the channel exchange.
Automatically called at creation of subscription record.
]
if compare[name[self].channel.code_name not_equal[!=] name[self].user.prv_exchange] begin[:]
variable[channel] assign[=] call[name[self]._connect_mq, parameter[]]
call[name[channel].exchange_bind, parameter[]] | keyword[def] identifier[bind_to_channel] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[channel] . identifier[code_name] != identifier[self] . identifier[user] . identifier[prv_exchange] :
identifier[channel] = identifier[self] . identifier[_connect_mq] ()
identifier[channel] . identifier[exchange_bind] ( identifier[source] = identifier[self] . identifier[channel] . identifier[code_name] , identifier[destination] = identifier[self] . identifier[user] . identifier[prv_exchange] ) | def bind_to_channel(self):
"""
Binds (subscribes) the user's private exchange to the channel exchange.
Automatically called at creation of subscription record.
"""
if self.channel.code_name != self.user.prv_exchange:
channel = self._connect_mq()
channel.exchange_bind(source=self.channel.code_name, destination=self.user.prv_exchange) # depends on [control=['if'], data=[]] |
def set_reverb(self, roomsize=-1.0, damping=-1.0, width=-1.0, level=-1.0):
"""
roomsize Reverb room size value (0.0-1.2)
damping Reverb damping value (0.0-1.0)
width Reverb width value (0.0-100.0)
level Reverb level value (0.0-1.0)
"""
set=0
if roomsize>=0:
set+=0b0001
if damping>=0:
set+=0b0010
if width>=0:
set+=0b0100
if level>=0:
set+=0b1000
return fluid_synth_set_reverb_full(self.synth, set, roomsize, damping, width, level) | def function[set_reverb, parameter[self, roomsize, damping, width, level]]:
constant[
roomsize Reverb room size value (0.0-1.2)
damping Reverb damping value (0.0-1.0)
width Reverb width value (0.0-100.0)
level Reverb level value (0.0-1.0)
]
variable[set] assign[=] constant[0]
if compare[name[roomsize] greater_or_equal[>=] constant[0]] begin[:]
<ast.AugAssign object at 0x7da20e9b1a50>
if compare[name[damping] greater_or_equal[>=] constant[0]] begin[:]
<ast.AugAssign object at 0x7da20e9b1270>
if compare[name[width] greater_or_equal[>=] constant[0]] begin[:]
<ast.AugAssign object at 0x7da20e9b3e20>
if compare[name[level] greater_or_equal[>=] constant[0]] begin[:]
<ast.AugAssign object at 0x7da20e9b1ba0>
return[call[name[fluid_synth_set_reverb_full], parameter[name[self].synth, name[set], name[roomsize], name[damping], name[width], name[level]]]] | keyword[def] identifier[set_reverb] ( identifier[self] , identifier[roomsize] =- literal[int] , identifier[damping] =- literal[int] , identifier[width] =- literal[int] , identifier[level] =- literal[int] ):
literal[string]
identifier[set] = literal[int]
keyword[if] identifier[roomsize] >= literal[int] :
identifier[set] += literal[int]
keyword[if] identifier[damping] >= literal[int] :
identifier[set] += literal[int]
keyword[if] identifier[width] >= literal[int] :
identifier[set] += literal[int]
keyword[if] identifier[level] >= literal[int] :
identifier[set] += literal[int]
keyword[return] identifier[fluid_synth_set_reverb_full] ( identifier[self] . identifier[synth] , identifier[set] , identifier[roomsize] , identifier[damping] , identifier[width] , identifier[level] ) | def set_reverb(self, roomsize=-1.0, damping=-1.0, width=-1.0, level=-1.0):
"""
roomsize Reverb room size value (0.0-1.2)
damping Reverb damping value (0.0-1.0)
width Reverb width value (0.0-100.0)
level Reverb level value (0.0-1.0)
"""
set = 0
if roomsize >= 0:
set += 1 # depends on [control=['if'], data=[]]
if damping >= 0:
set += 2 # depends on [control=['if'], data=[]]
if width >= 0:
set += 4 # depends on [control=['if'], data=[]]
if level >= 0:
set += 8 # depends on [control=['if'], data=[]]
return fluid_synth_set_reverb_full(self.synth, set, roomsize, damping, width, level) |
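The `set` accumulator above is a four-bit mask telling the C call which parameters were actually supplied; a standalone restatement of just that mapping:

def reverb_mask(roomsize=-1.0, damping=-1.0, width=-1.0, level=-1.0):
    # Bit i set <=> parameter i provided (>= 0), matching set_reverb's flags.
    mask = 0
    for bit, value in enumerate((roomsize, damping, width, level)):
        if value >= 0:
            mask |= 1 << bit
    return mask

assert reverb_mask(roomsize=0.8) == 0b0001
assert reverb_mask(damping=0.5, level=0.9) == 0b1010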
def static(self):
"""
Configures the server to use a static IP.
"""
fn = self.render_to_file('ip/ip_interfaces_static.template')
r = self.local_renderer
r.put(local_path=fn, remote_path=r.env.interfaces_fn, use_sudo=True) | def function[static, parameter[self]]:
constant[
Configures the server to use a static IP.
]
variable[fn] assign[=] call[name[self].render_to_file, parameter[constant[ip/ip_interfaces_static.template]]]
variable[r] assign[=] name[self].local_renderer
call[name[r].put, parameter[]] | keyword[def] identifier[static] ( identifier[self] ):
literal[string]
identifier[fn] = identifier[self] . identifier[render_to_file] ( literal[string] )
identifier[r] = identifier[self] . identifier[local_renderer]
identifier[r] . identifier[put] ( identifier[local_path] = identifier[fn] , identifier[remote_path] = identifier[r] . identifier[env] . identifier[interfaces_fn] , identifier[use_sudo] = keyword[True] ) | def static(self):
"""
Configures the server to use a static IP.
"""
fn = self.render_to_file('ip/ip_interfaces_static.template')
r = self.local_renderer
r.put(local_path=fn, remote_path=r.env.interfaces_fn, use_sudo=True) |
def compare_states(self, sl, sr):
"""
Compares two states for similarity.
"""
joint_solver = claripy.Solver()
# make sure the canonicalized constraints are the same
n_map, n_counter, n_canon_constraint = claripy.And(*sr.solver.constraints).canonicalize() #pylint:disable=no-member
u_map, u_counter, u_canon_constraint = claripy.And(*sl.solver.constraints).canonicalize() #pylint:disable=no-member
n_canoner_constraint = sr.solver.simplify(n_canon_constraint)
u_canoner_constraint = sl.solver.simplify(u_canon_constraint)
joint_solver.add((n_canoner_constraint, u_canoner_constraint))
if n_canoner_constraint is not u_canoner_constraint:
self._report_incongruency("Different constraints!")
return False
# get the differences in registers and memory
mem_diff = sr.memory.changed_bytes(sl.memory)
reg_diff = sr.registers.changed_bytes(sl.registers)
# this is only for unicorn
if "UNICORN" in sl.options or "UNICORN" in sr.options:
if sl.arch.name == "X86":
reg_diff -= set(range(40, 52)) #ignore cc pseudoregisters
reg_diff -= set(range(320, 324)) #some other VEX weirdness
reg_diff -= set(range(340, 344)) #ip_at_syscall
elif sl.arch.name == "AMD64":
reg_diff -= set(range(144, 168)) #ignore cc pseudoregisters
# make sure the differences in registers and memory are actually just renamed
# versions of the same ASTs
for diffs,(um,nm) in (
(reg_diff, (sl.registers, sr.registers)),
(mem_diff, (sl.memory, sr.memory)),
):
for i in diffs:
bn = nm.load(i, 1)
bu = um.load(i, 1)
bnc = bn.canonicalize(var_map=n_map, counter=n_counter)[-1]
buc = bu.canonicalize(var_map=u_map, counter=u_counter)[-1]
if bnc is not buc:
self._report_incongruency("Different memory or registers (index %d, values %r and %r)!", i, bn, bu)
return False
# make sure the flags are the same
if sl.arch.name in ("AMD64", "X86", "ARM", "ARMEL", "ARMHF", "AARCH64"):
# pylint: disable=unused-variable
n_bkp = sr.regs.cc_op, sr.regs.cc_dep1, sr.regs.cc_dep2, sr.regs.cc_ndep
u_bkp = sl.regs.cc_op, sl.regs.cc_dep1, sl.regs.cc_dep2, sl.regs.cc_ndep
if sl.arch.name in ('AMD64', 'X86'):
n_flags = sr.regs.eflags.canonicalize(var_map=n_map, counter=n_counter)[-1]
u_flags = sl.regs.eflags.canonicalize(var_map=u_map, counter=u_counter)[-1]
else:
n_flags = sr.regs.flags.canonicalize(var_map=n_map, counter=n_counter)[-1]
u_flags = sl.regs.flags.canonicalize(var_map=u_map, counter=u_counter)[-1]
if n_flags is not u_flags and sl.solver.simplify(n_flags) is not sr.solver.simplify(u_flags):
self._report_incongruency("Different flags!")
return False
return True | def function[compare_states, parameter[self, sl, sr]]:
constant[
Compares two states for similarity.
]
variable[joint_solver] assign[=] call[name[claripy].Solver, parameter[]]
<ast.Tuple object at 0x7da20c76d6c0> assign[=] call[call[name[claripy].And, parameter[<ast.Starred object at 0x7da20c76ca30>]].canonicalize, parameter[]]
<ast.Tuple object at 0x7da20c76c3a0> assign[=] call[call[name[claripy].And, parameter[<ast.Starred object at 0x7da20c76cf10>]].canonicalize, parameter[]]
variable[n_canoner_constraint] assign[=] call[name[sr].solver.simplify, parameter[name[n_canon_constraint]]]
variable[u_canoner_constraint] assign[=] call[name[sl].solver.simplify, parameter[name[u_canon_constraint]]]
call[name[joint_solver].add, parameter[tuple[[<ast.Name object at 0x7da20c76e620>, <ast.Name object at 0x7da20c76c280>]]]]
if compare[name[n_canoner_constraint] is_not name[u_canoner_constraint]] begin[:]
call[name[self]._report_incongruency, parameter[constant[Different constraints!]]]
return[constant[False]]
variable[mem_diff] assign[=] call[name[sr].memory.changed_bytes, parameter[name[sl].memory]]
variable[reg_diff] assign[=] call[name[sr].registers.changed_bytes, parameter[name[sl].registers]]
if <ast.BoolOp object at 0x7da20c76f8e0> begin[:]
if compare[name[sl].arch.name equal[==] constant[X86]] begin[:]
<ast.AugAssign object at 0x7da1b21e35b0>
<ast.AugAssign object at 0x7da1b21e0f10>
<ast.AugAssign object at 0x7da1b21e1a20>
for taget[tuple[[<ast.Name object at 0x7da1b21e13c0>, <ast.Tuple object at 0x7da1b21e04c0>]]] in starred[tuple[[<ast.Tuple object at 0x7da1b21e3d30>, <ast.Tuple object at 0x7da1b21e2ec0>]]] begin[:]
for taget[name[i]] in starred[name[diffs]] begin[:]
variable[bn] assign[=] call[name[nm].load, parameter[name[i], constant[1]]]
variable[bu] assign[=] call[name[um].load, parameter[name[i], constant[1]]]
variable[bnc] assign[=] call[call[name[bn].canonicalize, parameter[]]][<ast.UnaryOp object at 0x7da1b21e2da0>]
variable[buc] assign[=] call[call[name[bu].canonicalize, parameter[]]][<ast.UnaryOp object at 0x7da1b21e19f0>]
if compare[name[bnc] is_not name[buc]] begin[:]
call[name[self]._report_incongruency, parameter[constant[Different memory or registers (index %d, values %r and %r)!], name[i], name[bn], name[bu]]]
return[constant[False]]
if compare[name[sl].arch.name in tuple[[<ast.Constant object at 0x7da1b21e0610>, <ast.Constant object at 0x7da1b21e05e0>, <ast.Constant object at 0x7da1b21e2380>, <ast.Constant object at 0x7da1b21e16c0>, <ast.Constant object at 0x7da1b21e3250>, <ast.Constant object at 0x7da1b21e2a10>]]] begin[:]
variable[n_bkp] assign[=] tuple[[<ast.Attribute object at 0x7da1b21e3e50>, <ast.Attribute object at 0x7da1b21e39a0>, <ast.Attribute object at 0x7da1b21e3640>, <ast.Attribute object at 0x7da1b21e1720>]]
variable[u_bkp] assign[=] tuple[[<ast.Attribute object at 0x7da1b21e1d80>, <ast.Attribute object at 0x7da1b21e0ca0>, <ast.Attribute object at 0x7da1b21e0df0>, <ast.Attribute object at 0x7da1b21e0fa0>]]
if compare[name[sl].arch.name in tuple[[<ast.Constant object at 0x7da1b21e0280>, <ast.Constant object at 0x7da1b21e3a60>]]] begin[:]
variable[n_flags] assign[=] call[call[name[sr].regs.eflags.canonicalize, parameter[]]][<ast.UnaryOp object at 0x7da1b21e17b0>]
variable[u_flags] assign[=] call[call[name[sl].regs.eflags.canonicalize, parameter[]]][<ast.UnaryOp object at 0x7da1b21e2f80>]
if <ast.BoolOp object at 0x7da1b21e1300> begin[:]
call[name[self]._report_incongruency, parameter[constant[Different flags!]]]
return[constant[False]]
return[constant[True]] | keyword[def] identifier[compare_states] ( identifier[self] , identifier[sl] , identifier[sr] ):
literal[string]
identifier[joint_solver] = identifier[claripy] . identifier[Solver] ()
identifier[n_map] , identifier[n_counter] , identifier[n_canon_constraint] = identifier[claripy] . identifier[And] (* identifier[sr] . identifier[solver] . identifier[constraints] ). identifier[canonicalize] ()
identifier[u_map] , identifier[u_counter] , identifier[u_canon_constraint] = identifier[claripy] . identifier[And] (* identifier[sl] . identifier[solver] . identifier[constraints] ). identifier[canonicalize] ()
identifier[n_canoner_constraint] = identifier[sr] . identifier[solver] . identifier[simplify] ( identifier[n_canon_constraint] )
identifier[u_canoner_constraint] = identifier[sl] . identifier[solver] . identifier[simplify] ( identifier[u_canon_constraint] )
identifier[joint_solver] . identifier[add] (( identifier[n_canoner_constraint] , identifier[u_canoner_constraint] ))
keyword[if] identifier[n_canoner_constraint] keyword[is] keyword[not] identifier[u_canoner_constraint] :
identifier[self] . identifier[_report_incongruency] ( literal[string] )
keyword[return] keyword[False]
identifier[mem_diff] = identifier[sr] . identifier[memory] . identifier[changed_bytes] ( identifier[sl] . identifier[memory] )
identifier[reg_diff] = identifier[sr] . identifier[registers] . identifier[changed_bytes] ( identifier[sl] . identifier[registers] )
keyword[if] literal[string] keyword[in] identifier[sl] . identifier[options] keyword[or] literal[string] keyword[in] identifier[sr] . identifier[options] :
keyword[if] identifier[sl] . identifier[arch] . identifier[name] == literal[string] :
identifier[reg_diff] -= identifier[set] ( identifier[range] ( literal[int] , literal[int] ))
identifier[reg_diff] -= identifier[set] ( identifier[range] ( literal[int] , literal[int] ))
identifier[reg_diff] -= identifier[set] ( identifier[range] ( literal[int] , literal[int] ))
keyword[elif] identifier[sl] . identifier[arch] . identifier[name] == literal[string] :
identifier[reg_diff] -= identifier[set] ( identifier[range] ( literal[int] , literal[int] ))
keyword[for] identifier[diffs] ,( identifier[um] , identifier[nm] ) keyword[in] (
( identifier[reg_diff] ,( identifier[sl] . identifier[registers] , identifier[sr] . identifier[registers] )),
( identifier[mem_diff] ,( identifier[sl] . identifier[memory] , identifier[sr] . identifier[memory] )),
):
keyword[for] identifier[i] keyword[in] identifier[diffs] :
identifier[bn] = identifier[nm] . identifier[load] ( identifier[i] , literal[int] )
identifier[bu] = identifier[um] . identifier[load] ( identifier[i] , literal[int] )
identifier[bnc] = identifier[bn] . identifier[canonicalize] ( identifier[var_map] = identifier[n_map] , identifier[counter] = identifier[n_counter] )[- literal[int] ]
identifier[buc] = identifier[bu] . identifier[canonicalize] ( identifier[var_map] = identifier[u_map] , identifier[counter] = identifier[u_counter] )[- literal[int] ]
keyword[if] identifier[bnc] keyword[is] keyword[not] identifier[buc] :
identifier[self] . identifier[_report_incongruency] ( literal[string] , identifier[i] , identifier[bn] , identifier[bu] )
keyword[return] keyword[False]
keyword[if] identifier[sl] . identifier[arch] . identifier[name] keyword[in] ( literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ):
identifier[n_bkp] = identifier[sr] . identifier[regs] . identifier[cc_op] , identifier[sr] . identifier[regs] . identifier[cc_dep1] , identifier[sr] . identifier[regs] . identifier[cc_dep2] , identifier[sr] . identifier[regs] . identifier[cc_ndep]
identifier[u_bkp] = identifier[sl] . identifier[regs] . identifier[cc_op] , identifier[sl] . identifier[regs] . identifier[cc_dep1] , identifier[sl] . identifier[regs] . identifier[cc_dep2] , identifier[sl] . identifier[regs] . identifier[cc_ndep]
keyword[if] identifier[sl] . identifier[arch] . identifier[name] keyword[in] ( literal[string] , literal[string] ):
identifier[n_flags] = identifier[sr] . identifier[regs] . identifier[eflags] . identifier[canonicalize] ( identifier[var_map] = identifier[n_map] , identifier[counter] = identifier[n_counter] )[- literal[int] ]
identifier[u_flags] = identifier[sl] . identifier[regs] . identifier[eflags] . identifier[canonicalize] ( identifier[var_map] = identifier[u_map] , identifier[counter] = identifier[u_counter] )[- literal[int] ]
keyword[else] :
identifier[n_flags] = identifier[sr] . identifier[regs] . identifier[flags] . identifier[canonicalize] ( identifier[var_map] = identifier[n_map] , identifier[counter] = identifier[n_counter] )[- literal[int] ]
identifier[u_flags] = identifier[sl] . identifier[regs] . identifier[flags] . identifier[canonicalize] ( identifier[var_map] = identifier[u_map] , identifier[counter] = identifier[u_counter] )[- literal[int] ]
keyword[if] identifier[n_flags] keyword[is] keyword[not] identifier[u_flags] keyword[and] identifier[sl] . identifier[solver] . identifier[simplify] ( identifier[n_flags] ) keyword[is] keyword[not] identifier[sr] . identifier[solver] . identifier[simplify] ( identifier[u_flags] ):
identifier[self] . identifier[_report_incongruency] ( literal[string] )
keyword[return] keyword[False]
keyword[return] keyword[True] | def compare_states(self, sl, sr):
"""
Compares two states for similarity.
"""
joint_solver = claripy.Solver()
# make sure the canonicalized constraints are the same
(n_map, n_counter, n_canon_constraint) = claripy.And(*sr.solver.constraints).canonicalize() #pylint:disable=no-member
(u_map, u_counter, u_canon_constraint) = claripy.And(*sl.solver.constraints).canonicalize() #pylint:disable=no-member
n_canoner_constraint = sr.solver.simplify(n_canon_constraint)
u_canoner_constraint = sl.solver.simplify(u_canon_constraint)
joint_solver.add((n_canoner_constraint, u_canoner_constraint))
if n_canoner_constraint is not u_canoner_constraint:
self._report_incongruency('Different constraints!')
return False # depends on [control=['if'], data=[]]
# get the differences in registers and memory
mem_diff = sr.memory.changed_bytes(sl.memory)
reg_diff = sr.registers.changed_bytes(sl.registers)
# this is only for unicorn
if 'UNICORN' in sl.options or 'UNICORN' in sr.options:
if sl.arch.name == 'X86':
reg_diff -= set(range(40, 52)) #ignore cc pseudoregisters
reg_diff -= set(range(320, 324)) #some other VEX weirdness
reg_diff -= set(range(340, 344)) #ip_at_syscall # depends on [control=['if'], data=[]]
elif sl.arch.name == 'AMD64':
reg_diff -= set(range(144, 168)) #ignore cc pseudoregisters # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
# make sure the differences in registers and memory are actually just renamed
# versions of the same ASTs
for (diffs, (um, nm)) in ((reg_diff, (sl.registers, sr.registers)), (mem_diff, (sl.memory, sr.memory))):
for i in diffs:
bn = nm.load(i, 1)
bu = um.load(i, 1)
bnc = bn.canonicalize(var_map=n_map, counter=n_counter)[-1]
buc = bu.canonicalize(var_map=u_map, counter=u_counter)[-1]
if bnc is not buc:
self._report_incongruency('Different memory or registers (index %d, values %r and %r)!', i, bn, bu)
return False # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']] # depends on [control=['for'], data=[]]
# make sure the flags are the same
if sl.arch.name in ('AMD64', 'X86', 'ARM', 'ARMEL', 'ARMHF', 'AARCH64'):
# pylint: disable=unused-variable
n_bkp = (sr.regs.cc_op, sr.regs.cc_dep1, sr.regs.cc_dep2, sr.regs.cc_ndep)
u_bkp = (sl.regs.cc_op, sl.regs.cc_dep1, sl.regs.cc_dep2, sl.regs.cc_ndep)
if sl.arch.name in ('AMD64', 'X86'):
n_flags = sr.regs.eflags.canonicalize(var_map=n_map, counter=n_counter)[-1]
u_flags = sl.regs.eflags.canonicalize(var_map=u_map, counter=u_counter)[-1] # depends on [control=['if'], data=[]]
else:
n_flags = sr.regs.flags.canonicalize(var_map=n_map, counter=n_counter)[-1]
u_flags = sl.regs.flags.canonicalize(var_map=u_map, counter=u_counter)[-1]
if n_flags is not u_flags and sl.solver.simplify(n_flags) is not sr.solver.simplify(u_flags):
self._report_incongruency('Different flags!')
return False # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return True |
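The load-bearing trick above is claripy's `canonicalize()`, which renames variables into a stable order so that structurally identical ASTs dedupe to the same object and can be compared with `is`. A minimal sketch, assuming claripy is installed and that `canonicalize()` returns `(var_map, counter, canonical_ast)` as the code above relies on:

import claripy

a = claripy.BVS("a", 32)
b = claripy.BVS("b", 32)

# Different variable names, same structure: canonical forms should coincide.
canon_a = (a + 1).canonicalize()[-1]
canon_b = (b + 1).canonicalize()[-1]
assert canon_a is canon_b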
def show_grid(data_frame,
show_toolbar=None,
precision=None,
grid_options=None,
column_options=None,
column_definitions=None,
row_edit_callback=None):
"""
Renders a DataFrame or Series as an interactive qgrid, represented by
an instance of the ``QgridWidget`` class. The ``QgridWidget`` instance
is constructed using the options passed in to this function. The
``data_frame`` argument to this function is used as the ``df`` kwarg in
the call to the QgridWidget constructor, and the rest of the parameters
are passed through as is.
If the ``data_frame`` argument is a Series, it will be converted to a
DataFrame before being passed in to the QgridWidget constructor as the
``df`` kwarg.
:rtype: QgridWidget
Parameters
----------
data_frame : DataFrame
The DataFrame that will be displayed by this instance of
QgridWidget.
grid_options : dict
Options to use when creating the SlickGrid control (i.e. the
interactive grid). See the Notes section below for more information
on the available options, as well as the default options that this
widget uses.
precision : integer
The number of digits of precision to display for floating-point
values. If unset, we use the value of
`pandas.get_option('display.precision')`.
show_toolbar : bool
Whether to show a toolbar with options for adding/removing rows.
Adding/removing rows is an experimental feature which only works
with DataFrames that have an integer index.
column_options : dict
Column options that are to be applied to every column. See the
Notes section below for more information on the available options,
as well as the default options that this widget uses.
column_definitions : dict
Column options that are to be applied to individual
columns. The keys of the dict should be the column names, and each
value should be the column options for a particular column,
represented as a dict. The available options for each column are the
same options that are available to be set for all columns via the
``column_options`` parameter. See the Notes section below for more
information on those options.
row_edit_callback : callable
A callable that is called to determine whether a particular row
should be editable or not. Its signature should be
``callable(row)``, where ``row`` is a dictionary which contains a
particular row's values, keyed by column name. The callback should
return True if the provided row should be editable, and False
otherwise.
Notes
-----
The following dictionary is used for ``grid_options`` if none are
provided explicitly::
{
# SlickGrid options
'fullWidthRows': True,
'syncColumnCellResize': True,
'forceFitColumns': True,
'defaultColumnWidth': 150,
'rowHeight': 28,
'enableColumnReorder': False,
'enableTextSelectionOnCells': True,
'editable': True,
'autoEdit': False,
'explicitInitialization': True,
# Qgrid options
'maxVisibleRows': 15,
'minVisibleRows': 8,
'sortable': True,
'filterable': True,
'highlightSelectedCell': False,
'highlightSelectedRow': True
}
The first group of options are SlickGrid "grid options" which are
described in the `SlickGrid documentation
<https://github.com/mleibman/SlickGrid/wiki/Grid-Options>`_.
The second group of options are options that were added specifically
for Qgrid and therefore are not documented in the SlickGrid documentation.
The following bullet points describe these options.
* **maxVisibleRows** The maximum number of rows that Qgrid will show.
    * **minVisibleRows** The minimum number of rows that Qgrid will show.
* **sortable** Whether the Qgrid instance will allow the user to sort
columns by clicking the column headers. When this is set to ``False``,
nothing will happen when users click the column headers.
* **filterable** Whether the Qgrid instance will allow the user to filter
the grid. When this is set to ``False`` the filter icons won't be shown
for any columns.
* **highlightSelectedCell** If you set this to True, the selected cell
will be given a light blue border.
* **highlightSelectedRow** If you set this to False, the light blue
background that's shown by default for selected rows will be hidden.
The following dictionary is used for ``column_options`` if none are
provided explicitly::
{
# SlickGrid column options
'defaultSortAsc': True,
'maxWidth': None,
'minWidth': 30,
'resizable': True,
'sortable': True,
'toolTip': "",
            'width': None,
# Qgrid column options
'editable': True,
}
The first group of options are SlickGrid "column options" which are
described in the `SlickGrid documentation
<https://github.com/mleibman/SlickGrid/wiki/Column-Options>`_.
The ``editable`` option was added specifically for Qgrid and therefore is
not documented in the SlickGrid documentation. This option specifies
whether a column should be editable or not.
See Also
--------
set_defaults : Permanently set global defaults for the parameters
of ``show_grid``, with the exception of the ``data_frame``
and ``column_definitions`` parameters, since those
depend on the particular set of data being shown by an
instance, and therefore aren't parameters we would want
                       to set for all QgridWidget instances.
set_grid_option : Permanently set global defaults for individual
grid options. Does so by changing the defaults
that the ``show_grid`` method uses for the
``grid_options`` parameter.
QgridWidget : The widget class that is instantiated and returned by this
method.
"""
if show_toolbar is None:
show_toolbar = defaults.show_toolbar
if precision is None:
precision = defaults.precision
if not isinstance(precision, Integral):
raise TypeError("precision must be int, not %s" % type(precision))
if column_options is None:
column_options = defaults.column_options
else:
options = defaults.column_options.copy()
options.update(column_options)
column_options = options
if grid_options is None:
grid_options = defaults.grid_options
else:
options = defaults.grid_options.copy()
options.update(grid_options)
grid_options = options
if not isinstance(grid_options, dict):
raise TypeError(
"grid_options must be dict, not %s" % type(grid_options)
)
# if a Series is passed in, convert it to a DataFrame
if isinstance(data_frame, pd.Series):
data_frame = pd.DataFrame(data_frame)
elif not isinstance(data_frame, pd.DataFrame):
raise TypeError(
"data_frame must be DataFrame or Series, not %s" % type(data_frame)
)
column_definitions = (column_definitions or {})
# create a visualization for the dataframe
return QgridWidget(df=data_frame, precision=precision,
grid_options=grid_options,
column_options=column_options,
column_definitions=column_definitions,
row_edit_callback=row_edit_callback,
show_toolbar=show_toolbar) | def function[show_grid, parameter[data_frame, show_toolbar, precision, grid_options, column_options, column_definitions, row_edit_callback]]:
constant[
Renders a DataFrame or Series as an interactive qgrid, represented by
an instance of the ``QgridWidget`` class. The ``QgridWidget`` instance
is constructed using the options passed in to this function. The
``data_frame`` argument to this function is used as the ``df`` kwarg in
    the call to the QgridWidget constructor, and the rest of the parameters
are passed through as is.
If the ``data_frame`` argument is a Series, it will be converted to a
DataFrame before being passed in to the QgridWidget constructor as the
``df`` kwarg.
:rtype: QgridWidget
Parameters
----------
data_frame : DataFrame
The DataFrame that will be displayed by this instance of
QgridWidget.
grid_options : dict
Options to use when creating the SlickGrid control (i.e. the
interactive grid). See the Notes section below for more information
on the available options, as well as the default options that this
widget uses.
precision : integer
The number of digits of precision to display for floating-point
values. If unset, we use the value of
`pandas.get_option('display.precision')`.
show_toolbar : bool
Whether to show a toolbar with options for adding/removing rows.
Adding/removing rows is an experimental feature which only works
with DataFrames that have an integer index.
column_options : dict
Column options that are to be applied to every column. See the
Notes section below for more information on the available options,
as well as the default options that this widget uses.
column_definitions : dict
Column options that are to be applied to individual
columns. The keys of the dict should be the column names, and each
value should be the column options for a particular column,
represented as a dict. The available options for each column are the
same options that are available to be set for all columns via the
``column_options`` parameter. See the Notes section below for more
information on those options.
row_edit_callback : callable
A callable that is called to determine whether a particular row
should be editable or not. Its signature should be
``callable(row)``, where ``row`` is a dictionary which contains a
particular row's values, keyed by column name. The callback should
return True if the provided row should be editable, and False
otherwise.
Notes
-----
The following dictionary is used for ``grid_options`` if none are
provided explicitly::
{
# SlickGrid options
'fullWidthRows': True,
'syncColumnCellResize': True,
'forceFitColumns': True,
'defaultColumnWidth': 150,
'rowHeight': 28,
'enableColumnReorder': False,
'enableTextSelectionOnCells': True,
'editable': True,
'autoEdit': False,
'explicitInitialization': True,
# Qgrid options
'maxVisibleRows': 15,
'minVisibleRows': 8,
'sortable': True,
'filterable': True,
'highlightSelectedCell': False,
'highlightSelectedRow': True
}
The first group of options are SlickGrid "grid options" which are
described in the `SlickGrid documentation
<https://github.com/mleibman/SlickGrid/wiki/Grid-Options>`_.
    The second group of options were added specifically
for Qgrid and therefore are not documented in the SlickGrid documentation.
The following bullet points describe these options.
* **maxVisibleRows** The maximum number of rows that Qgrid will show.
    * **minVisibleRows** The minimum number of rows that Qgrid will show.
* **sortable** Whether the Qgrid instance will allow the user to sort
columns by clicking the column headers. When this is set to ``False``,
nothing will happen when users click the column headers.
* **filterable** Whether the Qgrid instance will allow the user to filter
the grid. When this is set to ``False`` the filter icons won't be shown
for any columns.
* **highlightSelectedCell** If you set this to True, the selected cell
will be given a light blue border.
* **highlightSelectedRow** If you set this to False, the light blue
background that's shown by default for selected rows will be hidden.
The following dictionary is used for ``column_options`` if none are
provided explicitly::
{
# SlickGrid column options
'defaultSortAsc': True,
'maxWidth': None,
'minWidth': 30,
'resizable': True,
'sortable': True,
'toolTip': "",
            'width': None,
# Qgrid column options
'editable': True,
}
The first group of options are SlickGrid "column options" which are
described in the `SlickGrid documentation
<https://github.com/mleibman/SlickGrid/wiki/Column-Options>`_.
The ``editable`` option was added specifically for Qgrid and therefore is
not documented in the SlickGrid documentation. This option specifies
whether a column should be editable or not.
See Also
--------
set_defaults : Permanently set global defaults for the parameters
of ``show_grid``, with the exception of the ``data_frame``
and ``column_definitions`` parameters, since those
depend on the particular set of data being shown by an
instance, and therefore aren't parameters we would want
                       to set for all QgridWidget instances.
set_grid_option : Permanently set global defaults for individual
grid options. Does so by changing the defaults
that the ``show_grid`` method uses for the
``grid_options`` parameter.
QgridWidget : The widget class that is instantiated and returned by this
method.
]
if compare[name[show_toolbar] is constant[None]] begin[:]
variable[show_toolbar] assign[=] name[defaults].show_toolbar
if compare[name[precision] is constant[None]] begin[:]
variable[precision] assign[=] name[defaults].precision
if <ast.UnaryOp object at 0x7da204566aa0> begin[:]
<ast.Raise object at 0x7da204565a80>
if compare[name[column_options] is constant[None]] begin[:]
variable[column_options] assign[=] name[defaults].column_options
if compare[name[grid_options] is constant[None]] begin[:]
variable[grid_options] assign[=] name[defaults].grid_options
if <ast.UnaryOp object at 0x7da204565720> begin[:]
<ast.Raise object at 0x7da18ede7fd0>
if call[name[isinstance], parameter[name[data_frame], name[pd].Series]] begin[:]
variable[data_frame] assign[=] call[name[pd].DataFrame, parameter[name[data_frame]]]
variable[column_definitions] assign[=] <ast.BoolOp object at 0x7da18ede76d0>
return[call[name[QgridWidget], parameter[]]] | keyword[def] identifier[show_grid] ( identifier[data_frame] ,
identifier[show_toolbar] = keyword[None] ,
identifier[precision] = keyword[None] ,
identifier[grid_options] = keyword[None] ,
identifier[column_options] = keyword[None] ,
identifier[column_definitions] = keyword[None] ,
identifier[row_edit_callback] = keyword[None] ):
literal[string]
keyword[if] identifier[show_toolbar] keyword[is] keyword[None] :
identifier[show_toolbar] = identifier[defaults] . identifier[show_toolbar]
keyword[if] identifier[precision] keyword[is] keyword[None] :
identifier[precision] = identifier[defaults] . identifier[precision]
keyword[if] keyword[not] identifier[isinstance] ( identifier[precision] , identifier[Integral] ):
keyword[raise] identifier[TypeError] ( literal[string] % identifier[type] ( identifier[precision] ))
keyword[if] identifier[column_options] keyword[is] keyword[None] :
identifier[column_options] = identifier[defaults] . identifier[column_options]
keyword[else] :
identifier[options] = identifier[defaults] . identifier[column_options] . identifier[copy] ()
identifier[options] . identifier[update] ( identifier[column_options] )
identifier[column_options] = identifier[options]
keyword[if] identifier[grid_options] keyword[is] keyword[None] :
identifier[grid_options] = identifier[defaults] . identifier[grid_options]
keyword[else] :
identifier[options] = identifier[defaults] . identifier[grid_options] . identifier[copy] ()
identifier[options] . identifier[update] ( identifier[grid_options] )
identifier[grid_options] = identifier[options]
keyword[if] keyword[not] identifier[isinstance] ( identifier[grid_options] , identifier[dict] ):
keyword[raise] identifier[TypeError] (
literal[string] % identifier[type] ( identifier[grid_options] )
)
keyword[if] identifier[isinstance] ( identifier[data_frame] , identifier[pd] . identifier[Series] ):
identifier[data_frame] = identifier[pd] . identifier[DataFrame] ( identifier[data_frame] )
keyword[elif] keyword[not] identifier[isinstance] ( identifier[data_frame] , identifier[pd] . identifier[DataFrame] ):
keyword[raise] identifier[TypeError] (
literal[string] % identifier[type] ( identifier[data_frame] )
)
identifier[column_definitions] =( identifier[column_definitions] keyword[or] {})
keyword[return] identifier[QgridWidget] ( identifier[df] = identifier[data_frame] , identifier[precision] = identifier[precision] ,
identifier[grid_options] = identifier[grid_options] ,
identifier[column_options] = identifier[column_options] ,
identifier[column_definitions] = identifier[column_definitions] ,
identifier[row_edit_callback] = identifier[row_edit_callback] ,
identifier[show_toolbar] = identifier[show_toolbar] ) | def show_grid(data_frame, show_toolbar=None, precision=None, grid_options=None, column_options=None, column_definitions=None, row_edit_callback=None):
"""
Renders a DataFrame or Series as an interactive qgrid, represented by
an instance of the ``QgridWidget`` class. The ``QgridWidget`` instance
is constructed using the options passed in to this function. The
``data_frame`` argument to this function is used as the ``df`` kwarg in
    the call to the QgridWidget constructor, and the rest of the parameters
are passed through as is.
If the ``data_frame`` argument is a Series, it will be converted to a
DataFrame before being passed in to the QgridWidget constructor as the
``df`` kwarg.
:rtype: QgridWidget
Parameters
----------
data_frame : DataFrame
The DataFrame that will be displayed by this instance of
QgridWidget.
grid_options : dict
Options to use when creating the SlickGrid control (i.e. the
interactive grid). See the Notes section below for more information
on the available options, as well as the default options that this
widget uses.
precision : integer
The number of digits of precision to display for floating-point
values. If unset, we use the value of
`pandas.get_option('display.precision')`.
show_toolbar : bool
Whether to show a toolbar with options for adding/removing rows.
Adding/removing rows is an experimental feature which only works
with DataFrames that have an integer index.
column_options : dict
Column options that are to be applied to every column. See the
Notes section below for more information on the available options,
as well as the default options that this widget uses.
column_definitions : dict
Column options that are to be applied to individual
columns. The keys of the dict should be the column names, and each
value should be the column options for a particular column,
represented as a dict. The available options for each column are the
same options that are available to be set for all columns via the
``column_options`` parameter. See the Notes section below for more
information on those options.
row_edit_callback : callable
A callable that is called to determine whether a particular row
should be editable or not. Its signature should be
``callable(row)``, where ``row`` is a dictionary which contains a
particular row's values, keyed by column name. The callback should
return True if the provided row should be editable, and False
otherwise.
Notes
-----
The following dictionary is used for ``grid_options`` if none are
provided explicitly::
{
# SlickGrid options
'fullWidthRows': True,
'syncColumnCellResize': True,
'forceFitColumns': True,
'defaultColumnWidth': 150,
'rowHeight': 28,
'enableColumnReorder': False,
'enableTextSelectionOnCells': True,
'editable': True,
'autoEdit': False,
'explicitInitialization': True,
# Qgrid options
'maxVisibleRows': 15,
'minVisibleRows': 8,
'sortable': True,
'filterable': True,
'highlightSelectedCell': False,
'highlightSelectedRow': True
}
The first group of options are SlickGrid "grid options" which are
described in the `SlickGrid documentation
<https://github.com/mleibman/SlickGrid/wiki/Grid-Options>`_.
    The second group of options were added specifically
for Qgrid and therefore are not documented in the SlickGrid documentation.
The following bullet points describe these options.
* **maxVisibleRows** The maximum number of rows that Qgrid will show.
    * **minVisibleRows** The minimum number of rows that Qgrid will show.
* **sortable** Whether the Qgrid instance will allow the user to sort
columns by clicking the column headers. When this is set to ``False``,
nothing will happen when users click the column headers.
* **filterable** Whether the Qgrid instance will allow the user to filter
the grid. When this is set to ``False`` the filter icons won't be shown
for any columns.
* **highlightSelectedCell** If you set this to True, the selected cell
will be given a light blue border.
* **highlightSelectedRow** If you set this to False, the light blue
background that's shown by default for selected rows will be hidden.
The following dictionary is used for ``column_options`` if none are
provided explicitly::
{
# SlickGrid column options
'defaultSortAsc': True,
'maxWidth': None,
'minWidth': 30,
'resizable': True,
'sortable': True,
'toolTip': "",
            'width': None,
# Qgrid column options
'editable': True,
}
The first group of options are SlickGrid "column options" which are
described in the `SlickGrid documentation
<https://github.com/mleibman/SlickGrid/wiki/Column-Options>`_.
The ``editable`` option was added specifically for Qgrid and therefore is
not documented in the SlickGrid documentation. This option specifies
whether a column should be editable or not.
See Also
--------
set_defaults : Permanently set global defaults for the parameters
of ``show_grid``, with the exception of the ``data_frame``
and ``column_definitions`` parameters, since those
depend on the particular set of data being shown by an
instance, and therefore aren't parameters we would want
                       to set for all QgridWidget instances.
set_grid_option : Permanently set global defaults for individual
grid options. Does so by changing the defaults
that the ``show_grid`` method uses for the
``grid_options`` parameter.
QgridWidget : The widget class that is instantiated and returned by this
method.
"""
if show_toolbar is None:
show_toolbar = defaults.show_toolbar # depends on [control=['if'], data=['show_toolbar']]
if precision is None:
precision = defaults.precision # depends on [control=['if'], data=['precision']]
if not isinstance(precision, Integral):
raise TypeError('precision must be int, not %s' % type(precision)) # depends on [control=['if'], data=[]]
if column_options is None:
column_options = defaults.column_options # depends on [control=['if'], data=['column_options']]
else:
options = defaults.column_options.copy()
options.update(column_options)
column_options = options
if grid_options is None:
grid_options = defaults.grid_options # depends on [control=['if'], data=['grid_options']]
else:
options = defaults.grid_options.copy()
options.update(grid_options)
grid_options = options
if not isinstance(grid_options, dict):
raise TypeError('grid_options must be dict, not %s' % type(grid_options)) # depends on [control=['if'], data=[]]
# if a Series is passed in, convert it to a DataFrame
if isinstance(data_frame, pd.Series):
data_frame = pd.DataFrame(data_frame) # depends on [control=['if'], data=[]]
elif not isinstance(data_frame, pd.DataFrame):
raise TypeError('data_frame must be DataFrame or Series, not %s' % type(data_frame)) # depends on [control=['if'], data=[]]
column_definitions = column_definitions or {}
# create a visualization for the dataframe
return QgridWidget(df=data_frame, precision=precision, grid_options=grid_options, column_options=column_options, column_definitions=column_definitions, row_edit_callback=row_edit_callback, show_toolbar=show_toolbar) |
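# A minimal, hedged usage sketch for the show_grid API above. It assumes the
# qgrid package in a Jupyter notebook; the DataFrame contents are invented and
# the option names are taken straight from the docstring.
import pandas as pd
import qgrid

df = pd.DataFrame({'name': ['a', 'b', 'c'], 'value': [0.1, 0.2, 0.3]})
widget = qgrid.show_grid(
    df,
    show_toolbar=True,
    grid_options={'forceFitColumns': False, 'maxVisibleRows': 10},
    column_options={'editable': False},
)
widget  # rendered as an interactive grid when left as the last expression in a cell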
def _make_token_request(session, token_request_data):
"""Make OAuth token request.
Raises GoogleAuthError if authentication fails.
Returns dict response.
"""
try:
r = session.post(OAUTH2_TOKEN_REQUEST_URL, data=token_request_data)
r.raise_for_status()
except requests.RequestException as e:
raise GoogleAuthError('Token request failed: {}'.format(e))
else:
res = r.json()
# If an error occurred, a key 'error' will contain an error code.
if 'error' in res:
raise GoogleAuthError(
'Token request error: {!r}'.format(res['error'])
)
return res | def function[_make_token_request, parameter[session, token_request_data]]:
constant[Make OAuth token request.
Raises GoogleAuthError if authentication fails.
Returns dict response.
]
<ast.Try object at 0x7da20c991ab0> | keyword[def] identifier[_make_token_request] ( identifier[session] , identifier[token_request_data] ):
literal[string]
keyword[try] :
identifier[r] = identifier[session] . identifier[post] ( identifier[OAUTH2_TOKEN_REQUEST_URL] , identifier[data] = identifier[token_request_data] )
identifier[r] . identifier[raise_for_status] ()
keyword[except] identifier[requests] . identifier[RequestException] keyword[as] identifier[e] :
keyword[raise] identifier[GoogleAuthError] ( literal[string] . identifier[format] ( identifier[e] ))
keyword[else] :
identifier[res] = identifier[r] . identifier[json] ()
keyword[if] literal[string] keyword[in] identifier[res] :
keyword[raise] identifier[GoogleAuthError] (
literal[string] . identifier[format] ( identifier[res] [ literal[string] ])
)
keyword[return] identifier[res] | def _make_token_request(session, token_request_data):
"""Make OAuth token request.
Raises GoogleAuthError if authentication fails.
Returns dict response.
"""
try:
r = session.post(OAUTH2_TOKEN_REQUEST_URL, data=token_request_data)
r.raise_for_status() # depends on [control=['try'], data=[]]
except requests.RequestException as e:
raise GoogleAuthError('Token request failed: {}'.format(e)) # depends on [control=['except'], data=['e']]
else:
res = r.json()
# If an error occurred, a key 'error' will contain an error code.
if 'error' in res:
raise GoogleAuthError('Token request error: {!r}'.format(res['error'])) # depends on [control=['if'], data=['res']]
return res |
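# Hedged call sketch for the private helper above. OAUTH2_TOKEN_REQUEST_URL and
# GoogleAuthError are assumed to be defined elsewhere in the same module, and
# the form fields shown are standard OAuth2 refresh-token parameters rather
# than values taken from this code.
import requests

session = requests.Session()
token_request_data = {
    'client_id': '<client-id>',            # placeholder credentials
    'client_secret': '<client-secret>',
    'refresh_token': '<refresh-token>',
    'grant_type': 'refresh_token',
}
try:
    res = _make_token_request(session, token_request_data)
    print(res.get('access_token'))         # present in a successful response
except GoogleAuthError as err:
    print('authentication failed:', err)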
def validate_email_with_regex(email_address):
"""
    Note that this will only filter out syntax mistakes in email addresses.
    If a human would think it is probably a valid email, it will most likely pass.
    However, it could still very well be that the actual email address has simply
    not been claimed by anyone (so then this function fails to invalidate it).
"""
if not re.match(VALID_ADDRESS_REGEXP, email_address):
emsg = 'Emailaddress "{}" is not valid according to RFC 2822 standards'.format(
email_address)
raise YagInvalidEmailAddress(emsg)
# apart from the standard, I personally do not trust email addresses without dot.
if "." not in email_address and "localhost" not in email_address.lower():
raise YagInvalidEmailAddress("Missing dot in emailaddress") | def function[validate_email_with_regex, parameter[email_address]]:
constant[
    Note that this will only filter out syntax mistakes in email addresses.
    If a human would think it is probably a valid email, it will most likely pass.
    However, it could still very well be that the actual email address has simply
    not been claimed by anyone (so then this function fails to invalidate it).
]
if <ast.UnaryOp object at 0x7da18f721bd0> begin[:]
variable[emsg] assign[=] call[constant[Emailaddress "{}" is not valid according to RFC 2822 standards].format, parameter[name[email_address]]]
<ast.Raise object at 0x7da18f720d00>
if <ast.BoolOp object at 0x7da18f723880> begin[:]
<ast.Raise object at 0x7da18f7233d0> | keyword[def] identifier[validate_email_with_regex] ( identifier[email_address] ):
literal[string]
keyword[if] keyword[not] identifier[re] . identifier[match] ( identifier[VALID_ADDRESS_REGEXP] , identifier[email_address] ):
identifier[emsg] = literal[string] . identifier[format] (
identifier[email_address] )
keyword[raise] identifier[YagInvalidEmailAddress] ( identifier[emsg] )
keyword[if] literal[string] keyword[not] keyword[in] identifier[email_address] keyword[and] literal[string] keyword[not] keyword[in] identifier[email_address] . identifier[lower] ():
keyword[raise] identifier[YagInvalidEmailAddress] ( literal[string] ) | def validate_email_with_regex(email_address):
"""
    Note that this will only filter out syntax mistakes in email addresses.
    If a human would think it is probably a valid email, it will most likely pass.
    However, it could still very well be that the actual email address has simply
    not been claimed by anyone (so then this function fails to invalidate it).
"""
if not re.match(VALID_ADDRESS_REGEXP, email_address):
emsg = 'Emailaddress "{}" is not valid according to RFC 2822 standards'.format(email_address)
raise YagInvalidEmailAddress(emsg) # depends on [control=['if'], data=[]]
# apart from the standard, I personally do not trust email addresses without dot.
if '.' not in email_address and 'localhost' not in email_address.lower():
raise YagInvalidEmailAddress('Missing dot in emailaddress') # depends on [control=['if'], data=[]] |
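# Two quick checks against the validator above. VALID_ADDRESS_REGEXP and
# YagInvalidEmailAddress are assumed to be defined in the surrounding module,
# so this only illustrates the expected control flow.
validate_email_with_regex('user@example.com')   # returns None when valid
try:
    validate_email_with_regex('user@example')   # raises: no dot, not localhost
except YagInvalidEmailAddress as err:
    print(err)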
def Rconverter(Robj, dataframe=False):
"""
Convert an object in R's namespace to one suitable
for ipython's namespace.
For a data.frame, it tries to return a structured array.
It first checks for colnames, then names.
If all are NULL, it returns np.asarray(Robj), else
    it tries to construct a recarray.
Parameters
----------
Robj: an R object returned from rpy2
"""
is_data_frame = ro.r('is.data.frame')
colnames = ro.r('colnames')
rownames = ro.r('rownames') # with pandas, these could be used for the index
names = ro.r('names')
if dataframe:
as_data_frame = ro.r('as.data.frame')
cols = colnames(Robj)
_names = names(Robj)
if cols != ri.NULL:
Robj = as_data_frame(Robj)
names = tuple(np.array(cols))
elif _names != ri.NULL:
names = tuple(np.array(_names))
else: # failed to find names
return np.asarray(Robj)
Robj = np.rec.fromarrays(Robj, names = names)
return np.asarray(Robj) | def function[Rconverter, parameter[Robj, dataframe]]:
constant[
Convert an object in R's namespace to one suitable
for ipython's namespace.
For a data.frame, it tries to return a structured array.
It first checks for colnames, then names.
If all are NULL, it returns np.asarray(Robj), else
    it tries to construct a recarray.
Parameters
----------
Robj: an R object returned from rpy2
]
variable[is_data_frame] assign[=] call[name[ro].r, parameter[constant[is.data.frame]]]
variable[colnames] assign[=] call[name[ro].r, parameter[constant[colnames]]]
variable[rownames] assign[=] call[name[ro].r, parameter[constant[rownames]]]
variable[names] assign[=] call[name[ro].r, parameter[constant[names]]]
if name[dataframe] begin[:]
variable[as_data_frame] assign[=] call[name[ro].r, parameter[constant[as.data.frame]]]
variable[cols] assign[=] call[name[colnames], parameter[name[Robj]]]
variable[_names] assign[=] call[name[names], parameter[name[Robj]]]
if compare[name[cols] not_equal[!=] name[ri].NULL] begin[:]
variable[Robj] assign[=] call[name[as_data_frame], parameter[name[Robj]]]
variable[names] assign[=] call[name[tuple], parameter[call[name[np].array, parameter[name[cols]]]]]
variable[Robj] assign[=] call[name[np].rec.fromarrays, parameter[name[Robj]]]
return[call[name[np].asarray, parameter[name[Robj]]]] | keyword[def] identifier[Rconverter] ( identifier[Robj] , identifier[dataframe] = keyword[False] ):
literal[string]
identifier[is_data_frame] = identifier[ro] . identifier[r] ( literal[string] )
identifier[colnames] = identifier[ro] . identifier[r] ( literal[string] )
identifier[rownames] = identifier[ro] . identifier[r] ( literal[string] )
identifier[names] = identifier[ro] . identifier[r] ( literal[string] )
keyword[if] identifier[dataframe] :
identifier[as_data_frame] = identifier[ro] . identifier[r] ( literal[string] )
identifier[cols] = identifier[colnames] ( identifier[Robj] )
identifier[_names] = identifier[names] ( identifier[Robj] )
keyword[if] identifier[cols] != identifier[ri] . identifier[NULL] :
identifier[Robj] = identifier[as_data_frame] ( identifier[Robj] )
identifier[names] = identifier[tuple] ( identifier[np] . identifier[array] ( identifier[cols] ))
keyword[elif] identifier[_names] != identifier[ri] . identifier[NULL] :
identifier[names] = identifier[tuple] ( identifier[np] . identifier[array] ( identifier[_names] ))
keyword[else] :
keyword[return] identifier[np] . identifier[asarray] ( identifier[Robj] )
identifier[Robj] = identifier[np] . identifier[rec] . identifier[fromarrays] ( identifier[Robj] , identifier[names] = identifier[names] )
keyword[return] identifier[np] . identifier[asarray] ( identifier[Robj] ) | def Rconverter(Robj, dataframe=False):
"""
Convert an object in R's namespace to one suitable
for ipython's namespace.
For a data.frame, it tries to return a structured array.
It first checks for colnames, then names.
If all are NULL, it returns np.asarray(Robj), else
    it tries to construct a recarray.
Parameters
----------
Robj: an R object returned from rpy2
"""
is_data_frame = ro.r('is.data.frame')
colnames = ro.r('colnames')
rownames = ro.r('rownames') # with pandas, these could be used for the index
names = ro.r('names')
if dataframe:
as_data_frame = ro.r('as.data.frame')
cols = colnames(Robj)
_names = names(Robj)
if cols != ri.NULL:
Robj = as_data_frame(Robj)
names = tuple(np.array(cols)) # depends on [control=['if'], data=['cols']]
elif _names != ri.NULL:
names = tuple(np.array(_names)) # depends on [control=['if'], data=['_names']]
else: # failed to find names
return np.asarray(Robj)
Robj = np.rec.fromarrays(Robj, names=names) # depends on [control=['if'], data=[]]
return np.asarray(Robj) |
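# The numpy call the function above builds its structured array with, shown
# standalone; the column data here is made up.
import numpy as np

cols = [np.array([1, 2, 3]), np.array(['a', 'b', 'c'])]
rec = np.rec.fromarrays(cols, names=('x', 'y'))
print(rec['x'])   # [1 2 3]
print(rec.y)      # ['a' 'b' 'c']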
def arm(self, value):
"""Arm or disarm system."""
if value:
return api.request_system_arm(self.blink, self.network_id)
return api.request_system_disarm(self.blink, self.network_id) | def function[arm, parameter[self, value]]:
constant[Arm or disarm system.]
if name[value] begin[:]
return[call[name[api].request_system_arm, parameter[name[self].blink, name[self].network_id]]]
return[call[name[api].request_system_disarm, parameter[name[self].blink, name[self].network_id]]] | keyword[def] identifier[arm] ( identifier[self] , identifier[value] ):
literal[string]
keyword[if] identifier[value] :
keyword[return] identifier[api] . identifier[request_system_arm] ( identifier[self] . identifier[blink] , identifier[self] . identifier[network_id] )
keyword[return] identifier[api] . identifier[request_system_disarm] ( identifier[self] . identifier[blink] , identifier[self] . identifier[network_id] ) | def arm(self, value):
"""Arm or disarm system."""
if value:
return api.request_system_arm(self.blink, self.network_id) # depends on [control=['if'], data=[]]
return api.request_system_disarm(self.blink, self.network_id) |
def checkFinite(value, name='value'):
"""Check that value is a finite number.
If it is, return it. If not, raise GraphError describing the
problem, using name in the error message.
"""
if math.isnan(value):
raise GraphError('Encountered NaN %s' % (name,))
elif math.isinf(value):
raise GraphError('Encountered infinite %s' % (name,))
return value | def function[checkFinite, parameter[value, name]]:
constant[Check that value is a finite number.
If it is, return it. If not, raise GraphError describing the
problem, using name in the error message.
]
if call[name[math].isnan, parameter[name[value]]] begin[:]
<ast.Raise object at 0x7da18c4cc070>
return[name[value]] | keyword[def] identifier[checkFinite] ( identifier[value] , identifier[name] = literal[string] ):
literal[string]
keyword[if] identifier[math] . identifier[isnan] ( identifier[value] ):
keyword[raise] identifier[GraphError] ( literal[string] %( identifier[name] ,))
keyword[elif] identifier[math] . identifier[isinf] ( identifier[value] ):
keyword[raise] identifier[GraphError] ( literal[string] %( identifier[name] ,))
keyword[return] identifier[value] | def checkFinite(value, name='value'):
"""Check that value is a finite number.
If it is, return it. If not, raise GraphError describing the
problem, using name in the error message.
"""
if math.isnan(value):
raise GraphError('Encountered NaN %s' % (name,)) # depends on [control=['if'], data=[]]
elif math.isinf(value):
raise GraphError('Encountered infinite %s' % (name,)) # depends on [control=['if'], data=[]]
return value |
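# Behaviour sketch for checkFinite above; GraphError is assumed to be defined
# in the same module, and the value/name pairs are illustrative.
print(checkFinite(3.5))                      # 3.5 passes straight through
try:
    checkFinite(float('nan'), name='datapoint')
except GraphError as err:
    print(err)                               # Encountered NaN datapoint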
def write(self, data):
"""
write data on the OUT endpoint associated to the HID interface
"""
for _ in range(64 - len(data)):
data.append(0)
#logging.debug("send: %s", data)
self.report.send(bytearray([0]) + data)
return | def function[write, parameter[self, data]]:
constant[
write data on the OUT endpoint associated to the HID interface
]
for taget[name[_]] in starred[call[name[range], parameter[binary_operation[constant[64] - call[name[len], parameter[name[data]]]]]]] begin[:]
call[name[data].append, parameter[constant[0]]]
call[name[self].report.send, parameter[binary_operation[call[name[bytearray], parameter[list[[<ast.Constant object at 0x7da1b065a560>]]]] + name[data]]]]
return[None] | keyword[def] identifier[write] ( identifier[self] , identifier[data] ):
literal[string]
keyword[for] identifier[_] keyword[in] identifier[range] ( literal[int] - identifier[len] ( identifier[data] )):
identifier[data] . identifier[append] ( literal[int] )
identifier[self] . identifier[report] . identifier[send] ( identifier[bytearray] ([ literal[int] ])+ identifier[data] )
keyword[return] | def write(self, data):
"""
write data on the OUT endpoint associated to the HID interface
"""
for _ in range(64 - len(data)):
data.append(0) # depends on [control=['for'], data=[]]
#logging.debug("send: %s", data)
self.report.send(bytearray([0]) + data)
return |
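# The zero-padding the method above applies, shown standalone: payloads are
# padded to a fixed 64 bytes, and a report-ID byte (0) is prepended before the
# report goes on the wire.
data = bytearray([0x01, 0x02, 0x03])
for _ in range(64 - len(data)):
    data.append(0)
packet = bytearray([0]) + data   # report ID + 64-byte payload
assert len(packet) == 65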
def _write_branch_and_tag_to_meta_yaml(self):
"""
Write branch and tag to meta.yaml by editing in place
"""
## set the branch to pull source from
with open(self.meta_yaml.replace("meta", "template"), 'r') as infile:
dat = infile.read()
newdat = dat.format(**{'tag': self.tag, 'branch': self.branch})
with open(self.meta_yaml, 'w') as outfile:
outfile.write(newdat) | def function[_write_branch_and_tag_to_meta_yaml, parameter[self]]:
constant[
Write branch and tag to meta.yaml by editing in place
]
with call[name[open], parameter[call[name[self].meta_yaml.replace, parameter[constant[meta], constant[template]]], constant[r]]] begin[:]
variable[dat] assign[=] call[name[infile].read, parameter[]]
variable[newdat] assign[=] call[name[dat].format, parameter[]]
with call[name[open], parameter[name[self].meta_yaml, constant[w]]] begin[:]
call[name[outfile].write, parameter[name[newdat]]] | keyword[def] identifier[_write_branch_and_tag_to_meta_yaml] ( identifier[self] ):
literal[string]
keyword[with] identifier[open] ( identifier[self] . identifier[meta_yaml] . identifier[replace] ( literal[string] , literal[string] ), literal[string] ) keyword[as] identifier[infile] :
identifier[dat] = identifier[infile] . identifier[read] ()
identifier[newdat] = identifier[dat] . identifier[format] (**{ literal[string] : identifier[self] . identifier[tag] , literal[string] : identifier[self] . identifier[branch] })
keyword[with] identifier[open] ( identifier[self] . identifier[meta_yaml] , literal[string] ) keyword[as] identifier[outfile] :
identifier[outfile] . identifier[write] ( identifier[newdat] ) | def _write_branch_and_tag_to_meta_yaml(self):
"""
Write branch and tag to meta.yaml by editing in place
"""
## set the branch to pull source from
with open(self.meta_yaml.replace('meta', 'template'), 'r') as infile:
dat = infile.read()
newdat = dat.format(**{'tag': self.tag, 'branch': self.branch}) # depends on [control=['with'], data=['infile']]
with open(self.meta_yaml, 'w') as outfile:
outfile.write(newdat) # depends on [control=['with'], data=['outfile']] |
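# The templating step in isolation: the template file is assumed to contain
# {tag} and {branch} placeholders, which str.format fills in. The file
# contents below are invented for illustration.
template = 'source:\n  git_tag: {tag}\n  git_branch: {branch}\n'
print(template.format(**{'tag': 'v1.2.3', 'branch': 'master'}))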
def get_sequence_properties(self, clean_seq=False, representatives_only=True):
"""Run Biopython ProteinAnalysis and EMBOSS pepstats to summarize basic statistics of all protein sequences.
Results are stored in the protein's respective SeqProp objects at ``.annotations``
Args:
            representatives_only (bool): If analysis should only be run on the representative sequences
"""
for g in tqdm(self.genes):
g.protein.get_sequence_properties(clean_seq=clean_seq, representative_only=representatives_only) | def function[get_sequence_properties, parameter[self, clean_seq, representatives_only]]:
constant[Run Biopython ProteinAnalysis and EMBOSS pepstats to summarize basic statistics of all protein sequences.
Results are stored in the protein's respective SeqProp objects at ``.annotations``
Args:
            representatives_only (bool): If analysis should only be run on the representative sequences
]
for taget[name[g]] in starred[call[name[tqdm], parameter[name[self].genes]]] begin[:]
call[name[g].protein.get_sequence_properties, parameter[]] | keyword[def] identifier[get_sequence_properties] ( identifier[self] , identifier[clean_seq] = keyword[False] , identifier[representatives_only] = keyword[True] ):
literal[string]
keyword[for] identifier[g] keyword[in] identifier[tqdm] ( identifier[self] . identifier[genes] ):
identifier[g] . identifier[protein] . identifier[get_sequence_properties] ( identifier[clean_seq] = identifier[clean_seq] , identifier[representative_only] = identifier[representatives_only] ) | def get_sequence_properties(self, clean_seq=False, representatives_only=True):
"""Run Biopython ProteinAnalysis and EMBOSS pepstats to summarize basic statistics of all protein sequences.
Results are stored in the protein's respective SeqProp objects at ``.annotations``
Args:
            representatives_only (bool): If analysis should only be run on the representative sequences
"""
for g in tqdm(self.genes):
g.protein.get_sequence_properties(clean_seq=clean_seq, representative_only=representatives_only) # depends on [control=['for'], data=['g']] |
def del_restriction(self, command, user, event_types):
"""
Removes restriction for given `command`.
:param command: command on which the restriction should be removed.
:type command: str
:param user: username for which restriction should be removed.
:type user: str
:param event_types: types of events that should be removed from restriction.
:type event_types: list
"""
if user.lower() in self.commands_rights[command]:
for event_type in event_types:
try:
self.commands_rights[command][user.lower()].remove(event_type)
except ValueError:
pass
if not self.commands_rights[command][user.lower()]:
self.commands_rights[command].pop(user.lower()) | def function[del_restriction, parameter[self, command, user, event_types]]:
constant[
Removes restriction for given `command`.
:param command: command on which the restriction should be removed.
:type command: str
:param user: username for which restriction should be removed.
:type user: str
:param event_types: types of events that should be removed from restriction.
:type event_types: list
]
if compare[call[name[user].lower, parameter[]] in call[name[self].commands_rights][name[command]]] begin[:]
for taget[name[event_type]] in starred[name[event_types]] begin[:]
<ast.Try object at 0x7da20c6a8fd0>
if <ast.UnaryOp object at 0x7da1b16e35b0> begin[:]
call[call[name[self].commands_rights][name[command]].pop, parameter[call[name[user].lower, parameter[]]]] | keyword[def] identifier[del_restriction] ( identifier[self] , identifier[command] , identifier[user] , identifier[event_types] ):
literal[string]
keyword[if] identifier[user] . identifier[lower] () keyword[in] identifier[self] . identifier[commands_rights] [ identifier[command] ]:
keyword[for] identifier[event_type] keyword[in] identifier[event_types] :
keyword[try] :
identifier[self] . identifier[commands_rights] [ identifier[command] ][ identifier[user] . identifier[lower] ()]. identifier[remove] ( identifier[event_type] )
keyword[except] identifier[ValueError] :
keyword[pass]
keyword[if] keyword[not] identifier[self] . identifier[commands_rights] [ identifier[command] ][ identifier[user] . identifier[lower] ()]:
identifier[self] . identifier[commands_rights] [ identifier[command] ]. identifier[pop] ( identifier[user] . identifier[lower] ()) | def del_restriction(self, command, user, event_types):
"""
Removes restriction for given `command`.
:param command: command on which the restriction should be removed.
:type command: str
:param user: username for which restriction should be removed.
:type user: str
:param event_types: types of events that should be removed from restriction.
:type event_types: list
"""
if user.lower() in self.commands_rights[command]:
for event_type in event_types:
try:
self.commands_rights[command][user.lower()].remove(event_type) # depends on [control=['try'], data=[]]
except ValueError:
pass # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['event_type']]
if not self.commands_rights[command][user.lower()]:
self.commands_rights[command].pop(user.lower()) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] |
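# A stand-in showing the data shape del_restriction mutates: commands_rights
# maps command -> {lower-cased username: [event types]}. The method is called
# as a plain function with an explicit self to keep the sketch self-contained;
# the command and event names are illustrative.
from types import SimpleNamespace

bot = SimpleNamespace(
    commands_rights={'kick': {'alice': ['pubmsg', 'privmsg']}})
del_restriction(bot, 'kick', 'Alice', ['privmsg'])
print(bot.commands_rights)   # {'kick': {'alice': ['pubmsg']}}
del_restriction(bot, 'kick', 'ALICE', ['pubmsg'])
print(bot.commands_rights)   # {'kick': {}}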
def status(context):
"""See which files have changed, checked in, and uploaded"""
context.obj.find_repo_type()
context.obj.call([context.obj.vc_name, 'status']) | def function[status, parameter[context]]:
constant[See which files have changed, checked in, and uploaded]
call[name[context].obj.find_repo_type, parameter[]]
call[name[context].obj.call, parameter[list[[<ast.Attribute object at 0x7da18f723850>, <ast.Constant object at 0x7da18f7206d0>]]]] | keyword[def] identifier[status] ( identifier[context] ):
literal[string]
identifier[context] . identifier[obj] . identifier[find_repo_type] ()
identifier[context] . identifier[obj] . identifier[call] ([ identifier[context] . identifier[obj] . identifier[vc_name] , literal[string] ]) | def status(context):
"""See which files have changed, checked in, and uploaded"""
context.obj.find_repo_type()
context.obj.call([context.obj.vc_name, 'status']) |
def gen_tx(self):
"""Generate a :class:`Transaction
<stellar_base.transaction.Transaction>` object from the list of
operations contained within this object.
:return: A transaction representing all of the operations that have
been appended to this builder.
:rtype: :class:`Transaction <stellar_base.transaction.Transaction>`
"""
if not self.address:
raise StellarAddressInvalidError('Transaction does not have any source address.')
if self.sequence is None:
raise SequenceError('No sequence is present, maybe not funded?')
tx = Transaction(
source=self.address,
sequence=self.sequence,
time_bounds=self.time_bounds,
memo=self.memo,
fee=self.fee * len(self.ops),
operations=self.ops)
self.tx = tx
return tx | def function[gen_tx, parameter[self]]:
constant[Generate a :class:`Transaction
<stellar_base.transaction.Transaction>` object from the list of
operations contained within this object.
:return: A transaction representing all of the operations that have
been appended to this builder.
:rtype: :class:`Transaction <stellar_base.transaction.Transaction>`
]
if <ast.UnaryOp object at 0x7da18dc98af0> begin[:]
<ast.Raise object at 0x7da18dc98580>
if compare[name[self].sequence is constant[None]] begin[:]
<ast.Raise object at 0x7da18dc9b730>
variable[tx] assign[=] call[name[Transaction], parameter[]]
name[self].tx assign[=] name[tx]
return[name[tx]] | keyword[def] identifier[gen_tx] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[address] :
keyword[raise] identifier[StellarAddressInvalidError] ( literal[string] )
keyword[if] identifier[self] . identifier[sequence] keyword[is] keyword[None] :
keyword[raise] identifier[SequenceError] ( literal[string] )
identifier[tx] = identifier[Transaction] (
identifier[source] = identifier[self] . identifier[address] ,
identifier[sequence] = identifier[self] . identifier[sequence] ,
identifier[time_bounds] = identifier[self] . identifier[time_bounds] ,
identifier[memo] = identifier[self] . identifier[memo] ,
identifier[fee] = identifier[self] . identifier[fee] * identifier[len] ( identifier[self] . identifier[ops] ),
identifier[operations] = identifier[self] . identifier[ops] )
identifier[self] . identifier[tx] = identifier[tx]
keyword[return] identifier[tx] | def gen_tx(self):
"""Generate a :class:`Transaction
<stellar_base.transaction.Transaction>` object from the list of
operations contained within this object.
:return: A transaction representing all of the operations that have
been appended to this builder.
:rtype: :class:`Transaction <stellar_base.transaction.Transaction>`
"""
if not self.address:
raise StellarAddressInvalidError('Transaction does not have any source address.') # depends on [control=['if'], data=[]]
if self.sequence is None:
raise SequenceError('No sequence is present, maybe not funded?') # depends on [control=['if'], data=[]]
tx = Transaction(source=self.address, sequence=self.sequence, time_bounds=self.time_bounds, memo=self.memo, fee=self.fee * len(self.ops), operations=self.ops)
self.tx = tx
return tx |
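# The fee arithmetic in gen_tx, isolated: the builder charges its
# per-operation base fee times the number of queued operations. 100 stroops is
# Stellar's classic base fee, and the operation list is a stand-in for real
# Operation objects.
base_fee = 100                                # stroops per operation
ops = ['payment', 'payment', 'manage_data']   # stand-ins for Operation objects
print(base_fee * len(ops))                    # 300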
def _rest_patch(self, suburi, request_headers, request_body):
"""REST PATCH operation.
HTTP response codes could be 500, 404, 202 etc.
"""
return self._rest_op('PATCH', suburi, request_headers, request_body) | def function[_rest_patch, parameter[self, suburi, request_headers, request_body]]:
constant[REST PATCH operation.
HTTP response codes could be 500, 404, 202 etc.
]
return[call[name[self]._rest_op, parameter[constant[PATCH], name[suburi], name[request_headers], name[request_body]]]] | keyword[def] identifier[_rest_patch] ( identifier[self] , identifier[suburi] , identifier[request_headers] , identifier[request_body] ):
literal[string]
keyword[return] identifier[self] . identifier[_rest_op] ( literal[string] , identifier[suburi] , identifier[request_headers] , identifier[request_body] ) | def _rest_patch(self, suburi, request_headers, request_body):
"""REST PATCH operation.
HTTP response codes could be 500, 404, 202 etc.
"""
return self._rest_op('PATCH', suburi, request_headers, request_body) |
def to_capabilities(self):
"""
        Creates a capabilities dictionary with all the options that have
        been set and returns it
"""
caps = self._caps
browser_options = {}
if self.binary_location:
browser_options["binary"] = self.binary_location
if self.arguments:
browser_options["args"] = self.arguments
browser_options["useOverlayScrollbars"] = self.overlay_scrollbars_enabled
caps[Options.KEY] = browser_options
return caps | def function[to_capabilities, parameter[self]]:
constant[
        Creates a capabilities dictionary with all the options that have
        been set and returns it
]
variable[caps] assign[=] name[self]._caps
variable[browser_options] assign[=] dictionary[[], []]
if name[self].binary_location begin[:]
call[name[browser_options]][constant[binary]] assign[=] name[self].binary_location
if name[self].arguments begin[:]
call[name[browser_options]][constant[args]] assign[=] name[self].arguments
call[name[browser_options]][constant[useOverlayScrollbars]] assign[=] name[self].overlay_scrollbars_enabled
call[name[caps]][name[Options].KEY] assign[=] name[browser_options]
return[name[caps]] | keyword[def] identifier[to_capabilities] ( identifier[self] ):
literal[string]
identifier[caps] = identifier[self] . identifier[_caps]
identifier[browser_options] ={}
keyword[if] identifier[self] . identifier[binary_location] :
identifier[browser_options] [ literal[string] ]= identifier[self] . identifier[binary_location]
keyword[if] identifier[self] . identifier[arguments] :
identifier[browser_options] [ literal[string] ]= identifier[self] . identifier[arguments]
identifier[browser_options] [ literal[string] ]= identifier[self] . identifier[overlay_scrollbars_enabled]
identifier[caps] [ identifier[Options] . identifier[KEY] ]= identifier[browser_options]
keyword[return] identifier[caps] | def to_capabilities(self):
"""
        Creates a capabilities dictionary with all the options that have
        been set and returns it
"""
caps = self._caps
browser_options = {}
if self.binary_location:
browser_options['binary'] = self.binary_location # depends on [control=['if'], data=[]]
if self.arguments:
browser_options['args'] = self.arguments # depends on [control=['if'], data=[]]
browser_options['useOverlayScrollbars'] = self.overlay_scrollbars_enabled
caps[Options.KEY] = browser_options
return caps |
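# Illustrative only: the attribute names below are the ones the method body
# reads (binary_location, arguments, overlay_scrollbars_enabled, Options.KEY);
# how a real Options instance is constructed and configured may differ, and
# the binary path is hypothetical.
opts = Options()
opts.binary_location = '/usr/bin/browser'
caps = opts.to_capabilities()
print(caps[Options.KEY])
# e.g. {'binary': '/usr/bin/browser', 'useOverlayScrollbars': False}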
def get_previous_version(self):
"""Query GitHub releases to find the previous production release"""
gh = self.get_github_api()
repo = gh.repository(self.repo_owner, self.repo_name)
most_recent = None
for release in repo.releases():
# Return the second release that matches the release prefix
if release.tag_name.startswith(self.project__git__prefix_release):
if most_recent is None:
most_recent = release
else:
return LooseVersion(self.get_version_for_tag(release.tag_name)) | def function[get_previous_version, parameter[self]]:
constant[Query GitHub releases to find the previous production release]
variable[gh] assign[=] call[name[self].get_github_api, parameter[]]
variable[repo] assign[=] call[name[gh].repository, parameter[name[self].repo_owner, name[self].repo_name]]
variable[most_recent] assign[=] constant[None]
for taget[name[release]] in starred[call[name[repo].releases, parameter[]]] begin[:]
if call[name[release].tag_name.startswith, parameter[name[self].project__git__prefix_release]] begin[:]
if compare[name[most_recent] is constant[None]] begin[:]
variable[most_recent] assign[=] name[release] | keyword[def] identifier[get_previous_version] ( identifier[self] ):
literal[string]
identifier[gh] = identifier[self] . identifier[get_github_api] ()
identifier[repo] = identifier[gh] . identifier[repository] ( identifier[self] . identifier[repo_owner] , identifier[self] . identifier[repo_name] )
identifier[most_recent] = keyword[None]
keyword[for] identifier[release] keyword[in] identifier[repo] . identifier[releases] ():
keyword[if] identifier[release] . identifier[tag_name] . identifier[startswith] ( identifier[self] . identifier[project__git__prefix_release] ):
keyword[if] identifier[most_recent] keyword[is] keyword[None] :
identifier[most_recent] = identifier[release]
keyword[else] :
keyword[return] identifier[LooseVersion] ( identifier[self] . identifier[get_version_for_tag] ( identifier[release] . identifier[tag_name] )) | def get_previous_version(self):
"""Query GitHub releases to find the previous production release"""
gh = self.get_github_api()
repo = gh.repository(self.repo_owner, self.repo_name)
most_recent = None
for release in repo.releases():
# Return the second release that matches the release prefix
if release.tag_name.startswith(self.project__git__prefix_release):
if most_recent is None:
most_recent = release # depends on [control=['if'], data=['most_recent']]
else:
return LooseVersion(self.get_version_for_tag(release.tag_name)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['release']] |
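# The comparison semantics of the returned value, shown with distutils'
# LooseVersion, which this code returns. distutils is deprecated in recent
# Pythons, so this is purely illustrative.
from distutils.version import LooseVersion

assert LooseVersion('1.10.0') > LooseVersion('1.9.2')
assert LooseVersion('1.9.2') == LooseVersion('1.9.2')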
def logical_chassis_fwdl_status_output_cluster_fwdl_entries_fwdl_entries_date_and_time_info(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
logical_chassis_fwdl_status = ET.Element("logical_chassis_fwdl_status")
config = logical_chassis_fwdl_status
output = ET.SubElement(logical_chassis_fwdl_status, "output")
cluster_fwdl_entries = ET.SubElement(output, "cluster-fwdl-entries")
fwdl_entries = ET.SubElement(cluster_fwdl_entries, "fwdl-entries")
date_and_time_info = ET.SubElement(fwdl_entries, "date-and-time-info")
date_and_time_info.text = kwargs.pop('date_and_time_info')
callback = kwargs.pop('callback', self._callback)
return callback(config) | def function[logical_chassis_fwdl_status_output_cluster_fwdl_entries_fwdl_entries_date_and_time_info, parameter[self]]:
constant[Auto Generated Code
]
variable[config] assign[=] call[name[ET].Element, parameter[constant[config]]]
variable[logical_chassis_fwdl_status] assign[=] call[name[ET].Element, parameter[constant[logical_chassis_fwdl_status]]]
variable[config] assign[=] name[logical_chassis_fwdl_status]
variable[output] assign[=] call[name[ET].SubElement, parameter[name[logical_chassis_fwdl_status], constant[output]]]
variable[cluster_fwdl_entries] assign[=] call[name[ET].SubElement, parameter[name[output], constant[cluster-fwdl-entries]]]
variable[fwdl_entries] assign[=] call[name[ET].SubElement, parameter[name[cluster_fwdl_entries], constant[fwdl-entries]]]
variable[date_and_time_info] assign[=] call[name[ET].SubElement, parameter[name[fwdl_entries], constant[date-and-time-info]]]
name[date_and_time_info].text assign[=] call[name[kwargs].pop, parameter[constant[date_and_time_info]]]
variable[callback] assign[=] call[name[kwargs].pop, parameter[constant[callback], name[self]._callback]]
return[call[name[callback], parameter[name[config]]]] | keyword[def] identifier[logical_chassis_fwdl_status_output_cluster_fwdl_entries_fwdl_entries_date_and_time_info] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
identifier[config] = identifier[ET] . identifier[Element] ( literal[string] )
identifier[logical_chassis_fwdl_status] = identifier[ET] . identifier[Element] ( literal[string] )
identifier[config] = identifier[logical_chassis_fwdl_status]
identifier[output] = identifier[ET] . identifier[SubElement] ( identifier[logical_chassis_fwdl_status] , literal[string] )
identifier[cluster_fwdl_entries] = identifier[ET] . identifier[SubElement] ( identifier[output] , literal[string] )
identifier[fwdl_entries] = identifier[ET] . identifier[SubElement] ( identifier[cluster_fwdl_entries] , literal[string] )
identifier[date_and_time_info] = identifier[ET] . identifier[SubElement] ( identifier[fwdl_entries] , literal[string] )
identifier[date_and_time_info] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] )
identifier[callback] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[self] . identifier[_callback] )
keyword[return] identifier[callback] ( identifier[config] ) | def logical_chassis_fwdl_status_output_cluster_fwdl_entries_fwdl_entries_date_and_time_info(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element('config')
logical_chassis_fwdl_status = ET.Element('logical_chassis_fwdl_status')
config = logical_chassis_fwdl_status
output = ET.SubElement(logical_chassis_fwdl_status, 'output')
cluster_fwdl_entries = ET.SubElement(output, 'cluster-fwdl-entries')
fwdl_entries = ET.SubElement(cluster_fwdl_entries, 'fwdl-entries')
date_and_time_info = ET.SubElement(fwdl_entries, 'date-and-time-info')
date_and_time_info.text = kwargs.pop('date_and_time_info')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
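# The ElementTree pattern the auto-generated method above follows, standalone;
# element names are shortened for illustration.
import xml.etree.ElementTree as ET

config = ET.Element('config')
status = ET.SubElement(config, 'logical_chassis_fwdl_status')
output = ET.SubElement(status, 'output')
info = ET.SubElement(output, 'date-and-time-info')
info.text = '2024-01-01T00:00:00'
print(ET.tostring(config).decode())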
def aggregation_summary(aggregate_hazard, aggregation):
"""Compute the summary from the aggregate hazard to the analysis layer.
Source layer :
| haz_id | haz_class | aggr_id | aggr_name | total_feature |
Target layer :
| aggr_id | aggr_name |
Output layer :
| aggr_id | aggr_name | count of affected features per exposure type
:param aggregate_hazard: The layer to aggregate vector layer.
:type aggregate_hazard: QgsVectorLayer
    :param aggregation: The aggregation vector layer to which statistics are written.
:type aggregation: QgsVectorLayer
:return: The new aggregation layer with summary.
:rtype: QgsVectorLayer
.. versionadded:: 4.0
"""
source_fields = aggregate_hazard.keywords['inasafe_fields']
target_fields = aggregation.keywords['inasafe_fields']
target_compulsory_fields = [
aggregation_id_field,
aggregation_name_field,
]
check_inputs(target_compulsory_fields, target_fields)
# Missing exposure_count_field
source_compulsory_fields = [
aggregation_id_field,
aggregation_name_field,
hazard_id_field,
hazard_class_field,
affected_field,
]
check_inputs(source_compulsory_fields, source_fields)
pattern = exposure_count_field['key']
pattern = pattern.replace('%s', '')
unique_exposure = read_dynamic_inasafe_field(
source_fields, exposure_count_field)
absolute_values = create_absolute_values_structure(
aggregate_hazard, ['aggregation_id'])
flat_table = FlatTable('aggregation_id', 'exposure_class')
aggregation_index = source_fields[aggregation_id_field['key']]
# We want to loop over affected features only.
request = QgsFeatureRequest()
request.setFlags(QgsFeatureRequest.NoGeometry)
expression = '\"%s\" = \'%s\'' % (
affected_field['field_name'], tr('True'))
request.setFilterExpression(expression)
for area in aggregate_hazard.getFeatures(request):
for key, name_field in list(source_fields.items()):
if key.endswith(pattern):
aggregation_id = area[aggregation_index]
exposure_class = key.replace(pattern, '')
value = area[name_field]
flat_table.add_value(
value,
aggregation_id=aggregation_id,
exposure_class=exposure_class
)
# We summarize every absolute values.
for field, field_definition in list(absolute_values.items()):
value = area[field]
if (value is None or value == ''
or (hasattr(value, 'isNull')
and value.isNull())):
value = 0
field_definition[0].add_value(
value,
aggregation_id=area[aggregation_index],
)
shift = aggregation.fields().count()
aggregation.startEditing()
dynamic_structure = [
[affected_exposure_count_field, unique_exposure],
]
add_fields(
aggregation,
absolute_values,
[total_affected_field],
dynamic_structure)
aggregation_index = target_fields[aggregation_id_field['key']]
request = QgsFeatureRequest()
request.setFlags(QgsFeatureRequest.NoGeometry)
for area in aggregation.getFeatures(request):
aggregation_value = area[aggregation_index]
total = 0
        for i, val in enumerate(unique_exposure):
            count = flat_table.get_value(
                aggregation_id=aggregation_value,
                exposure_class=val
            )
            total += count
            aggregation.changeAttributeValue(area.id(), shift + i, count)
aggregation.changeAttributeValue(
area.id(), shift + len(unique_exposure), total)
for i, field in enumerate(absolute_values.values()):
value = field[0].get_value(
aggregation_id=aggregation_value,
)
target_index = shift + len(unique_exposure) + 1 + i
aggregation.changeAttributeValue(
area.id(), target_index, value)
aggregation.commitChanges()
aggregation.keywords['title'] = layer_purpose_aggregation_summary['name']
if qgis_version() >= 21800:
aggregation.setName(aggregation.keywords['title'])
else:
aggregation.setLayerName(aggregation.keywords['title'])
aggregation.keywords['layer_purpose'] = (
layer_purpose_aggregation_summary['key'])
aggregation.keywords['exposure_keywords'] = (
aggregate_hazard.keywords['exposure_keywords'].copy())
check_layer(aggregation)
return aggregation | def function[aggregation_summary, parameter[aggregate_hazard, aggregation]]:
constant[Compute the summary from the aggregate hazard to the analysis layer.
Source layer :
| haz_id | haz_class | aggr_id | aggr_name | total_feature |
Target layer :
| aggr_id | aggr_name |
Output layer :
| aggr_id | aggr_name | count of affected features per exposure type
:param aggregate_hazard: The aggregate hazard vector layer.
:type aggregate_hazard: QgsVectorLayer
:param aggregation: The aggregation vector layer where to write statistics.
:type aggregation: QgsVectorLayer
:return: The new aggregation layer with summary.
:rtype: QgsVectorLayer
.. versionadded:: 4.0
]
variable[source_fields] assign[=] call[name[aggregate_hazard].keywords][constant[inasafe_fields]]
variable[target_fields] assign[=] call[name[aggregation].keywords][constant[inasafe_fields]]
variable[target_compulsory_fields] assign[=] list[[<ast.Name object at 0x7da20c6aabf0>, <ast.Name object at 0x7da20c6a9e70>]]
call[name[check_inputs], parameter[name[target_compulsory_fields], name[target_fields]]]
variable[source_compulsory_fields] assign[=] list[[<ast.Name object at 0x7da20c6aab60>, <ast.Name object at 0x7da20c6aa830>, <ast.Name object at 0x7da20c6a8970>, <ast.Name object at 0x7da20c6ab010>, <ast.Name object at 0x7da20c6a92a0>]]
call[name[check_inputs], parameter[name[source_compulsory_fields], name[source_fields]]]
variable[pattern] assign[=] call[name[exposure_count_field]][constant[key]]
variable[pattern] assign[=] call[name[pattern].replace, parameter[constant[%s], constant[]]]
variable[unique_exposure] assign[=] call[name[read_dynamic_inasafe_field], parameter[name[source_fields], name[exposure_count_field]]]
variable[absolute_values] assign[=] call[name[create_absolute_values_structure], parameter[name[aggregate_hazard], list[[<ast.Constant object at 0x7da20c6a9ea0>]]]]
variable[flat_table] assign[=] call[name[FlatTable], parameter[constant[aggregation_id], constant[exposure_class]]]
variable[aggregation_index] assign[=] call[name[source_fields]][call[name[aggregation_id_field]][constant[key]]]
variable[request] assign[=] call[name[QgsFeatureRequest], parameter[]]
call[name[request].setFlags, parameter[name[QgsFeatureRequest].NoGeometry]]
variable[expression] assign[=] binary_operation[constant["%s" = '%s'] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Subscript object at 0x7da1b0efa470>, <ast.Call object at 0x7da1b0ef9d50>]]]
call[name[request].setFilterExpression, parameter[name[expression]]]
for taget[name[area]] in starred[call[name[aggregate_hazard].getFeatures, parameter[name[request]]]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da1b0ef9b10>, <ast.Name object at 0x7da1b0efab30>]]] in starred[call[name[list], parameter[call[name[source_fields].items, parameter[]]]]] begin[:]
if call[name[key].endswith, parameter[name[pattern]]] begin[:]
variable[aggregation_id] assign[=] call[name[area]][name[aggregation_index]]
variable[exposure_class] assign[=] call[name[key].replace, parameter[name[pattern], constant[]]]
variable[value] assign[=] call[name[area]][name[name_field]]
call[name[flat_table].add_value, parameter[name[value]]]
for taget[tuple[[<ast.Name object at 0x7da1b0ef8ac0>, <ast.Name object at 0x7da1b0ef89d0>]]] in starred[call[name[list], parameter[call[name[absolute_values].items, parameter[]]]]] begin[:]
variable[value] assign[=] call[name[area]][name[field]]
if <ast.BoolOp object at 0x7da1b0ef9b70> begin[:]
variable[value] assign[=] constant[0]
call[call[name[field_definition]][constant[0]].add_value, parameter[name[value]]]
variable[shift] assign[=] call[call[name[aggregation].fields, parameter[]].count, parameter[]]
call[name[aggregation].startEditing, parameter[]]
variable[dynamic_structure] assign[=] list[[<ast.List object at 0x7da20c6aa3e0>]]
call[name[add_fields], parameter[name[aggregation], name[absolute_values], list[[<ast.Name object at 0x7da20e956b90>]], name[dynamic_structure]]]
variable[aggregation_index] assign[=] call[name[target_fields]][call[name[aggregation_id_field]][constant[key]]]
variable[request] assign[=] call[name[QgsFeatureRequest], parameter[]]
call[name[request].setFlags, parameter[name[QgsFeatureRequest].NoGeometry]]
for taget[name[area]] in starred[call[name[aggregation].getFeatures, parameter[name[request]]]] begin[:]
variable[aggregation_value] assign[=] call[name[area]][name[aggregation_index]]
variable[total] assign[=] constant[0]
for taget[tuple[[<ast.Name object at 0x7da20e956200>, <ast.Name object at 0x7da20e956920>]]] in starred[call[name[enumerate], parameter[name[unique_exposure]]]] begin[:]
variable[sum] assign[=] call[name[flat_table].get_value, parameter[]]
<ast.AugAssign object at 0x7da20e955360>
call[name[aggregation].changeAttributeValue, parameter[call[name[area].id, parameter[]], binary_operation[name[shift] + name[i]], name[sum]]]
call[name[aggregation].changeAttributeValue, parameter[call[name[area].id, parameter[]], binary_operation[name[shift] + call[name[len], parameter[name[unique_exposure]]]], name[total]]]
for taget[tuple[[<ast.Name object at 0x7da20e957970>, <ast.Name object at 0x7da20e957f10>]]] in starred[call[name[enumerate], parameter[call[name[absolute_values].values, parameter[]]]]] begin[:]
variable[value] assign[=] call[call[name[field]][constant[0]].get_value, parameter[]]
variable[target_index] assign[=] binary_operation[binary_operation[binary_operation[name[shift] + call[name[len], parameter[name[unique_exposure]]]] + constant[1]] + name[i]]
call[name[aggregation].changeAttributeValue, parameter[call[name[area].id, parameter[]], name[target_index], name[value]]]
call[name[aggregation].commitChanges, parameter[]]
call[name[aggregation].keywords][constant[title]] assign[=] call[name[layer_purpose_aggregation_summary]][constant[name]]
if compare[call[name[qgis_version], parameter[]] greater_or_equal[>=] constant[21800]] begin[:]
call[name[aggregation].setName, parameter[call[name[aggregation].keywords][constant[title]]]]
call[name[aggregation].keywords][constant[layer_purpose]] assign[=] call[name[layer_purpose_aggregation_summary]][constant[key]]
call[name[aggregation].keywords][constant[exposure_keywords]] assign[=] call[call[name[aggregate_hazard].keywords][constant[exposure_keywords]].copy, parameter[]]
call[name[check_layer], parameter[name[aggregation]]]
return[name[aggregation]] | keyword[def] identifier[aggregation_summary] ( identifier[aggregate_hazard] , identifier[aggregation] ):
literal[string]
identifier[source_fields] = identifier[aggregate_hazard] . identifier[keywords] [ literal[string] ]
identifier[target_fields] = identifier[aggregation] . identifier[keywords] [ literal[string] ]
identifier[target_compulsory_fields] =[
identifier[aggregation_id_field] ,
identifier[aggregation_name_field] ,
]
identifier[check_inputs] ( identifier[target_compulsory_fields] , identifier[target_fields] )
identifier[source_compulsory_fields] =[
identifier[aggregation_id_field] ,
identifier[aggregation_name_field] ,
identifier[hazard_id_field] ,
identifier[hazard_class_field] ,
identifier[affected_field] ,
]
identifier[check_inputs] ( identifier[source_compulsory_fields] , identifier[source_fields] )
identifier[pattern] = identifier[exposure_count_field] [ literal[string] ]
identifier[pattern] = identifier[pattern] . identifier[replace] ( literal[string] , literal[string] )
identifier[unique_exposure] = identifier[read_dynamic_inasafe_field] (
identifier[source_fields] , identifier[exposure_count_field] )
identifier[absolute_values] = identifier[create_absolute_values_structure] (
identifier[aggregate_hazard] ,[ literal[string] ])
identifier[flat_table] = identifier[FlatTable] ( literal[string] , literal[string] )
identifier[aggregation_index] = identifier[source_fields] [ identifier[aggregation_id_field] [ literal[string] ]]
identifier[request] = identifier[QgsFeatureRequest] ()
identifier[request] . identifier[setFlags] ( identifier[QgsFeatureRequest] . identifier[NoGeometry] )
identifier[expression] = literal[string] %(
identifier[affected_field] [ literal[string] ], identifier[tr] ( literal[string] ))
identifier[request] . identifier[setFilterExpression] ( identifier[expression] )
keyword[for] identifier[area] keyword[in] identifier[aggregate_hazard] . identifier[getFeatures] ( identifier[request] ):
keyword[for] identifier[key] , identifier[name_field] keyword[in] identifier[list] ( identifier[source_fields] . identifier[items] ()):
keyword[if] identifier[key] . identifier[endswith] ( identifier[pattern] ):
identifier[aggregation_id] = identifier[area] [ identifier[aggregation_index] ]
identifier[exposure_class] = identifier[key] . identifier[replace] ( identifier[pattern] , literal[string] )
identifier[value] = identifier[area] [ identifier[name_field] ]
identifier[flat_table] . identifier[add_value] (
identifier[value] ,
identifier[aggregation_id] = identifier[aggregation_id] ,
identifier[exposure_class] = identifier[exposure_class]
)
keyword[for] identifier[field] , identifier[field_definition] keyword[in] identifier[list] ( identifier[absolute_values] . identifier[items] ()):
identifier[value] = identifier[area] [ identifier[field] ]
keyword[if] ( identifier[value] keyword[is] keyword[None] keyword[or] identifier[value] == literal[string]
keyword[or] ( identifier[hasattr] ( identifier[value] , literal[string] )
keyword[and] identifier[value] . identifier[isNull] ())):
identifier[value] = literal[int]
identifier[field_definition] [ literal[int] ]. identifier[add_value] (
identifier[value] ,
identifier[aggregation_id] = identifier[area] [ identifier[aggregation_index] ],
)
identifier[shift] = identifier[aggregation] . identifier[fields] (). identifier[count] ()
identifier[aggregation] . identifier[startEditing] ()
identifier[dynamic_structure] =[
[ identifier[affected_exposure_count_field] , identifier[unique_exposure] ],
]
identifier[add_fields] (
identifier[aggregation] ,
identifier[absolute_values] ,
[ identifier[total_affected_field] ],
identifier[dynamic_structure] )
identifier[aggregation_index] = identifier[target_fields] [ identifier[aggregation_id_field] [ literal[string] ]]
identifier[request] = identifier[QgsFeatureRequest] ()
identifier[request] . identifier[setFlags] ( identifier[QgsFeatureRequest] . identifier[NoGeometry] )
keyword[for] identifier[area] keyword[in] identifier[aggregation] . identifier[getFeatures] ( identifier[request] ):
identifier[aggregation_value] = identifier[area] [ identifier[aggregation_index] ]
identifier[total] = literal[int]
keyword[for] identifier[i] , identifier[val] keyword[in] identifier[enumerate] ( identifier[unique_exposure] ):
identifier[sum] = identifier[flat_table] . identifier[get_value] (
identifier[aggregation_id] = identifier[aggregation_value] ,
identifier[exposure_class] = identifier[val]
)
identifier[total] += identifier[sum]
identifier[aggregation] . identifier[changeAttributeValue] ( identifier[area] . identifier[id] (), identifier[shift] + identifier[i] , identifier[sum] )
identifier[aggregation] . identifier[changeAttributeValue] (
identifier[area] . identifier[id] (), identifier[shift] + identifier[len] ( identifier[unique_exposure] ), identifier[total] )
keyword[for] identifier[i] , identifier[field] keyword[in] identifier[enumerate] ( identifier[absolute_values] . identifier[values] ()):
identifier[value] = identifier[field] [ literal[int] ]. identifier[get_value] (
identifier[aggregation_id] = identifier[aggregation_value] ,
)
identifier[target_index] = identifier[shift] + identifier[len] ( identifier[unique_exposure] )+ literal[int] + identifier[i]
identifier[aggregation] . identifier[changeAttributeValue] (
identifier[area] . identifier[id] (), identifier[target_index] , identifier[value] )
identifier[aggregation] . identifier[commitChanges] ()
identifier[aggregation] . identifier[keywords] [ literal[string] ]= identifier[layer_purpose_aggregation_summary] [ literal[string] ]
keyword[if] identifier[qgis_version] ()>= literal[int] :
identifier[aggregation] . identifier[setName] ( identifier[aggregation] . identifier[keywords] [ literal[string] ])
keyword[else] :
identifier[aggregation] . identifier[setLayerName] ( identifier[aggregation] . identifier[keywords] [ literal[string] ])
identifier[aggregation] . identifier[keywords] [ literal[string] ]=(
identifier[layer_purpose_aggregation_summary] [ literal[string] ])
identifier[aggregation] . identifier[keywords] [ literal[string] ]=(
identifier[aggregate_hazard] . identifier[keywords] [ literal[string] ]. identifier[copy] ())
identifier[check_layer] ( identifier[aggregation] )
keyword[return] identifier[aggregation] | def aggregation_summary(aggregate_hazard, aggregation):
"""Compute the summary from the aggregate hazard to the analysis layer.
Source layer :
| haz_id | haz_class | aggr_id | aggr_name | total_feature |
Target layer :
| aggr_id | aggr_name |
Output layer :
| aggr_id | aggr_name | count of affected features per exposure type
:param aggregate_hazard: The aggregate hazard vector layer.
:type aggregate_hazard: QgsVectorLayer
:param aggregation: The aggregation vector layer where to write statistics.
:type aggregation: QgsVectorLayer
:return: The new aggregation layer with summary.
:rtype: QgsVectorLayer
.. versionadded:: 4.0
"""
source_fields = aggregate_hazard.keywords['inasafe_fields']
target_fields = aggregation.keywords['inasafe_fields']
target_compulsory_fields = [aggregation_id_field, aggregation_name_field]
check_inputs(target_compulsory_fields, target_fields)
# Missing exposure_count_field
source_compulsory_fields = [aggregation_id_field, aggregation_name_field, hazard_id_field, hazard_class_field, affected_field]
check_inputs(source_compulsory_fields, source_fields)
pattern = exposure_count_field['key']
pattern = pattern.replace('%s', '')
unique_exposure = read_dynamic_inasafe_field(source_fields, exposure_count_field)
absolute_values = create_absolute_values_structure(aggregate_hazard, ['aggregation_id'])
flat_table = FlatTable('aggregation_id', 'exposure_class')
aggregation_index = source_fields[aggregation_id_field['key']]
# We want to loop over affected features only.
request = QgsFeatureRequest()
request.setFlags(QgsFeatureRequest.NoGeometry)
expression = '"%s" = \'%s\'' % (affected_field['field_name'], tr('True'))
request.setFilterExpression(expression)
for area in aggregate_hazard.getFeatures(request):
for (key, name_field) in list(source_fields.items()):
if key.endswith(pattern):
aggregation_id = area[aggregation_index]
exposure_class = key.replace(pattern, '')
value = area[name_field]
flat_table.add_value(value, aggregation_id=aggregation_id, exposure_class=exposure_class) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
# We summarize every absolute value.
for (field, field_definition) in list(absolute_values.items()):
value = area[field]
if value is None or value == '' or (hasattr(value, 'isNull') and value.isNull()):
value = 0 # depends on [control=['if'], data=[]]
field_definition[0].add_value(value, aggregation_id=area[aggregation_index]) # depends on [control=['for'], data=[]] # depends on [control=['for'], data=['area']]
shift = aggregation.fields().count()
aggregation.startEditing()
dynamic_structure = [[affected_exposure_count_field, unique_exposure]]
add_fields(aggregation, absolute_values, [total_affected_field], dynamic_structure)
aggregation_index = target_fields[aggregation_id_field['key']]
request = QgsFeatureRequest()
request.setFlags(QgsFeatureRequest.NoGeometry)
for area in aggregation.getFeatures(request):
aggregation_value = area[aggregation_index]
total = 0
for (i, val) in enumerate(unique_exposure):
sum = flat_table.get_value(aggregation_id=aggregation_value, exposure_class=val)
total += sum
aggregation.changeAttributeValue(area.id(), shift + i, sum) # depends on [control=['for'], data=[]]
aggregation.changeAttributeValue(area.id(), shift + len(unique_exposure), total)
for (i, field) in enumerate(absolute_values.values()):
value = field[0].get_value(aggregation_id=aggregation_value)
target_index = shift + len(unique_exposure) + 1 + i
aggregation.changeAttributeValue(area.id(), target_index, value) # depends on [control=['for'], data=[]] # depends on [control=['for'], data=['area']]
aggregation.commitChanges()
aggregation.keywords['title'] = layer_purpose_aggregation_summary['name']
if qgis_version() >= 21800:
aggregation.setName(aggregation.keywords['title']) # depends on [control=['if'], data=[]]
else:
aggregation.setLayerName(aggregation.keywords['title'])
aggregation.keywords['layer_purpose'] = layer_purpose_aggregation_summary['key']
aggregation.keywords['exposure_keywords'] = aggregate_hazard.keywords['exposure_keywords'].copy()
check_layer(aggregation)
return aggregation |
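The summary loop above accumulates per-feature counts in a FlatTable keyed by aggregation id and exposure class, then reads the totals back per aggregation area. The real FlatTable ships with InaSAFE; the minimal stand-in below is an assumption inferred from how add_value and get_value are called in aggregation_summary, and only sketches that two-key accumulation:

from collections import defaultdict

class FlatTable:
    # Stand-in for InaSAFE's FlatTable: sums values under a tuple of group keys.
    def __init__(self, *groups):
        self.groups = groups
        self.data = defaultdict(float)

    def add_value(self, value, **keys):
        self.data[tuple(keys[g] for g in self.groups)] += value

    def get_value(self, **keys):
        return self.data.get(tuple(keys[g] for g in self.groups), 0)

flat_table = FlatTable('aggregation_id', 'exposure_class')
flat_table.add_value(10, aggregation_id=1, exposure_class='road')
flat_table.add_value(5, aggregation_id=1, exposure_class='road')
flat_table.add_value(2, aggregation_id=1, exposure_class='building')
assert flat_table.get_value(aggregation_id=1, exposure_class='road') == 15
assert flat_table.get_value(aggregation_id=2, exposure_class='road') == 0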
def _addRawResult(self, resid, values={}, override=False):
""" Structure of values dict (dict entry for each analysis/field):
{'ALC': {'ALC': '13.55',
'DefaultResult': 'ALC',
'Remarks': ''},
'CO2': {'CO2': '0.66',
'DefaultResult': 'CO2',
'Remarks': ''},
'Date': {'Date': '21/11/2013',
'DefaultResult': 'Date',
'Remarks': ''},
'Malo': {'DefaultResult': 'Malo',
'Malo': '0.26',
'Remarks': ''},
'Meth': {'DefaultResult': 'Meth',
'Meth': '0.58',
'Remarks': ''},
'Rep #': {'DefaultResult': 'Rep #',
'Remarks': '',
'Rep #': '1'}
}
"""
if 'Date' in values and 'Time' in values:
try:
dtstr = '%s %s' % (values.get('Date')['Date'], values.get('Time')['Time'])
# 2/11/2005 13:33 PM
from datetime import datetime
dtobj = datetime.strptime(dtstr, '%d/%m/%Y %H:%M %p')
dateTime = dtobj.strftime("%Y%m%d %H:%M:%S")
except:
pass
del values['Date']
del values['Time']
# Adding the date, time and calibration inside each analysis service result.
# I'm adding the calibration number here because it is the way we can avoid
# WINE-76 easily
for keyword in values.keys():
values[keyword]['DateTime'] = dateTime
values[keyword]['Calibration'] = self._calibration
# First, we must check whether a row with results for the same
# date already exists, in order to take into account replicas,
# Mean and Standard Deviation
dtidx = values.get('Calibration',{}).get('Calibration',0)
rows = self.getRawResults().get(resid, [])
row, rows = self._extractrowbycalibration(rows, self._calibration)
is_std = values.get('Rep #',{}).get('Rep #','') == 'Sd'
is_mean = values.get('Rep #',{}).get('Rep #','') == 'Mean'
if is_std:
# Add the results of Standard Deviation. For each acode, add
# the Standard Result
del values['Rep #']
for key, value in values.iteritems():
row['Sd-%s' % key] = value
elif is_mean:
# Remove the 'Rep #' item and override with new values
row = values
del row['Rep #']
else:
# Override with new values
row = values
rows.append(row)
isfirst = True
for row in rows:
WinescanCSVParser._addRawResult(self, resid, row, isfirst)
isfirst = False | def function[_addRawResult, parameter[self, resid, values, override]]:
constant[ Structure of values dict (dict entry for each analysis/field):
{'ALC': {'ALC': '13.55',
'DefaultResult': 'ALC',
'Remarks': ''},
'CO2': {'CO2': '0.66',
'DefaultResult': 'CO2',
'Remarks': ''},
'Date': {'Date': '21/11/2013',
'DefaultResult': 'Date',
'Remarks': ''},
'Malo': {'DefaultResult': 'Malo',
'Malo': '0.26',
'Remarks': ''},
'Meth': {'DefaultResult': 'Meth',
'Meth': '0.58',
'Remarks': ''},
'Rep #': {'DefaultResult': 'Rep #',
'Remarks': '',
'Rep #': '1'}
}
]
if <ast.BoolOp object at 0x7da18bc70eb0> begin[:]
<ast.Try object at 0x7da18bc70520>
<ast.Delete object at 0x7da204961750>
<ast.Delete object at 0x7da204962c50>
for taget[name[keyword]] in starred[call[name[values].keys, parameter[]]] begin[:]
call[call[name[values]][name[keyword]]][constant[DateTime]] assign[=] name[dateTime]
call[call[name[values]][name[keyword]]][constant[Calibration]] assign[=] name[self]._calibration
variable[dtidx] assign[=] call[call[name[values].get, parameter[constant[Calibration], dictionary[[], []]]].get, parameter[constant[Calibration], constant[0]]]
variable[rows] assign[=] call[call[name[self].getRawResults, parameter[]].get, parameter[name[resid], list[[]]]]
<ast.Tuple object at 0x7da204960ca0> assign[=] call[name[self]._extractrowbycalibration, parameter[name[rows], name[self]._calibration]]
variable[is_std] assign[=] compare[call[call[name[values].get, parameter[constant[Rep #], dictionary[[], []]]].get, parameter[constant[Rep #], constant[]]] equal[==] constant[Sd]]
variable[is_mean] assign[=] compare[call[call[name[values].get, parameter[constant[Rep #], dictionary[[], []]]].get, parameter[constant[Rep #], constant[]]] equal[==] constant[Mean]]
if name[is_std] begin[:]
<ast.Delete object at 0x7da18fe91f60>
for taget[tuple[[<ast.Name object at 0x7da18fe92500>, <ast.Name object at 0x7da18fe92b00>]]] in starred[call[name[values].iteritems, parameter[]]] begin[:]
call[name[row]][binary_operation[constant[Sd-%s] <ast.Mod object at 0x7da2590d6920> name[key]]] assign[=] name[value]
call[name[rows].append, parameter[name[row]]]
variable[isfirst] assign[=] constant[True]
for taget[name[row]] in starred[name[rows]] begin[:]
call[name[WinescanCSVParser]._addRawResult, parameter[name[self], name[resid], name[row], name[isfirst]]]
variable[isfirst] assign[=] constant[False] | keyword[def] identifier[_addRawResult] ( identifier[self] , identifier[resid] , identifier[values] ={}, identifier[override] = keyword[False] ):
literal[string]
keyword[if] literal[string] keyword[in] identifier[values] keyword[and] literal[string] keyword[in] identifier[values] :
keyword[try] :
identifier[dtstr] = literal[string] %( identifier[values] . identifier[get] ( literal[string] )[ literal[string] ], identifier[values] . identifier[get] ( literal[string] )[ literal[string] ])
keyword[from] identifier[datetime] keyword[import] identifier[datetime]
identifier[dtobj] = identifier[datetime] . identifier[strptime] ( identifier[dtstr] , literal[string] )
identifier[dateTime] = identifier[dtobj] . identifier[strftime] ( literal[string] )
keyword[except] :
keyword[pass]
keyword[del] identifier[values] [ literal[string] ]
keyword[del] identifier[values] [ literal[string] ]
keyword[for] identifier[keyword] keyword[in] identifier[values] . identifier[keys] ():
identifier[values] [ identifier[keyword] ][ literal[string] ]= identifier[dateTime]
identifier[values] [ identifier[keyword] ][ literal[string] ]= identifier[self] . identifier[_calibration]
identifier[dtidx] = identifier[values] . identifier[get] ( literal[string] ,{}). identifier[get] ( literal[string] , literal[int] )
identifier[rows] = identifier[self] . identifier[getRawResults] (). identifier[get] ( identifier[resid] ,[])
identifier[row] , identifier[rows] = identifier[self] . identifier[_extractrowbycalibration] ( identifier[rows] , identifier[self] . identifier[_calibration] )
identifier[is_std] = identifier[values] . identifier[get] ( literal[string] ,{}). identifier[get] ( literal[string] , literal[string] )== literal[string]
identifier[is_mean] = identifier[values] . identifier[get] ( literal[string] ,{}). identifier[get] ( literal[string] , literal[string] )== literal[string]
keyword[if] identifier[is_std] :
keyword[del] identifier[values] [ literal[string] ]
keyword[for] identifier[key] , identifier[value] keyword[in] identifier[values] . identifier[iteritems] ():
identifier[row] [ literal[string] % identifier[key] ]= identifier[value]
keyword[elif] identifier[is_mean] :
identifier[row] = identifier[values]
keyword[del] identifier[row] [ literal[string] ]
keyword[else] :
identifier[row] = identifier[values]
identifier[rows] . identifier[append] ( identifier[row] )
identifier[isfirst] = keyword[True]
keyword[for] identifier[row] keyword[in] identifier[rows] :
identifier[WinescanCSVParser] . identifier[_addRawResult] ( identifier[self] , identifier[resid] , identifier[row] , identifier[isfirst] )
identifier[isfirst] = keyword[False] | def _addRawResult(self, resid, values={}, override=False):
""" Structure of values dict (dict entry for each analysis/field):
{'ALC': {'ALC': '13.55',
'DefaultResult': 'ALC',
'Remarks': ''},
'CO2': {'CO2': '0.66',
'DefaultResult': 'CO2',
'Remarks': ''},
'Date': {'Date': '21/11/2013',
'DefaultResult': 'Date',
'Remarks': ''},
'Malo': {'DefaultResult': 'Malo',
'Malo': '0.26',
'Remarks': ''},
'Meth': {'DefaultResult': 'Meth',
'Meth': '0.58',
'Remarks': ''},
'Rep #': {'DefaultResult': 'Rep #',
'Remarks': '',
'Rep #': '1'}
}
"""
if 'Date' in values and 'Time' in values:
try:
dtstr = '%s %s' % (values.get('Date')['Date'], values.get('Time')['Time'])
# 2/11/2005 13:33 PM
from datetime import datetime
dtobj = datetime.strptime(dtstr, '%d/%m/%Y %H:%M %p')
dateTime = dtobj.strftime('%Y%m%d %H:%M:%S') # depends on [control=['try'], data=[]]
except:
pass # depends on [control=['except'], data=[]]
del values['Date']
del values['Time'] # depends on [control=['if'], data=[]]
# Adding the date, time and calibration inside each analysis service result.
# I'm adding the calibration number here because it is the way we can avoid
# WINE-76 easily
for keyword in values.keys():
values[keyword]['DateTime'] = dateTime
values[keyword]['Calibration'] = self._calibration # depends on [control=['for'], data=['keyword']]
# First, we must check whether a row with results for the same
# date already exists, in order to take into account replicas,
# Mean and Standard Deviation
dtidx = values.get('Calibration', {}).get('Calibration', 0)
rows = self.getRawResults().get(resid, [])
(row, rows) = self._extractrowbycalibration(rows, self._calibration)
is_std = values.get('Rep #', {}).get('Rep #', '') == 'Sd'
is_mean = values.get('Rep #', {}).get('Rep #', '') == 'Mean'
if is_std:
# Add the results of Standard Deviation. For each acode, add
# the Standard Result
del values['Rep #']
for (key, value) in values.iteritems():
row['Sd-%s' % key] = value # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
elif is_mean:
# Remove the 'Rep #' item and override with new values
row = values
del row['Rep #'] # depends on [control=['if'], data=[]]
else:
# Override with new values
row = values
rows.append(row)
isfirst = True
for row in rows:
WinescanCSVParser._addRawResult(self, resid, row, isfirst)
isfirst = False # depends on [control=['for'], data=['row']] |
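The Date/Time handling above concatenates the two columns and reparses them with '%d/%m/%Y %H:%M %p' (the sample '2/11/2005 13:33 PM' is quoted in the inline comment). A standalone round trip of that exact format, showing that the trailing %p token is consumed but does not change a 24-hour %H value:

from datetime import datetime

dtstr = '2/11/2005 13:33 PM'
dtobj = datetime.strptime(dtstr, '%d/%m/%Y %H:%M %p')
print(dtobj.strftime('%Y%m%d %H:%M:%S'))  # 20051102 13:33:00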
def pin_assets(self, file_or_dir_path: Path) -> List[Dict[str, str]]:
"""
Return a list of dicts, one per pinned file, each containing the IPFS hash, file name, and size.
"""
if file_or_dir_path.is_dir():
asset_data = [dummy_ipfs_pin(path) for path in file_or_dir_path.glob("*")]
elif file_or_dir_path.is_file():
asset_data = [dummy_ipfs_pin(file_or_dir_path)]
else:
raise FileNotFoundError(
f"{file_or_dir_path} is not a valid file or directory path."
)
return asset_data | def function[pin_assets, parameter[self, file_or_dir_path]]:
constant[
Return a list of dicts, one per pinned file, each containing the IPFS hash, file name, and size.
]
if call[name[file_or_dir_path].is_dir, parameter[]] begin[:]
variable[asset_data] assign[=] <ast.ListComp object at 0x7da18f58d480>
return[name[asset_data]] | keyword[def] identifier[pin_assets] ( identifier[self] , identifier[file_or_dir_path] : identifier[Path] )-> identifier[List] [ identifier[Dict] [ identifier[str] , identifier[str] ]]:
literal[string]
keyword[if] identifier[file_or_dir_path] . identifier[is_dir] ():
identifier[asset_data] =[ identifier[dummy_ipfs_pin] ( identifier[path] ) keyword[for] identifier[path] keyword[in] identifier[file_or_dir_path] . identifier[glob] ( literal[string] )]
keyword[elif] identifier[file_or_dir_path] . identifier[is_file] ():
identifier[asset_data] =[ identifier[dummy_ipfs_pin] ( identifier[file_or_dir_path] )]
keyword[else] :
keyword[raise] identifier[FileNotFoundError] (
literal[string]
)
keyword[return] identifier[asset_data] | def pin_assets(self, file_or_dir_path: Path) -> List[Dict[str, str]]:
"""
Return a list of dicts, one per pinned file, each containing the IPFS hash, file name, and size.
"""
if file_or_dir_path.is_dir():
asset_data = [dummy_ipfs_pin(path) for path in file_or_dir_path.glob('*')] # depends on [control=['if'], data=[]]
elif file_or_dir_path.is_file():
asset_data = [dummy_ipfs_pin(file_or_dir_path)] # depends on [control=['if'], data=[]]
else:
raise FileNotFoundError(f'{file_or_dir_path} is not a valid file or directory path.')
return asset_data |
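dummy_ipfs_pin is referenced but not defined in this snippet. A plausible sketch of what it might return, given the hash/name/size fields the docstring mentions — the helper name is real, but this body is an assumption, and the hash is a plain SHA-256 digest rather than a real IPFS multihash:

import hashlib
from pathlib import Path

def dummy_ipfs_pin(path: Path) -> dict:
    # Hypothetical body: the real helper lives elsewhere in the package.
    content = path.read_bytes()
    return {
        'Name': path.name,                            # file name
        'Size': str(len(content)),                    # size in bytes
        'Hash': hashlib.sha256(content).hexdigest(),  # placeholder digest, not a multihash
    }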
def add_service_subnet(self, context_id, subnet_id):
"""Adds a service subnet to a tunnel context.
:param int context_id: The id-value representing the context instance.
:param int subnet_id: The id-value representing the service subnet.
:return bool: True if service subnet addition was successful.
"""
return self.context.addServiceSubnetToNetworkTunnel(subnet_id,
id=context_id) | def function[add_service_subnet, parameter[self, context_id, subnet_id]]:
constant[Adds a service subnet to a tunnel context.
:param int context_id: The id-value representing the context instance.
:param int subnet_id: The id-value representing the service subnet.
:return bool: True if service subnet addition was successful.
]
return[call[name[self].context.addServiceSubnetToNetworkTunnel, parameter[name[subnet_id]]]] | keyword[def] identifier[add_service_subnet] ( identifier[self] , identifier[context_id] , identifier[subnet_id] ):
literal[string]
keyword[return] identifier[self] . identifier[context] . identifier[addServiceSubnetToNetworkTunnel] ( identifier[subnet_id] ,
identifier[id] = identifier[context_id] ) | def add_service_subnet(self, context_id, subnet_id):
"""Adds a service subnet to a tunnel context.
:param int context_id: The id-value representing the context instance.
:param int subnet_id: The id-value representing the service subnet.
:return bool: True if service subnet addition was successful.
"""
return self.context.addServiceSubnetToNetworkTunnel(subnet_id, id=context_id) |
def evaluate(self, filename):
"""Runs the lemmatize function over the contents of the file, counting the proportion of unfound lemmas."""
with open(filename, 'r') as infile:
lines = infile.read().splitlines()
lemma_count = 0
token_count = 0
for line in lines:
line = re.sub(r'[.,!?:;0-9]', ' ', line)
lemmas = [lemma for (_, lemma) in self.lemmatize(line, best_guess=False)]
token_count += len(lemmas)
lemma_count += len(lemmas) - lemmas.count([])
return lemma_count/token_count | def function[evaluate, parameter[self, filename]]:
constant[Runs the lemmatize function over the contents of the file, counting the proportion of tokens for which a lemma was found.]
with call[name[open], parameter[name[filename], constant[r]]] begin[:]
variable[lines] assign[=] call[call[name[infile].read, parameter[]].splitlines, parameter[]]
variable[lemma_count] assign[=] constant[0]
variable[token_count] assign[=] constant[0]
for taget[name[line]] in starred[name[lines]] begin[:]
variable[line] assign[=] call[name[re].sub, parameter[constant[[.,!?:;0-9]], constant[ ], name[line]]]
variable[lemmas] assign[=] <ast.ListComp object at 0x7da204565090>
<ast.AugAssign object at 0x7da204566050>
<ast.AugAssign object at 0x7da204567e80>
return[binary_operation[name[lemma_count] / name[token_count]]] | keyword[def] identifier[evaluate] ( identifier[self] , identifier[filename] ):
literal[string]
keyword[with] identifier[open] ( identifier[filename] , literal[string] ) keyword[as] identifier[infile] :
identifier[lines] = identifier[infile] . identifier[read] (). identifier[splitlines] ()
identifier[lemma_count] = literal[int]
identifier[token_count] = literal[int]
keyword[for] identifier[line] keyword[in] identifier[lines] :
identifier[line] = identifier[re] . identifier[sub] ( literal[string] , literal[string] , identifier[line] )
identifier[lemmas] =[ identifier[lemma] keyword[for] ( identifier[_] , identifier[lemma] ) keyword[in] identifier[self] . identifier[lemmatize] ( identifier[line] , identifier[best_guess] = keyword[False] )]
identifier[token_count] += identifier[len] ( identifier[lemmas] )
identifier[lemma_count] += identifier[len] ( identifier[lemmas] )- identifier[lemmas] . identifier[count] ([])
keyword[return] identifier[lemma_count] / identifier[token_count] | def evaluate(self, filename):
"""Runs the lemmatize function over the contents of the file, counting the proportion of unfound lemmas."""
with open(filename, 'r') as infile:
lines = infile.read().splitlines()
lemma_count = 0
token_count = 0
for line in lines:
line = re.sub('[.,!?:;0-9]', ' ', line)
lemmas = [lemma for (_, lemma) in self.lemmatize(line, best_guess=False)]
token_count += len(lemmas)
lemma_count += len(lemmas) - lemmas.count([]) # depends on [control=['for'], data=['line']]
return lemma_count / token_count # depends on [control=['with'], data=['infile']] |
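The metric above hinges on len(lemmas) - lemmas.count([]): a token the backoff chain cannot resolve with best_guess=False is paired with an empty list, so subtracting the count of [] entries leaves the number of successfully lemmatized tokens. A tiny standalone illustration of the arithmetic:

lemmas = ['puella', [], 'amare', 'video', []]  # [] marks an unresolved token
token_count = len(lemmas)                      # 5
found = len(lemmas) - lemmas.count([])         # 3
print(found / token_count)                     # 0.6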
def OnApprove(self, event):
"""File approve event handler"""
if not self.main_window.safe_mode:
return
msg = _(u"You are going to approve and trust a file that\n"
u"you have not created yourself.\n"
u"After proceeding, the file is executed.\n \n"
u"It may harm your system as any program can.\n"
u"Please check all cells thoroughly before\nproceeding.\n \n"
u"Proceed and sign this file as trusted?")
short_msg = _("Security warning")
if self.main_window.interfaces.get_warning_choice(msg, short_msg):
# Leave safe mode
self.main_window.grid.actions.leave_safe_mode()
# Display safe mode end in status bar
statustext = _("Safe mode deactivated.")
post_command_event(self.main_window, self.main_window.StatusBarMsg,
text=statustext) | def function[OnApprove, parameter[self, event]]:
constant[File approve event handler]
if <ast.UnaryOp object at 0x7da204620040> begin[:]
return[None]
variable[msg] assign[=] call[name[_], parameter[constant[You are going to approve and trust a file that
you have not created yourself.
After proceeding, the file is executed.
It may harm your system as any program can.
Please check all cells thoroughly before
proceeding.
Proceed and sign this file as trusted?]]]
variable[short_msg] assign[=] call[name[_], parameter[constant[Security warning]]]
if call[name[self].main_window.interfaces.get_warning_choice, parameter[name[msg], name[short_msg]]] begin[:]
call[name[self].main_window.grid.actions.leave_safe_mode, parameter[]]
variable[statustext] assign[=] call[name[_], parameter[constant[Safe mode deactivated.]]]
call[name[post_command_event], parameter[name[self].main_window, name[self].main_window.StatusBarMsg]] | keyword[def] identifier[OnApprove] ( identifier[self] , identifier[event] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[main_window] . identifier[safe_mode] :
keyword[return]
identifier[msg] = identifier[_] ( literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string] )
identifier[short_msg] = identifier[_] ( literal[string] )
keyword[if] identifier[self] . identifier[main_window] . identifier[interfaces] . identifier[get_warning_choice] ( identifier[msg] , identifier[short_msg] ):
identifier[self] . identifier[main_window] . identifier[grid] . identifier[actions] . identifier[leave_safe_mode] ()
identifier[statustext] = identifier[_] ( literal[string] )
identifier[post_command_event] ( identifier[self] . identifier[main_window] , identifier[self] . identifier[main_window] . identifier[StatusBarMsg] ,
identifier[text] = identifier[statustext] ) | def OnApprove(self, event):
"""File approve event handler"""
if not self.main_window.safe_mode:
return # depends on [control=['if'], data=[]]
msg = _(u'You are going to approve and trust a file that\nyou have not created yourself.\nAfter proceeding, the file is executed.\n \nIt may harm your system as any program can.\nPlease check all cells thoroughly before\nproceeding.\n \nProceed and sign this file as trusted?')
short_msg = _('Security warning')
if self.main_window.interfaces.get_warning_choice(msg, short_msg):
# Leave safe mode
self.main_window.grid.actions.leave_safe_mode()
# Display safe mode end in status bar
statustext = _('Safe mode deactivated.')
post_command_event(self.main_window, self.main_window.StatusBarMsg, text=statustext) # depends on [control=['if'], data=[]] |
def get_trail_ids(cls, event, mode):
"""extract resources ids from a cloud trail event."""
resource_ids = ()
event_name = event['detail']['eventName']
event_source = event['detail']['eventSource']
for e in mode.get('events', []):
if not isinstance(e, dict):
# Check if we have a short cut / alias
info = CloudWatchEvents.match(event)
if info:
return info['ids'].search(event)
continue
if event_name != e.get('event'):
continue
if event_source != e.get('source'):
continue
id_query = e.get('ids')
if not id_query:
raise ValueError("No id query configured")
evt = event
# be forgiving of users specifying the id query with or without the 'detail.' prefix
if not id_query.startswith('detail.'):
evt = event.get('detail', {})
resource_ids = jmespath.search(id_query, evt)
if resource_ids:
break
return resource_ids | def function[get_trail_ids, parameter[cls, event, mode]]:
constant[Extract resource ids from a CloudTrail event.]
variable[resource_ids] assign[=] tuple[[]]
variable[event_name] assign[=] call[call[name[event]][constant[detail]]][constant[eventName]]
variable[event_source] assign[=] call[call[name[event]][constant[detail]]][constant[eventSource]]
for taget[name[e]] in starred[call[name[mode].get, parameter[constant[events], list[[]]]]] begin[:]
if <ast.UnaryOp object at 0x7da1b1ff5480> begin[:]
variable[info] assign[=] call[name[CloudWatchEvents].match, parameter[name[event]]]
if name[info] begin[:]
return[call[call[name[info]][constant[ids]].search, parameter[name[event]]]]
continue
if compare[name[event_name] not_equal[!=] call[name[e].get, parameter[constant[event]]]] begin[:]
continue
if compare[name[event_source] not_equal[!=] call[name[e].get, parameter[constant[source]]]] begin[:]
continue
variable[id_query] assign[=] call[name[e].get, parameter[constant[ids]]]
if <ast.UnaryOp object at 0x7da1b1fc8f40> begin[:]
<ast.Raise object at 0x7da1b1fcb010>
variable[evt] assign[=] name[event]
if <ast.UnaryOp object at 0x7da1b1fc8430> begin[:]
variable[evt] assign[=] call[name[event].get, parameter[constant[detail], dictionary[[], []]]]
variable[resource_ids] assign[=] call[name[jmespath].search, parameter[name[id_query], name[evt]]]
if name[resource_ids] begin[:]
break
return[name[resource_ids]] | keyword[def] identifier[get_trail_ids] ( identifier[cls] , identifier[event] , identifier[mode] ):
literal[string]
identifier[resource_ids] =()
identifier[event_name] = identifier[event] [ literal[string] ][ literal[string] ]
identifier[event_source] = identifier[event] [ literal[string] ][ literal[string] ]
keyword[for] identifier[e] keyword[in] identifier[mode] . identifier[get] ( literal[string] ,[]):
keyword[if] keyword[not] identifier[isinstance] ( identifier[e] , identifier[dict] ):
identifier[info] = identifier[CloudWatchEvents] . identifier[match] ( identifier[event] )
keyword[if] identifier[info] :
keyword[return] identifier[info] [ literal[string] ]. identifier[search] ( identifier[event] )
keyword[continue]
keyword[if] identifier[event_name] != identifier[e] . identifier[get] ( literal[string] ):
keyword[continue]
keyword[if] identifier[event_source] != identifier[e] . identifier[get] ( literal[string] ):
keyword[continue]
identifier[id_query] = identifier[e] . identifier[get] ( literal[string] )
keyword[if] keyword[not] identifier[id_query] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[evt] = identifier[event]
keyword[if] keyword[not] identifier[id_query] . identifier[startswith] ( literal[string] ):
identifier[evt] = identifier[event] . identifier[get] ( literal[string] ,{})
identifier[resource_ids] = identifier[jmespath] . identifier[search] ( identifier[id_query] , identifier[evt] )
keyword[if] identifier[resource_ids] :
keyword[break]
keyword[return] identifier[resource_ids] | def get_trail_ids(cls, event, mode):
"""extract resources ids from a cloud trail event."""
resource_ids = ()
event_name = event['detail']['eventName']
event_source = event['detail']['eventSource']
for e in mode.get('events', []):
if not isinstance(e, dict):
# Check if we have a short cut / alias
info = CloudWatchEvents.match(event)
if info:
return info['ids'].search(event) # depends on [control=['if'], data=[]]
continue # depends on [control=['if'], data=[]]
if event_name != e.get('event'):
continue # depends on [control=['if'], data=[]]
if event_source != e.get('source'):
continue # depends on [control=['if'], data=[]]
id_query = e.get('ids')
if not id_query:
raise ValueError('No id query configured') # depends on [control=['if'], data=[]]
evt = event
# be forgiving of users specifying the id query with or without the 'detail.' prefix
if not id_query.startswith('detail.'):
evt = event.get('detail', {}) # depends on [control=['if'], data=[]]
resource_ids = jmespath.search(id_query, evt)
if resource_ids:
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['e']]
return resource_ids |
def _locateAll_opencv(needleImage, haystackImage, grayscale=None, limit=10000, region=None, step=1,
confidence=0.999):
""" faster but more memory-intensive than pure python
step 2 skips every other row and column = ~3x faster but prone to miss;
to compensate, the algorithm automatically reduces the confidence
threshold by 5% (which helps but will not avoid all misses).
limitations:
- OpenCV 3.x & python 3.x not tested
- RGBA images are treated as RGB (ignores alpha channel)
"""
if grayscale is None:
grayscale = GRAYSCALE_DEFAULT
confidence = float(confidence)
needleImage = _load_cv2(needleImage, grayscale)
needleHeight, needleWidth = needleImage.shape[:2]
haystackImage = _load_cv2(haystackImage, grayscale)
if region:
haystackImage = haystackImage[region[1]:region[1]+region[3],
region[0]:region[0]+region[2]]
else:
region = (0, 0) # full image; these values used in the yield statement
if (haystackImage.shape[0] < needleImage.shape[0] or
haystackImage.shape[1] < needleImage.shape[1]):
# avoid semi-cryptic OpenCV error below if bad size
raise ValueError('needle dimension(s) exceed the haystack image or region dimensions')
if step == 2:
confidence *= 0.95
needleImage = needleImage[::step, ::step]
haystackImage = haystackImage[::step, ::step]
else:
step = 1
# get all matches at once, credit: https://stackoverflow.com/questions/7670112/finding-a-subimage-inside-a-numpy-image/9253805#9253805
result = cv2.matchTemplate(haystackImage, needleImage, cv2.TM_CCOEFF_NORMED)
match_indices = numpy.arange(result.size)[(result > confidence).flatten()]
matches = numpy.unravel_index(match_indices[:limit], result.shape)
if len(matches[0]) == 0:
if USE_IMAGE_NOT_FOUND_EXCEPTION:
raise ImageNotFoundException('Could not locate the image (highest confidence = %.3f)' % result.max())
else:
return None
# use a generator for API consistency:
matchx = matches[1] * step + region[0] # vectorized
matchy = matches[0] * step + region[1]
for x, y in zip(matchx, matchy):
yield Box(x, y, needleWidth, needleHeight) | def function[_locateAll_opencv, parameter[needleImage, haystackImage, grayscale, limit, region, step, confidence]]:
constant[ faster but more memory-intensive than pure python
step 2 skips every other row and column = ~3x faster but prone to miss;
to compensate, the algorithm automatically reduces the confidence
threshold by 5% (which helps but will not avoid all misses).
limitations:
- OpenCV 3.x & python 3.x not tested
- RGBA images are treated as RGB (ignores alpha channel)
]
if compare[name[grayscale] is constant[None]] begin[:]
variable[grayscale] assign[=] name[GRAYSCALE_DEFAULT]
variable[confidence] assign[=] call[name[float], parameter[name[confidence]]]
variable[needleImage] assign[=] call[name[_load_cv2], parameter[name[needleImage], name[grayscale]]]
<ast.Tuple object at 0x7da18dc05030> assign[=] call[name[needleImage].shape][<ast.Slice object at 0x7da18dc06530>]
variable[haystackImage] assign[=] call[name[_load_cv2], parameter[name[haystackImage], name[grayscale]]]
if name[region] begin[:]
variable[haystackImage] assign[=] call[name[haystackImage]][tuple[[<ast.Slice object at 0x7da18dc05000>, <ast.Slice object at 0x7da20c6e49d0>]]]
if <ast.BoolOp object at 0x7da20c6e5420> begin[:]
<ast.Raise object at 0x7da20c6e7ee0>
if compare[name[step] equal[==] constant[2]] begin[:]
<ast.AugAssign object at 0x7da20c6e7dc0>
variable[needleImage] assign[=] call[name[needleImage]][tuple[[<ast.Slice object at 0x7da20c6e4af0>, <ast.Slice object at 0x7da20c6e6320>]]]
variable[haystackImage] assign[=] call[name[haystackImage]][tuple[[<ast.Slice object at 0x7da18bcc91e0>, <ast.Slice object at 0x7da18bccaf20>]]]
variable[result] assign[=] call[name[cv2].matchTemplate, parameter[name[haystackImage], name[needleImage], name[cv2].TM_CCOEFF_NORMED]]
variable[match_indices] assign[=] call[call[name[numpy].arange, parameter[name[result].size]]][call[compare[name[result] greater[>] name[confidence]].flatten, parameter[]]]
variable[matches] assign[=] call[name[numpy].unravel_index, parameter[call[name[match_indices]][<ast.Slice object at 0x7da18dc04d30>], name[result].shape]]
if compare[call[name[len], parameter[call[name[matches]][constant[0]]]] equal[==] constant[0]] begin[:]
if name[USE_IMAGE_NOT_FOUND_EXCEPTION] begin[:]
<ast.Raise object at 0x7da18dc078e0>
variable[matchx] assign[=] binary_operation[binary_operation[call[name[matches]][constant[1]] * name[step]] + call[name[region]][constant[0]]]
variable[matchy] assign[=] binary_operation[binary_operation[call[name[matches]][constant[0]] * name[step]] + call[name[region]][constant[1]]]
for taget[tuple[[<ast.Name object at 0x7da18dc074f0>, <ast.Name object at 0x7da18dc06230>]]] in starred[call[name[zip], parameter[name[matchx], name[matchy]]]] begin[:]
<ast.Yield object at 0x7da18dc04040> | keyword[def] identifier[_locateAll_opencv] ( identifier[needleImage] , identifier[haystackImage] , identifier[grayscale] = keyword[None] , identifier[limit] = literal[int] , identifier[region] = keyword[None] , identifier[step] = literal[int] ,
identifier[confidence] = literal[int] ):
literal[string]
keyword[if] identifier[grayscale] keyword[is] keyword[None] :
identifier[grayscale] = identifier[GRAYSCALE_DEFAULT]
identifier[confidence] = identifier[float] ( identifier[confidence] )
identifier[needleImage] = identifier[_load_cv2] ( identifier[needleImage] , identifier[grayscale] )
identifier[needleHeight] , identifier[needleWidth] = identifier[needleImage] . identifier[shape] [: literal[int] ]
identifier[haystackImage] = identifier[_load_cv2] ( identifier[haystackImage] , identifier[grayscale] )
keyword[if] identifier[region] :
identifier[haystackImage] = identifier[haystackImage] [ identifier[region] [ literal[int] ]: identifier[region] [ literal[int] ]+ identifier[region] [ literal[int] ],
identifier[region] [ literal[int] ]: identifier[region] [ literal[int] ]+ identifier[region] [ literal[int] ]]
keyword[else] :
identifier[region] =( literal[int] , literal[int] )
keyword[if] ( identifier[haystackImage] . identifier[shape] [ literal[int] ]< identifier[needleImage] . identifier[shape] [ literal[int] ] keyword[or]
identifier[haystackImage] . identifier[shape] [ literal[int] ]< identifier[needleImage] . identifier[shape] [ literal[int] ]):
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[step] == literal[int] :
identifier[confidence] *= literal[int]
identifier[needleImage] = identifier[needleImage] [:: identifier[step] ,:: identifier[step] ]
identifier[haystackImage] = identifier[haystackImage] [:: identifier[step] ,:: identifier[step] ]
keyword[else] :
identifier[step] = literal[int]
identifier[result] = identifier[cv2] . identifier[matchTemplate] ( identifier[haystackImage] , identifier[needleImage] , identifier[cv2] . identifier[TM_CCOEFF_NORMED] )
identifier[match_indices] = identifier[numpy] . identifier[arange] ( identifier[result] . identifier[size] )[( identifier[result] > identifier[confidence] ). identifier[flatten] ()]
identifier[matches] = identifier[numpy] . identifier[unravel_index] ( identifier[match_indices] [: identifier[limit] ], identifier[result] . identifier[shape] )
keyword[if] identifier[len] ( identifier[matches] [ literal[int] ])== literal[int] :
keyword[if] identifier[USE_IMAGE_NOT_FOUND_EXCEPTION] :
keyword[raise] identifier[ImageNotFoundException] ( literal[string] % identifier[result] . identifier[max] ())
keyword[else] :
keyword[return] keyword[None]
identifier[matchx] = identifier[matches] [ literal[int] ]* identifier[step] + identifier[region] [ literal[int] ]
identifier[matchy] = identifier[matches] [ literal[int] ]* identifier[step] + identifier[region] [ literal[int] ]
keyword[for] identifier[x] , identifier[y] keyword[in] identifier[zip] ( identifier[matchx] , identifier[matchy] ):
keyword[yield] identifier[Box] ( identifier[x] , identifier[y] , identifier[needleWidth] , identifier[needleHeight] ) | def _locateAll_opencv(needleImage, haystackImage, grayscale=None, limit=10000, region=None, step=1, confidence=0.999):
""" faster but more memory-intensive than pure python
step 2 skips every other row and column = ~3x faster but prone to miss;
to compensate, the algorithm automatically reduces the confidence
threshold by 5% (which helps but will not avoid all misses).
limitations:
- OpenCV 3.x & python 3.x not tested
- RGBA images are treated as RGB (ignores alpha channel)
"""
if grayscale is None:
grayscale = GRAYSCALE_DEFAULT # depends on [control=['if'], data=['grayscale']]
confidence = float(confidence)
needleImage = _load_cv2(needleImage, grayscale)
(needleHeight, needleWidth) = needleImage.shape[:2]
haystackImage = _load_cv2(haystackImage, grayscale)
if region:
haystackImage = haystackImage[region[1]:region[1] + region[3], region[0]:region[0] + region[2]] # depends on [control=['if'], data=[]]
else:
region = (0, 0) # full image; these values used in the yield statement
if haystackImage.shape[0] < needleImage.shape[0] or haystackImage.shape[1] < needleImage.shape[1]:
# avoid semi-cryptic OpenCV error below if bad size
raise ValueError('needle dimension(s) exceed the haystack image or region dimensions') # depends on [control=['if'], data=[]]
if step == 2:
confidence *= 0.95
needleImage = needleImage[::step, ::step]
haystackImage = haystackImage[::step, ::step] # depends on [control=['if'], data=['step']]
else:
step = 1
# get all matches at once, credit: https://stackoverflow.com/questions/7670112/finding-a-subimage-inside-a-numpy-image/9253805#9253805
result = cv2.matchTemplate(haystackImage, needleImage, cv2.TM_CCOEFF_NORMED)
match_indices = numpy.arange(result.size)[(result > confidence).flatten()]
matches = numpy.unravel_index(match_indices[:limit], result.shape)
if len(matches[0]) == 0:
if USE_IMAGE_NOT_FOUND_EXCEPTION:
raise ImageNotFoundException('Could not locate the image (highest confidence = %.3f)' % result.max()) # depends on [control=['if'], data=[]]
else:
return None # depends on [control=['if'], data=[]]
# use a generator for API consistency:
matchx = matches[1] * step + region[0] # vectorized
matchy = matches[0] * step + region[1]
for (x, y) in zip(matchx, matchy):
yield Box(x, y, needleWidth, needleHeight) # depends on [control=['for'], data=[]] |
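The batched extraction credited in the inline comment reduces to thresholding the correlation map and unraveling the surviving flat indices back into (row, col) pairs. A numpy-only sketch on a synthetic result array, so no OpenCV install or image files are needed:

import numpy

result = numpy.zeros((4, 5))  # stand-in for cv2.matchTemplate output
result[1, 2] = 0.9995
result[3, 0] = 0.99995
confidence = 0.999

# Flat indices of cells above the confidence threshold, then back to 2-D.
match_indices = numpy.arange(result.size)[(result > confidence).flatten()]
matches = numpy.unravel_index(match_indices[:10], result.shape)
coords = [(int(x), int(y)) for x, y in zip(matches[1], matches[0])]
print(coords)  # (x, y) pairs: [(2, 1), (0, 3)]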
def iter_sprite_scripts(scratch):
"""A generator for all scripts contained in a scratch file.
yields stage scripts first, then scripts for each sprite
"""
for script in scratch.stage.scripts:
if not isinstance(script, kurt.Comment):
yield ('Stage', script)
for sprite in scratch.sprites:
for script in sprite.scripts:
if not isinstance(script, kurt.Comment):
yield (sprite.name, script) | def function[iter_sprite_scripts, parameter[scratch]]:
constant[A generator for all scripts contained in a scratch file.
yields stage scripts first, then scripts for each sprite
]
for taget[name[script]] in starred[name[scratch].stage.scripts] begin[:]
if <ast.UnaryOp object at 0x7da18f58e860> begin[:]
<ast.Yield object at 0x7da1b0ff1000>
for taget[name[sprite]] in starred[name[scratch].sprites] begin[:]
for taget[name[script]] in starred[name[sprite].scripts] begin[:]
if <ast.UnaryOp object at 0x7da1b0ff3f70> begin[:]
<ast.Yield object at 0x7da1b0ff2d40> | keyword[def] identifier[iter_sprite_scripts] ( identifier[scratch] ):
literal[string]
keyword[for] identifier[script] keyword[in] identifier[scratch] . identifier[stage] . identifier[scripts] :
keyword[if] keyword[not] identifier[isinstance] ( identifier[script] , identifier[kurt] . identifier[Comment] ):
keyword[yield] ( literal[string] , identifier[script] )
keyword[for] identifier[sprite] keyword[in] identifier[scratch] . identifier[sprites] :
keyword[for] identifier[script] keyword[in] identifier[sprite] . identifier[scripts] :
keyword[if] keyword[not] identifier[isinstance] ( identifier[script] , identifier[kurt] . identifier[Comment] ):
keyword[yield] ( identifier[sprite] . identifier[name] , identifier[script] ) | def iter_sprite_scripts(scratch):
"""A generator for all scripts contained in a scratch file.
yields stage scripts first, then scripts for each sprite
"""
for script in scratch.stage.scripts:
if not isinstance(script, kurt.Comment):
yield ('Stage', script) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['script']]
for sprite in scratch.sprites:
for script in sprite.scripts:
if not isinstance(script, kurt.Comment):
yield (sprite.name, script) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['script']] # depends on [control=['for'], data=['sprite']] |
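A sketch of driving the generator above with the kurt library it targets — the project file name is hypothetical, and Project.load / script.blocks are used on the assumption that this is kurt 2.x:

import kurt

project = kurt.Project.load('game.sb2')  # hypothetical file
for owner, script in iter_sprite_scripts(project):
    print(owner, len(script.blocks))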
def spherical_to_cartesian(r,theta,phi):
"""
Simple conversion of spherical to cartesian coordinates
Args:
r = radial distance, theta = azimuthal angle, phi = polar angle from the +z axis (all scalars)
Returns:
x,y,z = scalar cartesian coordinates
"""
x = r * np.sin(phi) * np.cos(theta)
y = r * np.sin(phi) * np.sin(theta)
z = r * np.cos(phi)
return (x,y,z) | def function[spherical_to_cartesian, parameter[r, theta, phi]]:
constant[
Simple conversion of spherical to cartesian coordinates
Args:
r = radial distance, theta = azimuthal angle, phi = polar angle from the +z axis (all scalars)
Returns:
x,y,z = scalar cartesian coordinates
]
variable[x] assign[=] binary_operation[binary_operation[name[r] * call[name[np].sin, parameter[name[phi]]]] * call[name[np].cos, parameter[name[theta]]]]
variable[y] assign[=] binary_operation[binary_operation[name[r] * call[name[np].sin, parameter[name[phi]]]] * call[name[np].sin, parameter[name[theta]]]]
variable[z] assign[=] binary_operation[name[r] * call[name[np].cos, parameter[name[phi]]]]
return[tuple[[<ast.Name object at 0x7da1b113f580>, <ast.Name object at 0x7da1b113f460>, <ast.Name object at 0x7da1b113f550>]]] | keyword[def] identifier[spherical_to_cartesian] ( identifier[r] , identifier[theta] , identifier[phi] ):
literal[string]
identifier[x] = identifier[r] * identifier[np] . identifier[sin] ( identifier[phi] )* identifier[np] . identifier[cos] ( identifier[theta] )
identifier[y] = identifier[r] * identifier[np] . identifier[sin] ( identifier[phi] )* identifier[np] . identifier[sin] ( identifier[theta] )
identifier[z] = identifier[r] * identifier[np] . identifier[cos] ( identifier[phi] )
keyword[return] ( identifier[x] , identifier[y] , identifier[z] ) | def spherical_to_cartesian(r, theta, phi):
"""
Simple conversion of spherical to cartesian coordinates
Args:
r = radial distance, theta = azimuthal angle, phi = polar angle from the +z axis (all scalars)
Returns:
x,y,z = scalar cartesian coordinates
"""
x = r * np.sin(phi) * np.cos(theta)
y = r * np.sin(phi) * np.sin(theta)
z = r * np.cos(phi)
return (x, y, z) |
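With phi read as the polar angle from the +z axis and theta as the azimuth, the poles and equator land where expected. A quick sanity check, restating the function so the snippet runs standalone (np is numpy, as in the code above):

import numpy as np

def spherical_to_cartesian(r, theta, phi):
    x = r * np.sin(phi) * np.cos(theta)
    y = r * np.sin(phi) * np.sin(theta)
    z = r * np.cos(phi)
    return (x, y, z)

print(spherical_to_cartesian(1.0, 0.0, 0.0))              # (0, 0, 1): +z pole
print(spherical_to_cartesian(1.0, 0.0, np.pi / 2))        # ~(1, 0, 0): equator, +x
print(spherical_to_cartesian(1.0, np.pi / 2, np.pi / 2))  # ~(0, 1, 0): equator, +y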
def _ltu16(ins):
''' Compares & pops top 2 operands out of the stack, and checks
if the 1st operand < 2nd operand (top of the stack).
Pushes 0 if False, 1 if True.
16 bit unsigned version
'''
output = _16bit_oper(ins.quad[2], ins.quad[3])
output.append('or a')
output.append('sbc hl, de')
output.append('sbc a, a')
output.append('push af')
return output | def function[_ltu16, parameter[ins]]:
constant[ Compares & pops top 2 operands out of the stack, and checks
if the 1st operand < 2nd operand (top of the stack).
Pushes 0 if False, 1 if True.
16 bit unsigned version
]
variable[output] assign[=] call[name[_16bit_oper], parameter[call[name[ins].quad][constant[2]], call[name[ins].quad][constant[3]]]]
call[name[output].append, parameter[constant[or a]]]
call[name[output].append, parameter[constant[sbc hl, de]]]
call[name[output].append, parameter[constant[sbc a, a]]]
call[name[output].append, parameter[constant[push af]]]
return[name[output]] | keyword[def] identifier[_ltu16] ( identifier[ins] ):
literal[string]
identifier[output] = identifier[_16bit_oper] ( identifier[ins] . identifier[quad] [ literal[int] ], identifier[ins] . identifier[quad] [ literal[int] ])
identifier[output] . identifier[append] ( literal[string] )
identifier[output] . identifier[append] ( literal[string] )
identifier[output] . identifier[append] ( literal[string] )
identifier[output] . identifier[append] ( literal[string] )
keyword[return] identifier[output] | def _ltu16(ins):
""" Compares & pops top 2 operands out of the stack, and checks
if the 1st operand < 2nd operand (top of the stack).
Pushes 0 if False, 1 if True.
16 bit unsigned version
"""
output = _16bit_oper(ins.quad[2], ins.quad[3])
output.append('or a')
output.append('sbc hl, de')
output.append('sbc a, a')
output.append('push af')
return output |
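The emitted Z80 sequence computes A = 0xFF when HL < DE (unsigned) and 0x00 otherwise: 'or a' clears the carry flag, 'sbc hl, de' sets carry exactly when the subtraction borrows (i.e. HL < DE), and 'sbc a, a' smears that carry across all bits of A. A behavioral Python model of the trick (a sketch of the flag logic, not an emulator):

def ltu16_model(hl: int, de: int) -> int:
    carry = 0                        # 'or a' clears carry
    carry = 1 if hl - de < 0 else 0  # 'sbc hl, de': borrow out means hl < de
    return (-carry) & 0xFF           # 'sbc a, a': A - A - C = 0xFF if carry else 0x00

assert ltu16_model(1, 2) == 0xFF
assert ltu16_model(2, 1) == 0x00
assert ltu16_model(5, 5) == 0x00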
def get_intersections_with_segments(self):
"""
Return a list of unordered intersection '(point, segment)' pairs,
where segments may contain 2 or more values.
"""
if Real is float:
return [
(p, [event.segment for event in event_set])
for p, event_set in self.intersections.items()
]
else:
return [
(
(float(p[0]), float(p[1])),
[((float(event.segment[0][0]), float(event.segment[0][1])),
(float(event.segment[1][0]), float(event.segment[1][1])))
for event in event_set],
)
for p, event_set in self.intersections.items()
] | def function[get_intersections_with_segments, parameter[self]]:
constant[
Return a list of unordered intersection '(point, segment)' pairs,
where segments may contain 2 or more values.
]
if compare[name[Real] is name[float]] begin[:]
return[<ast.ListComp object at 0x7da20c7cbdc0>] | keyword[def] identifier[get_intersections_with_segments] ( identifier[self] ):
literal[string]
keyword[if] identifier[Real] keyword[is] identifier[float] :
keyword[return] [
( identifier[p] ,[ identifier[event] . identifier[segment] keyword[for] identifier[event] keyword[in] identifier[event_set] ])
keyword[for] identifier[p] , identifier[event_set] keyword[in] identifier[self] . identifier[intersections] . identifier[items] ()
]
keyword[else] :
keyword[return] [
(
( identifier[float] ( identifier[p] [ literal[int] ]), identifier[float] ( identifier[p] [ literal[int] ])),
[(( identifier[float] ( identifier[event] . identifier[segment] [ literal[int] ][ literal[int] ]), identifier[float] ( identifier[event] . identifier[segment] [ literal[int] ][ literal[int] ])),
( identifier[float] ( identifier[event] . identifier[segment] [ literal[int] ][ literal[int] ]), identifier[float] ( identifier[event] . identifier[segment] [ literal[int] ][ literal[int] ])))
keyword[for] identifier[event] keyword[in] identifier[event_set] ],
)
keyword[for] identifier[p] , identifier[event_set] keyword[in] identifier[self] . identifier[intersections] . identifier[items] ()
] | def get_intersections_with_segments(self):
"""
Return a list of unordered intersection '(point, segment)' pairs,
where segments may contain 2 or more values.
"""
if Real is float:
return [(p, [event.segment for event in event_set]) for (p, event_set) in self.intersections.items()] # depends on [control=['if'], data=[]]
else:
return [((float(p[0]), float(p[1])), [((float(event.segment[0][0]), float(event.segment[0][1])), (float(event.segment[1][0]), float(event.segment[1][1]))) for event in event_set]) for (p, event_set) in self.intersections.items()] |
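When Real is an exact type such as fractions.Fraction, the else branch above downcasts every coordinate to float before returning. A tiny illustration of that conversion on one (point, segment-list) pair, using Fraction as a stand-in for whatever Real the sweep-line code was configured with:

from fractions import Fraction as Real

p = (Real(1, 3), Real(2, 3))
segments = [((Real(0), Real(0)), (Real(1), Real(1)))]
converted = ((float(p[0]), float(p[1])),
             [((float(s[0][0]), float(s[0][1])),
               (float(s[1][0]), float(s[1][1]))) for s in segments])
print(converted)  # ((0.3333..., 0.6666...), [((0.0, 0.0), (1.0, 1.0))])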