Unnamed: 0 (int64, 0-10k) | repository_name (string, 7-54 chars) | func_path_in_repository (string, 5-223 chars) | func_name (string, 1-134 chars) | whole_func_string (string, 100-30.3k chars) | language (string, 1 class) | func_code_string (string, 100-30.3k chars) | func_code_tokens (string, 138-33.2k chars) | func_documentation_string (string, 1-15k chars) | func_documentation_tokens (string, 5-5.14k chars) | split_name (string, 1 class) | func_code_url (string, 91-315 chars) |
---|---|---|---|---|---|---|---|---|---|---|---|
3,800 | OSSOS/MOP | src/ossos/plotting/scripts/plot_aq.py | parse_nate_sims | def parse_nate_sims(path):
'''
parts0.dat) contains the id number, particle fraction (ignore) a, ecc, inc, long. asc., arg. per, and mean anomaly
for every particle in the simulation at t=0.
The second (parts3999.dat) contains the same info at t=3.999 Gyrs for these particles.
:return:
'''
zerostate = pandas.read_table(path + 'parts0.dat', delim_whitespace=True)
endstate = pandas.read_table(path + 'parts3999.dat', delim_whitespace=True)
# add perihelion
zerostate['q'] = zerostate['a'] * (1 - zerostate['e'])
endstate['q'] = endstate['a'] * (1 - endstate['e'])
return zerostate, endstate | python | def parse_nate_sims(path):
'''
parts0.dat) contains the id number, particle fraction (ignore) a, ecc, inc, long. asc., arg. per, and mean anomaly
for every particle in the simulation at t=0.
The second (parts3999.dat) contains the same info at t=3.999 Gyrs for these particles.
:return:
'''
zerostate = pandas.read_table(path + 'parts0.dat', delim_whitespace=True)
endstate = pandas.read_table(path + 'parts3999.dat', delim_whitespace=True)
# add perihelion
zerostate['q'] = zerostate['a'] * (1 - zerostate['e'])
endstate['q'] = endstate['a'] * (1 - endstate['e'])
return zerostate, endstate | ['def', 'parse_nate_sims', '(', 'path', ')', ':', 'zerostate', '=', 'pandas', '.', 'read_table', '(', 'path', '+', "'parts0.dat'", ',', 'delim_whitespace', '=', 'True', ')', 'endstate', '=', 'pandas', '.', 'read_table', '(', 'path', '+', "'parts3999.dat'", ',', 'delim_whitespace', '=', 'True', ')', '# add perihelion', 'zerostate', '[', "'q'", ']', '=', 'zerostate', '[', "'a'", ']', '*', '(', '1', '-', 'zerostate', '[', "'e'", ']', ')', 'endstate', '[', "'q'", ']', '=', 'endstate', '[', "'a'", ']', '*', '(', '1', '-', 'endstate', '[', "'e'", ']', ')', 'return', 'zerostate', ',', 'endstate'] | parts0.dat) contains the id number, particle fraction (ignore) a, ecc, inc, long. asc., arg. per, and mean anomaly
for every particle in the simulation at t=0.
The second (parts3999.dat) contains the same info at t=3.999 Gyrs for these particles.
:return: | ['parts0', '.', 'dat', ')', 'contains', 'the', 'id', 'number', 'particle', 'fraction', '(', 'ignore', ')', 'a', 'ecc', 'inc', 'long', '.', 'asc', '.', 'arg', '.', 'per', 'and', 'mean', 'anomaly', 'for', 'every', 'particle', 'in', 'the', 'simulation', 'at', 't', '=', '0', '.', 'The', 'second', '(', 'parts3999', '.', 'dat', ')', 'contains', 'the', 'same', 'info', 'at', 't', '=', '3', '.', '999', 'Gyrs', 'for', 'these', 'particles', '.'] | train | https://github.com/OSSOS/MOP/blob/94f91d32ad5ec081d5a1ebd67604a838003465af/src/ossos/plotting/scripts/plot_aq.py#L17-L32 |
3,801 | scottjbarr/bitfinex | bitfinex/client.py | Client.order_book | def order_book(self, symbol, parameters=None):
"""
curl "https://api.bitfinex.com/v1/book/btcusd"
{"bids":[{"price":"561.1101","amount":"0.985","timestamp":"1395557729.0"}],"asks":[{"price":"562.9999","amount":"0.985","timestamp":"1395557711.0"}]}
The 'bids' and 'asks' arrays will have multiple bid and ask dicts.
Optional parameters
limit_bids (int): Optional. Limit the number of bids returned. May be 0 in which case the array of bids is empty. Default is 50.
limit_asks (int): Optional. Limit the number of asks returned. May be 0 in which case the array of asks is empty. Default is 50.
eg.
curl "https://api.bitfinex.com/v1/book/btcusd?limit_bids=1&limit_asks=0"
{"bids":[{"price":"561.1101","amount":"0.985","timestamp":"1395557729.0"}],"asks":[]}
"""
data = self._get(self.url_for(PATH_ORDERBOOK, path_arg=symbol, parameters=parameters))
for type_ in data.keys():
for list_ in data[type_]:
for key, value in list_.items():
list_[key] = float(value)
return data | python | def order_book(self, symbol, parameters=None):
"""
curl "https://api.bitfinex.com/v1/book/btcusd"
{"bids":[{"price":"561.1101","amount":"0.985","timestamp":"1395557729.0"}],"asks":[{"price":"562.9999","amount":"0.985","timestamp":"1395557711.0"}]}
The 'bids' and 'asks' arrays will have multiple bid and ask dicts.
Optional parameters
limit_bids (int): Optional. Limit the number of bids returned. May be 0 in which case the array of bids is empty. Default is 50.
limit_asks (int): Optional. Limit the number of asks returned. May be 0 in which case the array of asks is empty. Default is 50.
eg.
curl "https://api.bitfinex.com/v1/book/btcusd?limit_bids=1&limit_asks=0"
{"bids":[{"price":"561.1101","amount":"0.985","timestamp":"1395557729.0"}],"asks":[]}
"""
data = self._get(self.url_for(PATH_ORDERBOOK, path_arg=symbol, parameters=parameters))
for type_ in data.keys():
for list_ in data[type_]:
for key, value in list_.items():
list_[key] = float(value)
return data | ['def', 'order_book', '(', 'self', ',', 'symbol', ',', 'parameters', '=', 'None', ')', ':', 'data', '=', 'self', '.', '_get', '(', 'self', '.', 'url_for', '(', 'PATH_ORDERBOOK', ',', 'path_arg', '=', 'symbol', ',', 'parameters', '=', 'parameters', ')', ')', 'for', 'type_', 'in', 'data', '.', 'keys', '(', ')', ':', 'for', 'list_', 'in', 'data', '[', 'type_', ']', ':', 'for', 'key', ',', 'value', 'in', 'list_', '.', 'items', '(', ')', ':', 'list_', '[', 'key', ']', '=', 'float', '(', 'value', ')', 'return', 'data'] | curl "https://api.bitfinex.com/v1/book/btcusd"
{"bids":[{"price":"561.1101","amount":"0.985","timestamp":"1395557729.0"}],"asks":[{"price":"562.9999","amount":"0.985","timestamp":"1395557711.0"}]}
The 'bids' and 'asks' arrays will have multiple bid and ask dicts.
Optional parameters
limit_bids (int): Optional. Limit the number of bids returned. May be 0 in which case the array of bids is empty. Default is 50.
limit_asks (int): Optional. Limit the number of asks returned. May be 0 in which case the array of asks is empty. Default is 50.
eg.
curl "https://api.bitfinex.com/v1/book/btcusd?limit_bids=1&limit_asks=0"
{"bids":[{"price":"561.1101","amount":"0.985","timestamp":"1395557729.0"}],"asks":[]} | ['curl', 'https', ':', '//', 'api', '.', 'bitfinex', '.', 'com', '/', 'v1', '/', 'book', '/', 'btcusd'] | train | https://github.com/scottjbarr/bitfinex/blob/03f7c71615fe38c2e28be0ebb761d3106ef0a51a/bitfinex/client.py#L469-L494 |
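The post-processing step in `order_book` just walks the nested response and casts every string field to `float`. A self-contained sketch of that conversion, using a payload shaped like the curl output quoted in the docstring:

```python
# Payload shaped like the documented response; values arrive as strings.
data = {
    "bids": [{"price": "561.1101", "amount": "0.985", "timestamp": "1395557729.0"}],
    "asks": [{"price": "562.9999", "amount": "0.985", "timestamp": "1395557711.0"}],
}

# Cast every field of every bid/ask entry to float, in place.
for side in data:
    for entry in data[side]:
        for key, value in entry.items():
            entry[key] = float(value)

print(data["bids"][0]["price"])  # 561.1101 as a float
```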
3,802 | pypa/pipenv | pipenv/vendor/dotenv/environ.py | _cast_boolean | def _cast_boolean(value):
"""
Helper to convert config values to boolean as ConfigParser do.
"""
_BOOLEANS = {'1': True, 'yes': True, 'true': True, 'on': True,
'0': False, 'no': False, 'false': False, 'off': False, '': False}
value = str(value)
if value.lower() not in _BOOLEANS:
raise ValueError('Not a boolean: %s' % value)
return _BOOLEANS[value.lower()] | python | def _cast_boolean(value):
"""
Helper to convert config values to boolean as ConfigParser do.
"""
_BOOLEANS = {'1': True, 'yes': True, 'true': True, 'on': True,
'0': False, 'no': False, 'false': False, 'off': False, '': False}
value = str(value)
if value.lower() not in _BOOLEANS:
raise ValueError('Not a boolean: %s' % value)
return _BOOLEANS[value.lower()] | ['def', '_cast_boolean', '(', 'value', ')', ':', '_BOOLEANS', '=', '{', "'1'", ':', 'True', ',', "'yes'", ':', 'True', ',', "'true'", ':', 'True', ',', "'on'", ':', 'True', ',', "'0'", ':', 'False', ',', "'no'", ':', 'False', ',', "'false'", ':', 'False', ',', "'off'", ':', 'False', ',', "''", ':', 'False', '}', 'value', '=', 'str', '(', 'value', ')', 'if', 'value', '.', 'lower', '(', ')', 'not', 'in', '_BOOLEANS', ':', 'raise', 'ValueError', '(', "'Not a boolean: %s'", '%', 'value', ')', 'return', '_BOOLEANS', '[', 'value', '.', 'lower', '(', ')', ']'] | Helper to convert config values to boolean as ConfigParser do. | ['Helper', 'to', 'convert', 'config', 'values', 'to', 'boolean', 'as', 'ConfigParser', 'do', '.'] | train | https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/dotenv/environ.py#L17-L27 |
3,803 | StackStorm/pybind | pybind/nos/v7_2_0/__init__.py | brocade_vswitch._set_vcenter | def _set_vcenter(self, v, load=False):
"""
Setter method for vcenter, mapped from YANG variable /vcenter (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_vcenter is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_vcenter() directly.
YANG Description: vCenter
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("id",vcenter.vcenter, yang_name="vcenter", rest_name="vcenter", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='id', extensions={u'tailf-common': {u'info': u'vCenter Configuration', u'cli-suppress-mode': None, u'sort-priority': u'88', u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'cli-incomplete-command': None, u'callpoint': u'vcenter_callpoint'}}), is_container='list', yang_name="vcenter", rest_name="vcenter", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'vCenter Configuration', u'cli-suppress-mode': None, u'sort-priority': u'88', u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'cli-incomplete-command': None, u'callpoint': u'vcenter_callpoint'}}, namespace='urn:brocade.com:mgmt:brocade-vswitch', defining_module='brocade-vswitch', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """vcenter must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("id",vcenter.vcenter, yang_name="vcenter", rest_name="vcenter", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='id', extensions={u'tailf-common': {u'info': u'vCenter Configuration', u'cli-suppress-mode': None, u'sort-priority': u'88', u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'cli-incomplete-command': None, u'callpoint': u'vcenter_callpoint'}}), is_container='list', yang_name="vcenter", rest_name="vcenter", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'vCenter Configuration', u'cli-suppress-mode': None, u'sort-priority': u'88', u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'cli-incomplete-command': None, u'callpoint': u'vcenter_callpoint'}}, namespace='urn:brocade.com:mgmt:brocade-vswitch', defining_module='brocade-vswitch', yang_type='list', is_config=True)""",
})
self.__vcenter = t
if hasattr(self, '_set'):
self._set() | python | def _set_vcenter(self, v, load=False):
"""
Setter method for vcenter, mapped from YANG variable /vcenter (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_vcenter is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_vcenter() directly.
YANG Description: vCenter
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("id",vcenter.vcenter, yang_name="vcenter", rest_name="vcenter", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='id', extensions={u'tailf-common': {u'info': u'vCenter Configuration', u'cli-suppress-mode': None, u'sort-priority': u'88', u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'cli-incomplete-command': None, u'callpoint': u'vcenter_callpoint'}}), is_container='list', yang_name="vcenter", rest_name="vcenter", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'vCenter Configuration', u'cli-suppress-mode': None, u'sort-priority': u'88', u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'cli-incomplete-command': None, u'callpoint': u'vcenter_callpoint'}}, namespace='urn:brocade.com:mgmt:brocade-vswitch', defining_module='brocade-vswitch', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """vcenter must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("id",vcenter.vcenter, yang_name="vcenter", rest_name="vcenter", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='id', extensions={u'tailf-common': {u'info': u'vCenter Configuration', u'cli-suppress-mode': None, u'sort-priority': u'88', u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'cli-incomplete-command': None, u'callpoint': u'vcenter_callpoint'}}), is_container='list', yang_name="vcenter", rest_name="vcenter", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'vCenter Configuration', u'cli-suppress-mode': None, u'sort-priority': u'88', u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'cli-incomplete-command': None, u'callpoint': u'vcenter_callpoint'}}, namespace='urn:brocade.com:mgmt:brocade-vswitch', defining_module='brocade-vswitch', yang_type='list', is_config=True)""",
})
self.__vcenter = t
if hasattr(self, '_set'):
self._set() | ['def', '_set_vcenter', '(', 'self', ',', 'v', ',', 'load', '=', 'False', ')', ':', 'if', 'hasattr', '(', 'v', ',', '"_utype"', ')', ':', 'v', '=', 'v', '.', '_utype', '(', 'v', ')', 'try', ':', 't', '=', 'YANGDynClass', '(', 'v', ',', 'base', '=', 'YANGListType', '(', '"id"', ',', 'vcenter', '.', 'vcenter', ',', 'yang_name', '=', '"vcenter"', ',', 'rest_name', '=', '"vcenter"', ',', 'parent', '=', 'self', ',', 'is_container', '=', "'list'", ',', 'user_ordered', '=', 'False', ',', 'path_helper', '=', 'self', '.', '_path_helper', ',', 'yang_keys', '=', "'id'", ',', 'extensions', '=', '{', "u'tailf-common'", ':', '{', "u'info'", ':', "u'vCenter Configuration'", ',', "u'cli-suppress-mode'", ':', 'None', ',', "u'sort-priority'", ':', "u'88'", ',', "u'cli-suppress-list-no'", ':', 'None', ',', "u'cli-suppress-key-abbreviation'", ':', 'None', ',', "u'cli-incomplete-command'", ':', 'None', ',', "u'callpoint'", ':', "u'vcenter_callpoint'", '}', '}', ')', ',', 'is_container', '=', "'list'", ',', 'yang_name', '=', '"vcenter"', ',', 'rest_name', '=', '"vcenter"', ',', 'parent', '=', 'self', ',', 'path_helper', '=', 'self', '.', '_path_helper', ',', 'extmethods', '=', 'self', '.', '_extmethods', ',', 'register_paths', '=', 'True', ',', 'extensions', '=', '{', "u'tailf-common'", ':', '{', "u'info'", ':', "u'vCenter Configuration'", ',', "u'cli-suppress-mode'", ':', 'None', ',', "u'sort-priority'", ':', "u'88'", ',', "u'cli-suppress-list-no'", ':', 'None', ',', "u'cli-suppress-key-abbreviation'", ':', 'None', ',', "u'cli-incomplete-command'", ':', 'None', ',', "u'callpoint'", ':', "u'vcenter_callpoint'", '}', '}', ',', 'namespace', '=', "'urn:brocade.com:mgmt:brocade-vswitch'", ',', 'defining_module', '=', "'brocade-vswitch'", ',', 'yang_type', '=', "'list'", ',', 'is_config', '=', 'True', ')', 'except', '(', 'TypeError', ',', 'ValueError', ')', ':', 'raise', 'ValueError', '(', '{', "'error-string'", ':', '"""vcenter must be of a type compatible with list"""', ',', "'defined-type'", ':', '"list"', ',', "'generated-type'", ':', '"""YANGDynClass(base=YANGListType("id",vcenter.vcenter, yang_name="vcenter", rest_name="vcenter", parent=self, is_container=\'list\', user_ordered=False, path_helper=self._path_helper, yang_keys=\'id\', extensions={u\'tailf-common\': {u\'info\': u\'vCenter Configuration\', u\'cli-suppress-mode\': None, u\'sort-priority\': u\'88\', u\'cli-suppress-list-no\': None, u\'cli-suppress-key-abbreviation\': None, u\'cli-incomplete-command\': None, u\'callpoint\': u\'vcenter_callpoint\'}}), is_container=\'list\', yang_name="vcenter", rest_name="vcenter", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u\'tailf-common\': {u\'info\': u\'vCenter Configuration\', u\'cli-suppress-mode\': None, u\'sort-priority\': u\'88\', u\'cli-suppress-list-no\': None, u\'cli-suppress-key-abbreviation\': None, u\'cli-incomplete-command\': None, u\'callpoint\': u\'vcenter_callpoint\'}}, namespace=\'urn:brocade.com:mgmt:brocade-vswitch\', defining_module=\'brocade-vswitch\', yang_type=\'list\', is_config=True)"""', ',', '}', ')', 'self', '.', '__vcenter', '=', 't', 'if', 'hasattr', '(', 'self', ',', "'_set'", ')', ':', 'self', '.', '_set', '(', ')'] | Setter method for vcenter, mapped from YANG variable /vcenter (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_vcenter is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_vcenter() directly.
YANG Description: vCenter | ['Setter', 'method', 'for', 'vcenter', 'mapped', 'from', 'YANG', 'variable', '/', 'vcenter', '(', 'list', ')', 'If', 'this', 'variable', 'is', 'read', '-', 'only', '(', 'config', ':', 'false', ')', 'in', 'the', 'source', 'YANG', 'file', 'then', '_set_vcenter', 'is', 'considered', 'as', 'a', 'private', 'method', '.', 'Backends', 'looking', 'to', 'populate', 'this', 'variable', 'should', 'do', 'so', 'via', 'calling', 'thisObj', '.', '_set_vcenter', '()', 'directly', '.'] | train | https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/nos/v7_2_0/__init__.py#L4868-L4891 |
3,804 | theislab/anndata | anndata/base.py | AnnData._to_dict_fixed_width_arrays | def _to_dict_fixed_width_arrays(self, var_len_str=True):
"""A dict of arrays that stores data and annotation.
It is sufficient for reconstructing the object.
"""
self.strings_to_categoricals()
obs_rec, uns_obs = df_to_records_fixed_width(self._obs, var_len_str)
var_rec, uns_var = df_to_records_fixed_width(self._var, var_len_str)
layers = self.layers.as_dict()
d = {
'X': self._X,
'obs': obs_rec,
'var': var_rec,
'obsm': self._obsm,
'varm': self._varm,
'layers': layers,
# add the categories to the unstructured annotation
'uns': {**self._uns, **uns_obs, **uns_var}}
if self.raw is not None:
self.strings_to_categoricals(self.raw._var)
var_rec, uns_var = df_to_records_fixed_width(self.raw._var, var_len_str)
d['raw.X'] = self.raw.X
d['raw.var'] = var_rec
d['raw.varm'] = self.raw.varm
d['raw.cat'] = uns_var
return d | python | def _to_dict_fixed_width_arrays(self, var_len_str=True):
"""A dict of arrays that stores data and annotation.
It is sufficient for reconstructing the object.
"""
self.strings_to_categoricals()
obs_rec, uns_obs = df_to_records_fixed_width(self._obs, var_len_str)
var_rec, uns_var = df_to_records_fixed_width(self._var, var_len_str)
layers = self.layers.as_dict()
d = {
'X': self._X,
'obs': obs_rec,
'var': var_rec,
'obsm': self._obsm,
'varm': self._varm,
'layers': layers,
# add the categories to the unstructured annotation
'uns': {**self._uns, **uns_obs, **uns_var}}
if self.raw is not None:
self.strings_to_categoricals(self.raw._var)
var_rec, uns_var = df_to_records_fixed_width(self.raw._var, var_len_str)
d['raw.X'] = self.raw.X
d['raw.var'] = var_rec
d['raw.varm'] = self.raw.varm
d['raw.cat'] = uns_var
return d | ['def', '_to_dict_fixed_width_arrays', '(', 'self', ',', 'var_len_str', '=', 'True', ')', ':', 'self', '.', 'strings_to_categoricals', '(', ')', 'obs_rec', ',', 'uns_obs', '=', 'df_to_records_fixed_width', '(', 'self', '.', '_obs', ',', 'var_len_str', ')', 'var_rec', ',', 'uns_var', '=', 'df_to_records_fixed_width', '(', 'self', '.', '_var', ',', 'var_len_str', ')', 'layers', '=', 'self', '.', 'layers', '.', 'as_dict', '(', ')', 'd', '=', '{', "'X'", ':', 'self', '.', '_X', ',', "'obs'", ':', 'obs_rec', ',', "'var'", ':', 'var_rec', ',', "'obsm'", ':', 'self', '.', '_obsm', ',', "'varm'", ':', 'self', '.', '_varm', ',', "'layers'", ':', 'layers', ',', '# add the categories to the unstructured annotation', "'uns'", ':', '{', '*', '*', 'self', '.', '_uns', ',', '*', '*', 'uns_obs', ',', '*', '*', 'uns_var', '}', '}', 'if', 'self', '.', 'raw', 'is', 'not', 'None', ':', 'self', '.', 'strings_to_categoricals', '(', 'self', '.', 'raw', '.', '_var', ')', 'var_rec', ',', 'uns_var', '=', 'df_to_records_fixed_width', '(', 'self', '.', 'raw', '.', '_var', ',', 'var_len_str', ')', 'd', '[', "'raw.X'", ']', '=', 'self', '.', 'raw', '.', 'X', 'd', '[', "'raw.var'", ']', '=', 'var_rec', 'd', '[', "'raw.varm'", ']', '=', 'self', '.', 'raw', '.', 'varm', 'd', '[', "'raw.cat'", ']', '=', 'uns_var', 'return', 'd'] | A dict of arrays that stores data and annotation.
It is sufficient for reconstructing the object. | ['A', 'dict', 'of', 'arrays', 'that', 'stores', 'data', 'and', 'annotation', '.'] | train | https://github.com/theislab/anndata/blob/34f4eb63710628fbc15e7050e5efcac1d7806062/anndata/base.py#L2239-L2266 |
3,805 | twisted/txacme | src/txacme/client.py | Client._check_regr | def _check_regr(self, regr, new_reg):
"""
Check that a registration response contains the registration we were
expecting.
"""
body = getattr(new_reg, 'body', new_reg)
for k, v in body.items():
if k == 'resource' or not v:
continue
if regr.body[k] != v:
raise errors.UnexpectedUpdate(regr)
if regr.body.key != self.key.public_key():
raise errors.UnexpectedUpdate(regr)
return regr | python | def _check_regr(self, regr, new_reg):
"""
Check that a registration response contains the registration we were
expecting.
"""
body = getattr(new_reg, 'body', new_reg)
for k, v in body.items():
if k == 'resource' or not v:
continue
if regr.body[k] != v:
raise errors.UnexpectedUpdate(regr)
if regr.body.key != self.key.public_key():
raise errors.UnexpectedUpdate(regr)
return regr | ['def', '_check_regr', '(', 'self', ',', 'regr', ',', 'new_reg', ')', ':', 'body', '=', 'getattr', '(', 'new_reg', ',', "'body'", ',', 'new_reg', ')', 'for', 'k', ',', 'v', 'in', 'body', '.', 'items', '(', ')', ':', 'if', 'k', '==', "'resource'", 'or', 'not', 'v', ':', 'continue', 'if', 'regr', '.', 'body', '[', 'k', ']', '!=', 'v', ':', 'raise', 'errors', '.', 'UnexpectedUpdate', '(', 'regr', ')', 'if', 'regr', '.', 'body', '.', 'key', '!=', 'self', '.', 'key', '.', 'public_key', '(', ')', ':', 'raise', 'errors', '.', 'UnexpectedUpdate', '(', 'regr', ')', 'return', 'regr'] | Check that a registration response contains the registration we were
expecting. | ['Check', 'that', 'a', 'registration', 'response', 'contains', 'the', 'registration', 'we', 'were', 'expecting', '.'] | train | https://github.com/twisted/txacme/blob/9478381cc63c6d53d14bf8db8407c923f472989a/src/txacme/client.py#L253-L266 |
3,806 | signetlabdei/sem | sem/manager.py | CampaignManager.save_to_folders | def save_to_folders(self, parameter_space, folder_name, runs):
"""
Save results to a folder structure.
"""
self.space_to_folders(self.db.get_results(), {}, parameter_space, runs,
folder_name) | python | def save_to_folders(self, parameter_space, folder_name, runs):
"""
Save results to a folder structure.
"""
self.space_to_folders(self.db.get_results(), {}, parameter_space, runs,
folder_name) | ['def', 'save_to_folders', '(', 'self', ',', 'parameter_space', ',', 'folder_name', ',', 'runs', ')', ':', 'self', '.', 'space_to_folders', '(', 'self', '.', 'db', '.', 'get_results', '(', ')', ',', '{', '}', ',', 'parameter_space', ',', 'runs', ',', 'folder_name', ')'] | Save results to a folder structure. | ['Save', 'results', 'to', 'a', 'folder', 'structure', '.'] | train | https://github.com/signetlabdei/sem/blob/5077dd7a6d15644a18790bb6fde320e905f0fef0/sem/manager.py#L432-L437 |
3,807 | twilio/twilio-python | twilio/rest/studio/v1/flow/engagement/step/__init__.py | StepContext.step_context | def step_context(self):
"""
Access the step_context
:returns: twilio.rest.studio.v1.flow.engagement.step.step_context.StepContextList
:rtype: twilio.rest.studio.v1.flow.engagement.step.step_context.StepContextList
"""
if self._step_context is None:
self._step_context = StepContextList(
self._version,
flow_sid=self._solution['flow_sid'],
engagement_sid=self._solution['engagement_sid'],
step_sid=self._solution['sid'],
)
return self._step_context | python | def step_context(self):
"""
Access the step_context
:returns: twilio.rest.studio.v1.flow.engagement.step.step_context.StepContextList
:rtype: twilio.rest.studio.v1.flow.engagement.step.step_context.StepContextList
"""
if self._step_context is None:
self._step_context = StepContextList(
self._version,
flow_sid=self._solution['flow_sid'],
engagement_sid=self._solution['engagement_sid'],
step_sid=self._solution['sid'],
)
return self._step_context | ['def', 'step_context', '(', 'self', ')', ':', 'if', 'self', '.', '_step_context', 'is', 'None', ':', 'self', '.', '_step_context', '=', 'StepContextList', '(', 'self', '.', '_version', ',', 'flow_sid', '=', 'self', '.', '_solution', '[', "'flow_sid'", ']', ',', 'engagement_sid', '=', 'self', '.', '_solution', '[', "'engagement_sid'", ']', ',', 'step_sid', '=', 'self', '.', '_solution', '[', "'sid'", ']', ',', ')', 'return', 'self', '.', '_step_context'] | Access the step_context
:returns: twilio.rest.studio.v1.flow.engagement.step.step_context.StepContextList
:rtype: twilio.rest.studio.v1.flow.engagement.step.step_context.StepContextList | ['Access', 'the', 'step_context'] | train | https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/studio/v1/flow/engagement/step/__init__.py#L255-L269 |
3,808 | litl/rauth | rauth/oauth.py | RsaSha1Signature.sign | def sign(self,
consumer_secret,
access_token_secret,
method,
url,
oauth_params,
req_kwargs):
'''Sign request parameters.
:param consumer_secret: RSA private key.
:type consumer_secret: str or RSA._RSAobj
:param access_token_secret: Unused.
:type access_token_secret: str
:param method: The method of this particular request.
:type method: str
:param url: The URL of this particular request.
:type url: str
:param oauth_params: OAuth parameters.
:type oauth_params: dict
:param req_kwargs: Keyworded args that will be sent to the request
method.
:type req_kwargs: dict
'''
url = self._remove_qs(url)
oauth_params = \
self._normalize_request_parameters(oauth_params, req_kwargs)
parameters = map(self._escape, [method, url, oauth_params])
# build a Signature Base String
signature_base_string = b'&'.join(parameters)
# resolve the key
if is_basestring(consumer_secret):
consumer_secret = self.RSA.importKey(consumer_secret)
if not isinstance(consumer_secret, self.RSA._RSAobj):
raise ValueError('invalid consumer_secret')
# hash the string with RSA-SHA1
s = self.PKCS1_v1_5.new(consumer_secret)
# PyCrypto SHA.new requires an encoded byte string
h = self.SHA.new(signature_base_string)
hashed = s.sign(h)
# return the signature
return base64.b64encode(hashed).decode() | python | def sign(self,
consumer_secret,
access_token_secret,
method,
url,
oauth_params,
req_kwargs):
'''Sign request parameters.
:param consumer_secret: RSA private key.
:type consumer_secret: str or RSA._RSAobj
:param access_token_secret: Unused.
:type access_token_secret: str
:param method: The method of this particular request.
:type method: str
:param url: The URL of this particular request.
:type url: str
:param oauth_params: OAuth parameters.
:type oauth_params: dict
:param req_kwargs: Keyworded args that will be sent to the request
method.
:type req_kwargs: dict
'''
url = self._remove_qs(url)
oauth_params = \
self._normalize_request_parameters(oauth_params, req_kwargs)
parameters = map(self._escape, [method, url, oauth_params])
# build a Signature Base String
signature_base_string = b'&'.join(parameters)
# resolve the key
if is_basestring(consumer_secret):
consumer_secret = self.RSA.importKey(consumer_secret)
if not isinstance(consumer_secret, self.RSA._RSAobj):
raise ValueError('invalid consumer_secret')
# hash the string with RSA-SHA1
s = self.PKCS1_v1_5.new(consumer_secret)
# PyCrypto SHA.new requires an encoded byte string
h = self.SHA.new(signature_base_string)
hashed = s.sign(h)
# return the signature
return base64.b64encode(hashed).decode() | ['def', 'sign', '(', 'self', ',', 'consumer_secret', ',', 'access_token_secret', ',', 'method', ',', 'url', ',', 'oauth_params', ',', 'req_kwargs', ')', ':', 'url', '=', 'self', '.', '_remove_qs', '(', 'url', ')', 'oauth_params', '=', 'self', '.', '_normalize_request_parameters', '(', 'oauth_params', ',', 'req_kwargs', ')', 'parameters', '=', 'map', '(', 'self', '.', '_escape', ',', '[', 'method', ',', 'url', ',', 'oauth_params', ']', ')', '# build a Signature Base String', 'signature_base_string', '=', "b'&'", '.', 'join', '(', 'parameters', ')', '# resolve the key', 'if', 'is_basestring', '(', 'consumer_secret', ')', ':', 'consumer_secret', '=', 'self', '.', 'RSA', '.', 'importKey', '(', 'consumer_secret', ')', 'if', 'not', 'isinstance', '(', 'consumer_secret', ',', 'self', '.', 'RSA', '.', '_RSAobj', ')', ':', 'raise', 'ValueError', '(', "'invalid consumer_secret'", ')', '# hash the string with RSA-SHA1', 's', '=', 'self', '.', 'PKCS1_v1_5', '.', 'new', '(', 'consumer_secret', ')', '# PyCrypto SHA.new requires an encoded byte string', 'h', '=', 'self', '.', 'SHA', '.', 'new', '(', 'signature_base_string', ')', 'hashed', '=', 's', '.', 'sign', '(', 'h', ')', '# return the signature', 'return', 'base64', '.', 'b64encode', '(', 'hashed', ')', '.', 'decode', '(', ')'] | Sign request parameters.
:param consumer_secret: RSA private key.
:type consumer_secret: str or RSA._RSAobj
:param access_token_secret: Unused.
:type access_token_secret: str
:param method: The method of this particular request.
:type method: str
:param url: The URL of this particular request.
:type url: str
:param oauth_params: OAuth parameters.
:type oauth_params: dict
:param req_kwargs: Keyworded args that will be sent to the request
method.
:type req_kwargs: dict | ['Sign', 'request', 'parameters', '.'] | train | https://github.com/litl/rauth/blob/a6d887d7737cf21ec896a8104f25c2754c694011/rauth/oauth.py#L176-L221 |
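The part of `sign` that is independent of the RSA key is the OAuth signature base string: the method, the query-stripped URL, and the normalized parameters are each percent-encoded and joined with `&`. A rough sketch of just that step, assuming RFC 3986 percent-encoding as a stand-in for `_escape` (rauth's actual helper may differ in detail):

```python
from urllib.parse import quote

def signature_base_string(method, url, normalized_params):
    """Join METHOD & URL & PARAMS, each percent-encoded (unreserved chars kept)."""
    escape = lambda value: quote(value, safe='')  # assumed stand-in for _escape
    return '&'.join(escape(part) for part in (method, url, normalized_params))

print(signature_base_string('GET', 'https://example.com/resource', 'a=1&b=2'))
# GET&https%3A%2F%2Fexample.com%2Fresource&a%3D1%26b%3D2
```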
3,809 | pytroll/pyspectral | rsr_convert_scripts/seviri_rsr.py | get_central_wave | def get_central_wave(wavl, resp):
"""Calculate the central wavelength or the central wavenumber,
depending on what is input
"""
return np.trapz(resp * wavl, wavl) / np.trapz(resp, wavl) | python | def get_central_wave(wavl, resp):
"""Calculate the central wavelength or the central wavenumber,
depending on what is input
"""
return np.trapz(resp * wavl, wavl) / np.trapz(resp, wavl) | ['def', 'get_central_wave', '(', 'wavl', ',', 'resp', ')', ':', 'return', 'np', '.', 'trapz', '(', 'resp', '*', 'wavl', ',', 'wavl', ')', '/', 'np', '.', 'trapz', '(', 'resp', ',', 'wavl', ')'] | Calculate the central wavelength or the central wavenumber,
depending on what is input | ['Calculate', 'the', 'central', 'wavelength', 'or', 'the', 'central', 'wavenumber', 'depending', 'on', 'what', 'is', 'input'] | train | https://github.com/pytroll/pyspectral/blob/fd296c0e0bdf5364fa180134a1292665d6bc50a3/rsr_convert_scripts/seviri_rsr.py#L215-L219 |
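In formula terms this is the response-weighted centroid, λ_c = ∫ λ R(λ) dλ / ∫ R(λ) dλ, evaluated by trapezoidal integration. A small check with a made-up Gaussian response:

```python
import numpy as np

# Made-up spectral response centred near 0.65 um, purely to exercise the formula.
wavl = np.linspace(0.60, 0.70, 101)
resp = np.exp(-((wavl - 0.65) / 0.01) ** 2)

# Response-weighted centroid, exactly as in get_central_wave.
central = np.trapz(resp * wavl, wavl) / np.trapz(resp, wavl)
print(central)  # approximately 0.65
```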
3,810 | genialis/resolwe | resolwe/rest/projection.py | apply_subfield_projection | def apply_subfield_projection(field, value, deep=False):
"""Apply projection from request context.
The passed dictionary may be mutated.
:param field: An instance of `Field` or `Serializer`
:type field: `Field` or `Serializer`
:param value: Dictionary to apply the projection to
:type value: dict
:param deep: Also process all deep projections
:type deep: bool
"""
# Discover the root manually. We cannot use either `self.root` or `self.context`
# due to a bug with incorrect caching (see DRF issue #5087).
prefix = []
root = field
while root.parent is not None:
# Skip anonymous serializers (e.g., intermediate ListSerializers).
if root.field_name:
prefix.append(root.field_name)
root = root.parent
prefix = prefix[::-1]
context = getattr(root, '_context', {})
# If there is no request, we cannot perform filtering.
request = context.get('request')
if request is None:
return value
filtered = set(request.query_params.get('fields', '').split(FIELD_SEPARATOR))
filtered.discard('')
if not filtered:
# If there are no fields specified in the filter, return all fields.
return value
# Extract projection for current and deeper levels.
current_level = len(prefix)
current_projection = []
for item in filtered:
item = item.split(FIELD_DEREFERENCE)
if len(item) <= current_level:
continue
if item[:current_level] == prefix:
if deep:
current_projection.append(item[current_level:])
else:
current_projection.append([item[current_level]])
if deep and not current_projection:
# For deep projections, an empty projection means that all fields should
# be returned without any projection.
return value
# Apply projection.
return apply_projection(current_projection, value) | python | def apply_subfield_projection(field, value, deep=False):
"""Apply projection from request context.
The passed dictionary may be mutated.
:param field: An instance of `Field` or `Serializer`
:type field: `Field` or `Serializer`
:param value: Dictionary to apply the projection to
:type value: dict
:param deep: Also process all deep projections
:type deep: bool
"""
# Discover the root manually. We cannot use either `self.root` or `self.context`
# due to a bug with incorrect caching (see DRF issue #5087).
prefix = []
root = field
while root.parent is not None:
# Skip anonymous serializers (e.g., intermediate ListSerializers).
if root.field_name:
prefix.append(root.field_name)
root = root.parent
prefix = prefix[::-1]
context = getattr(root, '_context', {})
# If there is no request, we cannot perform filtering.
request = context.get('request')
if request is None:
return value
filtered = set(request.query_params.get('fields', '').split(FIELD_SEPARATOR))
filtered.discard('')
if not filtered:
# If there are no fields specified in the filter, return all fields.
return value
# Extract projection for current and deeper levels.
current_level = len(prefix)
current_projection = []
for item in filtered:
item = item.split(FIELD_DEREFERENCE)
if len(item) <= current_level:
continue
if item[:current_level] == prefix:
if deep:
current_projection.append(item[current_level:])
else:
current_projection.append([item[current_level]])
if deep and not current_projection:
# For deep projections, an empty projection means that all fields should
# be returned without any projection.
return value
# Apply projection.
return apply_projection(current_projection, value) | ['def', 'apply_subfield_projection', '(', 'field', ',', 'value', ',', 'deep', '=', 'False', ')', ':', '# Discover the root manually. We cannot use either `self.root` or `self.context`', '# due to a bug with incorrect caching (see DRF issue #5087).', 'prefix', '=', '[', ']', 'root', '=', 'field', 'while', 'root', '.', 'parent', 'is', 'not', 'None', ':', '# Skip anonymous serializers (e.g., intermediate ListSerializers).', 'if', 'root', '.', 'field_name', ':', 'prefix', '.', 'append', '(', 'root', '.', 'field_name', ')', 'root', '=', 'root', '.', 'parent', 'prefix', '=', 'prefix', '[', ':', ':', '-', '1', ']', 'context', '=', 'getattr', '(', 'root', ',', "'_context'", ',', '{', '}', ')', '# If there is no request, we cannot perform filtering.', 'request', '=', 'context', '.', 'get', '(', "'request'", ')', 'if', 'request', 'is', 'None', ':', 'return', 'value', 'filtered', '=', 'set', '(', 'request', '.', 'query_params', '.', 'get', '(', "'fields'", ',', "''", ')', '.', 'split', '(', 'FIELD_SEPARATOR', ')', ')', 'filtered', '.', 'discard', '(', "''", ')', 'if', 'not', 'filtered', ':', '# If there are no fields specified in the filter, return all fields.', 'return', 'value', '# Extract projection for current and deeper levels.', 'current_level', '=', 'len', '(', 'prefix', ')', 'current_projection', '=', '[', ']', 'for', 'item', 'in', 'filtered', ':', 'item', '=', 'item', '.', 'split', '(', 'FIELD_DEREFERENCE', ')', 'if', 'len', '(', 'item', ')', '<=', 'current_level', ':', 'continue', 'if', 'item', '[', ':', 'current_level', ']', '==', 'prefix', ':', 'if', 'deep', ':', 'current_projection', '.', 'append', '(', 'item', '[', 'current_level', ':', ']', ')', 'else', ':', 'current_projection', '.', 'append', '(', '[', 'item', '[', 'current_level', ']', ']', ')', 'if', 'deep', 'and', 'not', 'current_projection', ':', '# For deep projections, an empty projection means that all fields should', '# be returned without any projection.', 'return', 'value', '# Apply projection.', 'return', 'apply_projection', '(', 'current_projection', ',', 'value', ')'] | Apply projection from request context.
The passed dictionary may be mutated.
:param field: An instance of `Field` or `Serializer`
:type field: `Field` or `Serializer`
:param value: Dictionary to apply the projection to
:type value: dict
:param deep: Also process all deep projections
:type deep: bool | ['Apply', 'projection', 'from', 'request', 'context', '.'] | train | https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/rest/projection.py#L8-L64 |
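The core of the function is turning the request's `fields` query parameter into per-level projection paths relative to the current serializer prefix. A toy illustration of that filtering step, assuming `,` for `FIELD_SEPARATOR` and `__` for `FIELD_DEREFERENCE` (the real constants are defined elsewhere in the package):

```python
FIELD_SEPARATOR = ','      # assumed value
FIELD_DEREFERENCE = '__'   # assumed value

def projection_below_prefix(fields_param, prefix):
    """Return projection paths that apply below the given field prefix."""
    filtered = {item for item in fields_param.split(FIELD_SEPARATOR) if item}
    level = len(prefix)
    projection = []
    for item in filtered:
        parts = item.split(FIELD_DEREFERENCE)
        if len(parts) > level and parts[:level] == prefix:
            projection.append(parts[level:])
    return projection

print(projection_below_prefix('output__file,name', ['output']))  # [['file']]
```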
3,811 | log2timeline/dfvfs | dfvfs/vfs/tsk_file_entry.py | TSKFileEntry.GetLinkedFileEntry | def GetLinkedFileEntry(self):
"""Retrieves the linked file entry, e.g. for a symbolic link.
Returns:
TSKFileEntry: linked file entry or None.
"""
link = self._GetLink()
if not link:
return None
# TODO: is there a way to determine the link inode number here?
link_inode = None
parent_path_spec = getattr(self.path_spec, 'parent', None)
path_spec = tsk_path_spec.TSKPathSpec(
location=link, parent=parent_path_spec)
root_inode = self._file_system.GetRootInode()
is_root = bool(
link == self._file_system.LOCATION_ROOT or (
link_inode is not None and root_inode is not None and
link_inode == root_inode))
return TSKFileEntry(
self._resolver_context, self._file_system, path_spec, is_root=is_root) | python | def GetLinkedFileEntry(self):
"""Retrieves the linked file entry, e.g. for a symbolic link.
Returns:
TSKFileEntry: linked file entry or None.
"""
link = self._GetLink()
if not link:
return None
# TODO: is there a way to determine the link inode number here?
link_inode = None
parent_path_spec = getattr(self.path_spec, 'parent', None)
path_spec = tsk_path_spec.TSKPathSpec(
location=link, parent=parent_path_spec)
root_inode = self._file_system.GetRootInode()
is_root = bool(
link == self._file_system.LOCATION_ROOT or (
link_inode is not None and root_inode is not None and
link_inode == root_inode))
return TSKFileEntry(
self._resolver_context, self._file_system, path_spec, is_root=is_root) | ['def', 'GetLinkedFileEntry', '(', 'self', ')', ':', 'link', '=', 'self', '.', '_GetLink', '(', ')', 'if', 'not', 'link', ':', 'return', 'None', '# TODO: is there a way to determine the link inode number here?', 'link_inode', '=', 'None', 'parent_path_spec', '=', 'getattr', '(', 'self', '.', 'path_spec', ',', "'parent'", ',', 'None', ')', 'path_spec', '=', 'tsk_path_spec', '.', 'TSKPathSpec', '(', 'location', '=', 'link', ',', 'parent', '=', 'parent_path_spec', ')', 'root_inode', '=', 'self', '.', '_file_system', '.', 'GetRootInode', '(', ')', 'is_root', '=', 'bool', '(', 'link', '==', 'self', '.', '_file_system', '.', 'LOCATION_ROOT', 'or', '(', 'link_inode', 'is', 'not', 'None', 'and', 'root_inode', 'is', 'not', 'None', 'and', 'link_inode', '==', 'root_inode', ')', ')', 'return', 'TSKFileEntry', '(', 'self', '.', '_resolver_context', ',', 'self', '.', '_file_system', ',', 'path_spec', ',', 'is_root', '=', 'is_root', ')'] | Retrieves the linked file entry, e.g. for a symbolic link.
Returns:
TSKFileEntry: linked file entry or None. | ['Retrieves', 'the', 'linked', 'file', 'entry', 'e', '.', 'g', '.', 'for', 'a', 'symbolic', 'link', '.'] | train | https://github.com/log2timeline/dfvfs/blob/2b3ccd115f9901d89f383397d4a1376a873c83c4/dfvfs/vfs/tsk_file_entry.py#L813-L837 |
3,812 | lrq3000/pyFileFixity | pyFileFixity/lib/brownanrs/rs.py | RSCoder._forney | def _forney(self, omega, X):
'''Computes the error magnitudes. Works also with erasures and errors+erasures beyond the (n-k)//2 bound, here the bound is 2*e+v <= (n-k-1) with e the number of errors and v the number of erasures.'''
# XXX Is floor division okay here? Should this be ceiling?
Y = [] # the final result, the error/erasures polynomial (contain the values that we should minus on the received message to get the repaired message)
Xlength = len(X)
for l, Xl in enumerate(X):
Xl_inv = Xl.inverse()
# Compute the formal derivative of the error locator polynomial (see Blahut, Algebraic codes for data transmission, pp 196-197).
# the formal derivative of the errata locator is used as the denominator of the Forney Algorithm, which simply says that the ith error value is given by error_evaluator(gf_inverse(Xi)) / error_locator_derivative(gf_inverse(Xi)). See Blahut, Algebraic codes for data transmission, pp 196-197.
sigma_prime_tmp = [1 - Xl_inv * X[j] for j in _range(Xlength) if j != l] # TODO? maybe a faster way would be to precompute sigma_prime = sigma[len(sigma) & 1:len(sigma):2] and then just do sigma_prime.evaluate(X[j]) ? (like in reedsolo.py)
# compute the product
sigma_prime = 1
for coef in sigma_prime_tmp:
sigma_prime = sigma_prime * coef
# equivalent to: sigma_prime = functools.reduce(mul, sigma_prime, 1)
# Compute Yl
# This is a more faithful translation of the theoretical equation contrary to the old forney method. Here it is exactly copy/pasted from the included presentation decoding_rs.pdf: Yl = omega(Xl.inverse()) / prod(1 - Xj*Xl.inverse()) for j in len(X) (in the paper it's for j in s, but it's useless when len(X) < s because we compute neutral terms 1 for nothing, and wrong when correcting more than s erasures or erasures+errors since it prevents computing all required terms).
# Thus here this method works with erasures too because firstly we fixed the equation to be like the theoretical one (don't know why it was modified in _old_forney(), if it's an optimization, it doesn't enhance anything), and secondly because we removed the product bound on s, which prevented computing errors and erasures above the s=(n-k)//2 bound.
# The best resource I have found for the correct equation is https://en.wikipedia.org/wiki/Forney_algorithm -- note that in the article, fcr is defined as c.
Yl = - (Xl**(1-self.fcr) * omega.evaluate(Xl_inv) / sigma_prime) # sigma_prime is the denominator of the Forney algorithm
Y.append(Yl)
return Y | python | def _forney(self, omega, X):
'''Computes the error magnitudes. Works also with erasures and errors+erasures beyond the (n-k)//2 bound, here the bound is 2*e+v <= (n-k-1) with e the number of errors and v the number of erasures.'''
# XXX Is floor division okay here? Should this be ceiling?
Y = [] # the final result, the error/erasures polynomial (contain the values that we should minus on the received message to get the repaired message)
Xlength = len(X)
for l, Xl in enumerate(X):
Xl_inv = Xl.inverse()
# Compute the formal derivative of the error locator polynomial (see Blahut, Algebraic codes for data transmission, pp 196-197).
# the formal derivative of the errata locator is used as the denominator of the Forney Algorithm, which simply says that the ith error value is given by error_evaluator(gf_inverse(Xi)) / error_locator_derivative(gf_inverse(Xi)). See Blahut, Algebraic codes for data transmission, pp 196-197.
sigma_prime_tmp = [1 - Xl_inv * X[j] for j in _range(Xlength) if j != l] # TODO? maybe a faster way would be to precompute sigma_prime = sigma[len(sigma) & 1:len(sigma):2] and then just do sigma_prime.evaluate(X[j]) ? (like in reedsolo.py)
# compute the product
sigma_prime = 1
for coef in sigma_prime_tmp:
sigma_prime = sigma_prime * coef
# equivalent to: sigma_prime = functools.reduce(mul, sigma_prime, 1)
# Compute Yl
# This is a more faithful translation of the theoretical equation contrary to the old forney method. Here it is exactly copy/pasted from the included presentation decoding_rs.pdf: Yl = omega(Xl.inverse()) / prod(1 - Xj*Xl.inverse()) for j in len(X) (in the paper it's for j in s, but it's useless when len(X) < s because we compute neutral terms 1 for nothing, and wrong when correcting more than s erasures or erasures+errors since it prevents computing all required terms).
# Thus here this method works with erasures too because firstly we fixed the equation to be like the theoretical one (don't know why it was modified in _old_forney(), if it's an optimization, it doesn't enhance anything), and secondly because we removed the product bound on s, which prevented computing errors and erasures above the s=(n-k)//2 bound.
# The best resource I have found for the correct equation is https://en.wikipedia.org/wiki/Forney_algorithm -- note that in the article, fcr is defined as c.
Yl = - (Xl**(1-self.fcr) * omega.evaluate(Xl_inv) / sigma_prime) # sigma_prime is the denominator of the Forney algorithm
Y.append(Yl)
return Y | ['def', '_forney', '(', 'self', ',', 'omega', ',', 'X', ')', ':', '# XXX Is floor division okay here? Should this be ceiling?', 'Y', '=', '[', ']', '# the final result, the error/erasures polynomial (contain the values that we should minus on the received message to get the repaired message)', 'Xlength', '=', 'len', '(', 'X', ')', 'for', 'l', ',', 'Xl', 'in', 'enumerate', '(', 'X', ')', ':', 'Xl_inv', '=', 'Xl', '.', 'inverse', '(', ')', '# Compute the formal derivative of the error locator polynomial (see Blahut, Algebraic codes for data transmission, pp 196-197).', '# the formal derivative of the errata locator is used as the denominator of the Forney Algorithm, which simply says that the ith error value is given by error_evaluator(gf_inverse(Xi)) / error_locator_derivative(gf_inverse(Xi)). See Blahut, Algebraic codes for data transmission, pp 196-197.', 'sigma_prime_tmp', '=', '[', '1', '-', 'Xl_inv', '*', 'X', '[', 'j', ']', 'for', 'j', 'in', '_range', '(', 'Xlength', ')', 'if', 'j', '!=', 'l', ']', '# TODO? maybe a faster way would be to precompute sigma_prime = sigma[len(sigma) & 1:len(sigma):2] and then just do sigma_prime.evaluate(X[j]) ? (like in reedsolo.py)', '# compute the product', 'sigma_prime', '=', '1', 'for', 'coef', 'in', 'sigma_prime_tmp', ':', 'sigma_prime', '=', 'sigma_prime', '*', 'coef', '# equivalent to: sigma_prime = functools.reduce(mul, sigma_prime, 1)', '# Compute Yl', "# This is a more faithful translation of the theoretical equation contrary to the old forney method. Here it is exactly copy/pasted from the included presentation decoding_rs.pdf: Yl = omega(Xl.inverse()) / prod(1 - Xj*Xl.inverse()) for j in len(X) (in the paper it's for j in s, but it's useless when len(X) < s because we compute neutral terms 1 for nothing, and wrong when correcting more than s erasures or erasures+errors since it prevents computing all required terms).", "# Thus here this method works with erasures too because firstly we fixed the equation to be like the theoretical one (don't know why it was modified in _old_forney(), if it's an optimization, it doesn't enhance anything), and secondly because we removed the product bound on s, which prevented computing errors and erasures above the s=(n-k)//2 bound.", '# The best resource I have found for the correct equation is https://en.wikipedia.org/wiki/Forney_algorithm -- note that in the article, fcr is defined as c.', 'Yl', '=', '-', '(', 'Xl', '**', '(', '1', '-', 'self', '.', 'fcr', ')', '*', 'omega', '.', 'evaluate', '(', 'Xl_inv', ')', '/', 'sigma_prime', ')', '# sigma_prime is the denominator of the Forney algorithm', 'Y', '.', 'append', '(', 'Yl', ')', 'return', 'Y'] | Computes the error magnitudes. Works also with erasures and errors+erasures beyond the (n-k)//2 bound, here the bound is 2*e+v <= (n-k-1) with e the number of errors and v the number of erasures. | ['Computes', 'the', 'error', 'magnitudes', '.', 'Works', 'also', 'with', 'erasures', 'and', 'errors', '+', 'erasures', 'beyond', 'the', '(', 'n', '-', 'k', ')', '//', '2', 'bound', 'here', 'the', 'bound', 'is', '2', '*', 'e', '+', 'v', '<', '=', '(', 'n', '-', 'k', '-', '1', ')', 'with', 'e', 'the', 'number', 'of', 'errors', 'and', 'v', 'the', 'number', 'of', 'erasures', '.'] | train | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/brownanrs/rs.py#L920-L946 |
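Written out, the magnitude the loop assigns to each errata locator $X_l$ is the Forney expression referenced in the comments:

$$Y_l \;=\; -\,X_l^{\,1 - c}\,\frac{\Omega\!\left(X_l^{-1}\right)}{\prod_{j \neq l}\left(1 - X_j X_l^{-1}\right)},$$

where $c$ is the first consecutive root (`self.fcr`), $\Omega$ is the errata evaluator polynomial `omega`, and the denominator is the product accumulated in `sigma_prime`.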
3,813 | openthread/openthread | tools/harness-thci/OpenThread.py | OpenThread.__convertIp6PrefixStringToIp6Address | def __convertIp6PrefixStringToIp6Address(self, strIp6Prefix):
"""convert IPv6 prefix string to IPv6 dotted-quad format
for example:
2001000000000000 -> 2001::
Args:
strIp6Prefix: IPv6 address string
Returns:
IPv6 address dotted-quad format
"""
prefix1 = strIp6Prefix.rstrip('L')
prefix2 = prefix1.lstrip("0x")
hexPrefix = str(prefix2).ljust(16,'0')
hexIter = iter(hexPrefix)
finalMac = ':'.join(a + b + c + d for a,b,c,d in zip(hexIter, hexIter,hexIter,hexIter))
prefix = str(finalMac)
strIp6Prefix = prefix[:20]
return strIp6Prefix +':' | python | def __convertIp6PrefixStringToIp6Address(self, strIp6Prefix):
"""convert IPv6 prefix string to IPv6 dotted-quad format
for example:
2001000000000000 -> 2001::
Args:
strIp6Prefix: IPv6 address string
Returns:
IPv6 address dotted-quad format
"""
prefix1 = strIp6Prefix.rstrip('L')
prefix2 = prefix1.lstrip("0x")
hexPrefix = str(prefix2).ljust(16,'0')
hexIter = iter(hexPrefix)
finalMac = ':'.join(a + b + c + d for a,b,c,d in zip(hexIter, hexIter,hexIter,hexIter))
prefix = str(finalMac)
strIp6Prefix = prefix[:20]
return strIp6Prefix +':' | ['def', '__convertIp6PrefixStringToIp6Address', '(', 'self', ',', 'strIp6Prefix', ')', ':', 'prefix1', '=', 'strIp6Prefix', '.', 'rstrip', '(', "'L'", ')', 'prefix2', '=', 'prefix1', '.', 'lstrip', '(', '"0x"', ')', 'hexPrefix', '=', 'str', '(', 'prefix2', ')', '.', 'ljust', '(', '16', ',', "'0'", ')', 'hexIter', '=', 'iter', '(', 'hexPrefix', ')', 'finalMac', '=', "':'", '.', 'join', '(', 'a', '+', 'b', '+', 'c', '+', 'd', 'for', 'a', ',', 'b', ',', 'c', ',', 'd', 'in', 'zip', '(', 'hexIter', ',', 'hexIter', ',', 'hexIter', ',', 'hexIter', ')', ')', 'prefix', '=', 'str', '(', 'finalMac', ')', 'strIp6Prefix', '=', 'prefix', '[', ':', '20', ']', 'return', 'strIp6Prefix', '+', "':'"] | convert IPv6 prefix string to IPv6 dotted-quad format
for example:
2001000000000000 -> 2001::
Args:
strIp6Prefix: IPv6 address string
Returns:
IPv6 address dotted-quad format | ['convert', 'IPv6', 'prefix', 'string', 'to', 'IPv6', 'dotted', '-', 'quad', 'format', 'for', 'example', ':', '2001000000000000', '-', '>', '2001', '::'] | train | https://github.com/openthread/openthread/blob/0208d10563aa21c518092985c78ecf9cd223ab74/tools/harness-thci/OpenThread.py#L485-L503 |
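A compact standalone version of the same string manipulation, padding the hex prefix to 16 digits and grouping it into colon-separated quartets (the input value is just an example):

```python
def ipv6_prefix_from_hex(value):
    """'0x2001000000000000L' -> '2001:0000:0000:0000:' (mirrors the snippet above)."""
    # note: lstrip('0x') strips characters rather than a prefix, as in the original
    hex_prefix = str(value).rstrip('L').lstrip('0x').ljust(16, '0')
    groups = [hex_prefix[i:i + 4] for i in range(0, 16, 4)]
    return ':'.join(groups) + ':'

print(ipv6_prefix_from_hex('0x2001000000000000'))  # 2001:0000:0000:0000:
```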
3,814 | instacart/jardin | jardin/model.py | Model.find | def find(self, id, **kwargs):
"""
Finds a record by its id in the model's table in the replica database.
:returns: an instance of the model.
"""
return self.find_by(values={self.primary_key: id}, **kwargs) | python | def find(self, id, **kwargs):
"""
Finds a record by its id in the model's table in the replica database.
:returns: an instance of the model.
"""
return self.find_by(values={self.primary_key: id}, **kwargs) | ['def', 'find', '(', 'self', ',', 'id', ',', '*', '*', 'kwargs', ')', ':', 'return', 'self', '.', 'find_by', '(', 'values', '=', '{', 'self', '.', 'primary_key', ':', 'id', '}', ',', '*', '*', 'kwargs', ')'] | Finds a record by its id in the model's table in the replica database.
:returns: an instance of the model. | ['Finds', 'a', 'record', 'by', 'its', 'id', 'in', 'the', 'model', 's', 'table', 'in', 'the', 'replica', 'database', '.', ':', 'returns', ':', 'an', 'instance', 'of', 'the', 'model', '.'] | train | https://github.com/instacart/jardin/blob/007e283b9ccd621b60b86679148cacd9eab7c4e3/jardin/model.py#L426-L431 |
3,815 | StellarCN/py-stellar-base | stellar_base/operation.py | Payment.to_xdr_object | def to_xdr_object(self):
"""Creates an XDR Operation object that represents this
:class:`Payment`.
"""
asset = self.asset.to_xdr_object()
destination = account_xdr_object(self.destination)
amount = Operation.to_xdr_amount(self.amount)
payment_op = Xdr.types.PaymentOp(destination, asset, amount)
self.body.type = Xdr.const.PAYMENT
self.body.paymentOp = payment_op
return super(Payment, self).to_xdr_object() | python | def to_xdr_object(self):
"""Creates an XDR Operation object that represents this
:class:`Payment`.
"""
asset = self.asset.to_xdr_object()
destination = account_xdr_object(self.destination)
amount = Operation.to_xdr_amount(self.amount)
payment_op = Xdr.types.PaymentOp(destination, asset, amount)
self.body.type = Xdr.const.PAYMENT
self.body.paymentOp = payment_op
return super(Payment, self).to_xdr_object() | ['def', 'to_xdr_object', '(', 'self', ')', ':', 'asset', '=', 'self', '.', 'asset', '.', 'to_xdr_object', '(', ')', 'destination', '=', 'account_xdr_object', '(', 'self', '.', 'destination', ')', 'amount', '=', 'Operation', '.', 'to_xdr_amount', '(', 'self', '.', 'amount', ')', 'payment_op', '=', 'Xdr', '.', 'types', '.', 'PaymentOp', '(', 'destination', ',', 'asset', ',', 'amount', ')', 'self', '.', 'body', '.', 'type', '=', 'Xdr', '.', 'const', '.', 'PAYMENT', 'self', '.', 'body', '.', 'paymentOp', '=', 'payment_op', 'return', 'super', '(', 'Payment', ',', 'self', ')', '.', 'to_xdr_object', '(', ')'] | Creates an XDR Operation object that represents this
:class:`Payment`. | ['Creates', 'an', 'XDR', 'Operation', 'object', 'that', 'represents', 'this', ':', 'class', ':', 'Payment', '.'] | train | https://github.com/StellarCN/py-stellar-base/blob/cce2e782064fb3955c85e1696e630d67b1010848/stellar_base/operation.py#L265-L278 |
3,816 | ministryofjustice/money-to-prisoners-common | mtp_common/build_tasks/executor.py | Context.shell | def shell(self, command, *args, environment=None):
"""
Runs a shell command
"""
command += ' ' + ' '.join(args)
command = command.strip()
self.debug(self.yellow_style('$ %s' % command))
env = self.env.copy()
env.update(environment or {})
return subprocess.call(command, shell=True, env=env) | python | def shell(self, command, *args, environment=None):
"""
Runs a shell command
"""
command += ' ' + ' '.join(args)
command = command.strip()
self.debug(self.yellow_style('$ %s' % command))
env = self.env.copy()
env.update(environment or {})
return subprocess.call(command, shell=True, env=env) | ['def', 'shell', '(', 'self', ',', 'command', ',', '*', 'args', ',', 'environment', '=', 'None', ')', ':', 'command', '+=', "' '", '+', "' '", '.', 'join', '(', 'args', ')', 'command', '=', 'command', '.', 'strip', '(', ')', 'self', '.', 'debug', '(', 'self', '.', 'yellow_style', '(', "'$ %s'", '%', 'command', ')', ')', 'env', '=', 'self', '.', 'env', '.', 'copy', '(', ')', 'env', '.', 'update', '(', 'environment', 'or', '{', '}', ')', 'return', 'subprocess', '.', 'call', '(', 'command', ',', 'shell', '=', 'True', ',', 'env', '=', 'env', ')'] | Runs a shell command | ['Runs', 'a', 'shell', 'command'] | train | https://github.com/ministryofjustice/money-to-prisoners-common/blob/33c43a2912cb990d9148da7c8718f480f07d90a1/mtp_common/build_tasks/executor.py#L434-L443 |
3,817 | serhatbolsu/robotframework-appiumlibrary | AppiumLibrary/keywords/_applicationmanagement.py | _ApplicationManagementKeywords.log_source | def log_source(self, loglevel='INFO'):
"""Logs and returns the entire html source of the current page or frame.
The `loglevel` argument defines the used log level. Valid log levels are
`WARN`, `INFO` (default), `DEBUG`, `TRACE` and `NONE` (no logging).
"""
ll = loglevel.upper()
if ll == 'NONE':
return ''
else:
if "run_keyword_and_ignore_error" not in [check_error_ignored[3] for check_error_ignored in inspect.stack()]:
source = self._current_application().page_source
self._log(source, ll)
return source
else:
return '' | python | def log_source(self, loglevel='INFO'):
"""Logs and returns the entire html source of the current page or frame.
The `loglevel` argument defines the used log level. Valid log levels are
`WARN`, `INFO` (default), `DEBUG`, `TRACE` and `NONE` (no logging).
"""
ll = loglevel.upper()
if ll == 'NONE':
return ''
else:
if "run_keyword_and_ignore_error" not in [check_error_ignored[3] for check_error_ignored in inspect.stack()]:
source = self._current_application().page_source
self._log(source, ll)
return source
else:
return '' | ['def', 'log_source', '(', 'self', ',', 'loglevel', '=', "'INFO'", ')', ':', 'll', '=', 'loglevel', '.', 'upper', '(', ')', 'if', 'll', '==', "'NONE'", ':', 'return', "''", 'else', ':', 'if', '"run_keyword_and_ignore_error"', 'not', 'in', '[', 'check_error_ignored', '[', '3', ']', 'for', 'check_error_ignored', 'in', 'inspect', '.', 'stack', '(', ')', ']', ':', 'source', '=', 'self', '.', '_current_application', '(', ')', '.', 'page_source', 'self', '.', '_log', '(', 'source', ',', 'll', ')', 'return', 'source', 'else', ':', 'return', "''"] | Logs and returns the entire html source of the current page or frame.
The `loglevel` argument defines the used log level. Valid log levels are
`WARN`, `INFO` (default), `DEBUG`, `TRACE` and `NONE` (no logging). | ['Logs', 'and', 'returns', 'the', 'entire', 'html', 'source', 'of', 'the', 'current', 'page', 'or', 'frame', '.', 'The', 'loglevel', 'argument', 'defines', 'the', 'used', 'log', 'level', '.', 'Valid', 'log', 'levels', 'are', 'WARN', 'INFO', '(', 'default', ')', 'DEBUG', 'TRACE', 'and', 'NONE', '(', 'no', 'logging', ')', '.'] | train | https://github.com/serhatbolsu/robotframework-appiumlibrary/blob/91c808cf0602af6be8135ac529fa488fded04a85/AppiumLibrary/keywords/_applicationmanagement.py#L172-L187 |
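
The keyword above needs an open Appium session, so this is only a standalone sketch of how the loglevel argument typically gates output; the helper name is invented and is not part of AppiumLibrary.

def log_if_enabled(message, loglevel='INFO'):
    # 'NONE' suppresses logging entirely, mirroring the keyword's behaviour.
    level = loglevel.upper()
    if level == 'NONE':
        return ''
    print('[%s] %s' % (level, message))
    return message

log_if_enabled('<html>...</html>', loglevel='DEBUG')   # logged and returned
log_if_enabled('<html>...</html>', loglevel='NONE')    # returns '' silently
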
3,818 | Scoppio/RagnarokEngine3 | RagnarokEngine3/RE3.py | Vector2.angle | def angle(vec1, vec2):
"""Calculate the angle between two Vector2's"""
dotp = Vector2.dot(vec1, vec2)
mag1 = vec1.length()
mag2 = vec2.length()
result = dotp / (mag1 * mag2)
return math.acos(result) | python | def angle(vec1, vec2):
"""Calculate the angle between two Vector2's"""
dotp = Vector2.dot(vec1, vec2)
mag1 = vec1.length()
mag2 = vec2.length()
result = dotp / (mag1 * mag2)
return math.acos(result) | ['def', 'angle', '(', 'vec1', ',', 'vec2', ')', ':', 'dotp', '=', 'Vector2', '.', 'dot', '(', 'vec1', ',', 'vec2', ')', 'mag1', '=', 'vec1', '.', 'length', '(', ')', 'mag2', '=', 'vec2', '.', 'length', '(', ')', 'result', '=', 'dotp', '/', '(', 'mag1', '*', 'mag2', ')', 'return', 'math', '.', 'acos', '(', 'result', ')'] | Calculate the angle between two Vector2's | ['Calculate', 'the', 'angle', 'between', 'two', 'Vector2', 's'] | train | https://github.com/Scoppio/RagnarokEngine3/blob/4395d419ccd64fe9327c41f200b72ee0176ad896/RagnarokEngine3/RE3.py#L213-L219 |
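
The same dot-product and arccosine formula with plain tuples, independent of RagnarokEngine3; angle_between() is an illustrative stand-in, not the engine's API.

import math

def angle_between(v1, v2):
    # angle = acos((v1 . v2) / (|v1| * |v2|)), in radians
    dot = v1[0] * v2[0] + v1[1] * v2[1]
    return math.acos(dot / (math.hypot(*v1) * math.hypot(*v2)))

print(angle_between((1.0, 0.0), (0.0, 1.0)))  # pi/2, roughly 1.5707963
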
3,819 | awslabs/aws-sam-cli | samcli/local/docker/lambda_container.py | LambdaContainer._get_additional_options | def _get_additional_options(runtime, debug_options):
"""
Return additional Docker container options. Used by container debug mode to enable certain container
security options.
:param DebugContext debug_options: DebugContext for the runtime of the container.
:return dict: Dictionary containing additional arguments to be passed to container creation.
"""
if not debug_options:
return None
opts = {}
if runtime == Runtime.go1x.value:
# These options are required for delve to function properly inside a docker container on docker < 1.12
# See https://github.com/moby/moby/issues/21051
opts["security_opt"] = ["seccomp:unconfined"]
opts["cap_add"] = ["SYS_PTRACE"]
return opts | python | def _get_additional_options(runtime, debug_options):
"""
Return additional Docker container options. Used by container debug mode to enable certain container
security options.
:param DebugContext debug_options: DebugContext for the runtime of the container.
:return dict: Dictionary containing additional arguments to be passed to container creation.
"""
if not debug_options:
return None
opts = {}
if runtime == Runtime.go1x.value:
# These options are required for delve to function properly inside a docker container on docker < 1.12
# See https://github.com/moby/moby/issues/21051
opts["security_opt"] = ["seccomp:unconfined"]
opts["cap_add"] = ["SYS_PTRACE"]
return opts | ['def', '_get_additional_options', '(', 'runtime', ',', 'debug_options', ')', ':', 'if', 'not', 'debug_options', ':', 'return', 'None', 'opts', '=', '{', '}', 'if', 'runtime', '==', 'Runtime', '.', 'go1x', '.', 'value', ':', '# These options are required for delve to function properly inside a docker container on docker < 1.12', '# See https://github.com/moby/moby/issues/21051', 'opts', '[', '"security_opt"', ']', '=', '[', '"seccomp:unconfined"', ']', 'opts', '[', '"cap_add"', ']', '=', '[', '"SYS_PTRACE"', ']', 'return', 'opts'] | Return additional Docker container options. Used by container debug mode to enable certain container
security options.
:param DebugContext debug_options: DebugContext for the runtime of the container.
:return dict: Dictionary containing additional arguments to be passed to container creation. | ['Return', 'additional', 'Docker', 'container', 'options', '.', 'Used', 'by', 'container', 'debug', 'mode', 'to', 'enable', 'certain', 'container', 'security', 'options', '.', ':', 'param', 'DebugContext', 'debug_options', ':', 'DebugContext', 'for', 'the', 'runtime', 'of', 'the', 'container', '.', ':', 'return', 'dict', ':', 'Dictionary', 'containing', 'additional', 'arguments', 'to', 'be', 'passed', 'to', 'container', 'creation', '.'] | train | https://github.com/awslabs/aws-sam-cli/blob/c05af5e7378c6f05f7d82ad3f0bca17204177db6/samcli/local/docker/lambda_container.py#L104-L122 |
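
A hypothetical harness reproducing the dictionary shape returned for a go1.x function in debug mode; the option values come straight from the function above, while the wrapper itself is not sam-cli code.

def debug_container_options(runtime, debugging):
    if not debugging:
        return None
    opts = {}
    if runtime == 'go1.x':
        # delve needs ptrace and an unconfined seccomp profile on older Docker
        opts['security_opt'] = ['seccomp:unconfined']
        opts['cap_add'] = ['SYS_PTRACE']
    return opts

print(debug_container_options('go1.x', True))
# {'security_opt': ['seccomp:unconfined'], 'cap_add': ['SYS_PTRACE']}
print(debug_container_options('python3.6', True))  # {}
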
3,820 | hellosign/hellosign-python-sdk | hellosign_sdk/hsclient.py | HSClient.get_embedded_object | def get_embedded_object(self, signature_id):
''' Retrieves an embedded signing object
Retrieves an embedded object containing a signature url that can be opened in an iFrame.
Args:
signature_id (str): The id of the signature to get a signature url for
Returns:
An Embedded object
'''
request = self._get_request()
return request.get(self.EMBEDDED_OBJECT_GET_URL + signature_id) | python | def get_embedded_object(self, signature_id):
''' Retrieves an embedded signing object
Retrieves an embedded object containing a signature url that can be opened in an iFrame.
Args:
signature_id (str): The id of the signature to get a signature url for
Returns:
An Embedded object
'''
request = self._get_request()
return request.get(self.EMBEDDED_OBJECT_GET_URL + signature_id) | ['def', 'get_embedded_object', '(', 'self', ',', 'signature_id', ')', ':', 'request', '=', 'self', '.', '_get_request', '(', ')', 'return', 'request', '.', 'get', '(', 'self', '.', 'EMBEDDED_OBJECT_GET_URL', '+', 'signature_id', ')'] | Retrieves an embedded signing object
Retrieves an embedded object containing a signature url that can be opened in an iFrame.
Args:
signature_id (str): The id of the signature to get a signature url for
Returns:
An Embedded object | ['Retrieves', 'a', 'embedded', 'signing', 'object'] | train | https://github.com/hellosign/hellosign-python-sdk/blob/4325a29ad5766380a214eac3914511f62f7ecba4/hellosign_sdk/hsclient.py#L966-L980 |
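
A hedged usage sketch, assuming the hellosign_sdk package is installed; the API key and signature id are placeholders, and the sign_url attribute is what the returned Embedded object is normally used for.

from hellosign_sdk import HSClient

client = HSClient(api_key='YOUR_API_KEY')               # placeholder key
embedded = client.get_embedded_object('SIGNATURE_ID')   # placeholder signature id
# The Embedded object carries the signing URL to load inside an iFrame.
print(embedded.sign_url)
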
3,821 | Microsoft/nni | tools/nni_cmd/config_utils.py | Experiments.read_file | def read_file(self):
'''load config from local file'''
if os.path.exists(self.experiment_file):
try:
with open(self.experiment_file, 'r') as file:
return json.load(file)
except ValueError:
return {}
return {} | python | def read_file(self):
'''load config from local file'''
if os.path.exists(self.experiment_file):
try:
with open(self.experiment_file, 'r') as file:
return json.load(file)
except ValueError:
return {}
return {} | ['def', 'read_file', '(', 'self', ')', ':', 'if', 'os', '.', 'path', '.', 'exists', '(', 'self', '.', 'experiment_file', ')', ':', 'try', ':', 'with', 'open', '(', 'self', '.', 'experiment_file', ',', "'r'", ')', 'as', 'file', ':', 'return', 'json', '.', 'load', '(', 'file', ')', 'except', 'ValueError', ':', 'return', '{', '}', 'return', '{', '}'] | load config from local file | ['load', 'config', 'from', 'local', 'file'] | train | https://github.com/Microsoft/nni/blob/c7cc8db32da8d2ec77a382a55089f4e17247ce41/tools/nni_cmd/config_utils.py#L114-L122 |
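
The same read-with-fallback pattern in isolation: a missing file or invalid JSON both yield an empty dict; read_json_or_empty() is an illustrative stand-in for the method above.

import json
import os

def read_json_or_empty(path):
    if os.path.exists(path):
        try:
            with open(path, 'r') as handle:
                return json.load(handle)
        except ValueError:
            # Malformed JSON falls back to an empty config as well.
            return {}
    return {}

print(read_json_or_empty('/tmp/does-not-exist.json'))  # {}
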
3,822 | PyCQA/pydocstyle | src/pydocstyle/violations.py | ErrorRegistry.to_rst | def to_rst(cls) -> str:
"""Output the registry as reStructuredText, for documentation."""
sep_line = '+' + 6 * '-' + '+' + '-' * 71 + '+\n'
blank_line = '|' + 78 * ' ' + '|\n'
table = ''
for group in cls.groups:
table += sep_line
table += blank_line
table += '|' + '**{}**'.format(group.name).center(78) + '|\n'
table += blank_line
for error in group.errors:
table += sep_line
table += ('|' + error.code.center(6) + '| ' +
error.short_desc.ljust(70) + '|\n')
table += sep_line
return table | python | def to_rst(cls) -> str:
"""Output the registry as reStructuredText, for documentation."""
sep_line = '+' + 6 * '-' + '+' + '-' * 71 + '+\n'
blank_line = '|' + 78 * ' ' + '|\n'
table = ''
for group in cls.groups:
table += sep_line
table += blank_line
table += '|' + '**{}**'.format(group.name).center(78) + '|\n'
table += blank_line
for error in group.errors:
table += sep_line
table += ('|' + error.code.center(6) + '| ' +
error.short_desc.ljust(70) + '|\n')
table += sep_line
return table | ['def', 'to_rst', '(', 'cls', ')', '->', 'str', ':', 'sep_line', '=', "'+'", '+', '6', '*', "'-'", '+', "'+'", '+', "'-'", '*', '71', '+', "'+\\n'", 'blank_line', '=', "'|'", '+', '78', '*', "' '", '+', "'|\\n'", 'table', '=', "''", 'for', 'group', 'in', 'cls', '.', 'groups', ':', 'table', '+=', 'sep_line', 'table', '+=', 'blank_line', 'table', '+=', "'|'", '+', "'**{}**'", '.', 'format', '(', 'group', '.', 'name', ')', '.', 'center', '(', '78', ')', '+', "'|\\n'", 'table', '+=', 'blank_line', 'for', 'error', 'in', 'group', '.', 'errors', ':', 'table', '+=', 'sep_line', 'table', '+=', '(', "'|'", '+', 'error', '.', 'code', '.', 'center', '(', '6', ')', '+', "'| '", '+', 'error', '.', 'short_desc', '.', 'ljust', '(', '70', ')', '+', "'|\\n'", ')', 'table', '+=', 'sep_line', 'return', 'table'] | Output the registry as reStructuredText, for documentation. | ['Output', 'the', 'registry', 'as', 'reStructuredText', 'for', 'documentation', '.'] | train | https://github.com/PyCQA/pydocstyle/blob/2549847f9efad225789f931e83dfe782418ca13e/src/pydocstyle/violations.py#L153-L168 |
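
A toy reconstruction of the same reStructuredText grid-table layout with an invented group and error code, just to show how the border, blank and row lines all line up to 80 characters.

def rows_to_rst(group_name, errors):
    sep = '+' + 6 * '-' + '+' + '-' * 71 + '+\n'
    blank = '|' + 78 * ' ' + '|\n'
    out = sep + blank + '|' + ('**%s**' % group_name).center(78) + '|\n' + blank
    for code, desc in errors:
        out += sep + '|' + code.center(6) + '| ' + desc.ljust(70) + '|\n'
    return out + sep

print(rows_to_rst('Demo', [('D000', 'Example short description')]))
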
3,823 | resonai/ybt | yabt/buildcontext.py | BuildContext.can_use_cache | def can_use_cache(self, target: Target) -> bool:
"""Return True if should attempt to load `target` from cache.
Return False if `target` has to be built, regardless of its cache
status (because cache is disabled, or dependencies are dirty).
"""
# if caching is disabled for this execution, then all targets are dirty
if self.conf.no_build_cache:
return False
# if the target's `cachable` prop is falsy, then it is dirty
if not target.props.cachable:
return False
# if any dependency of the target is dirty, then the target is dirty
if any(self.targets[dep].is_dirty for dep in target.deps):
return False
# if the target has a dirty buildenv then it's also dirty
if target.buildenv and self.targets[target.buildenv].is_dirty:
return False
return True | python | def can_use_cache(self, target: Target) -> bool:
"""Return True if should attempt to load `target` from cache.
Return False if `target` has to be built, regardless of its cache
status (because cache is disabled, or dependencies are dirty).
"""
# if caching is disabled for this execution, then all targets are dirty
if self.conf.no_build_cache:
return False
# if the target's `cachable` prop is falsy, then it is dirty
if not target.props.cachable:
return False
# if any dependency of the target is dirty, then the target is dirty
if any(self.targets[dep].is_dirty for dep in target.deps):
return False
# if the target has a dirty buildenv then it's also dirty
if target.buildenv and self.targets[target.buildenv].is_dirty:
return False
return True | ['def', 'can_use_cache', '(', 'self', ',', 'target', ':', 'Target', ')', '->', 'bool', ':', '# if caching is disabled for this execution, then all targets are dirty', 'if', 'self', '.', 'conf', '.', 'no_build_cache', ':', 'return', 'False', "# if the target's `cachable` prop is falsy, then it is dirty", 'if', 'not', 'target', '.', 'props', '.', 'cachable', ':', 'return', 'False', '# if any dependency of the target is dirty, then the target is dirty', 'if', 'any', '(', 'self', '.', 'targets', '[', 'dep', ']', '.', 'is_dirty', 'for', 'dep', 'in', 'target', '.', 'deps', ')', ':', 'return', 'False', "# if the target has a dirty buildenv then it's also dirty", 'if', 'target', '.', 'buildenv', 'and', 'self', '.', 'targets', '[', 'target', '.', 'buildenv', ']', '.', 'is_dirty', ':', 'return', 'False', 'return', 'True'] | Return True if should attempt to load `target` from cache.
Return False if `target` has to be built, regardless of its cache
status (because cache is disabled, or dependencies are dirty). | ['Return', 'True', 'if', 'should', 'attempt', 'to', 'load', 'target', 'from', 'cache', '.', 'Return', 'False', 'if', 'target', 'has', 'to', 'be', 'built', 'regardless', 'of', 'its', 'cache', 'status', '(', 'because', 'cache', 'is', 'disabled', 'or', 'dependencies', 'are', 'dirty', ')', '.'] | train | https://github.com/resonai/ybt/blob/5b40df0922ef3383eb85f2b04a26a2db4b81b3fd/yabt/buildcontext.py#L479-L496 |
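
The dirty-propagation rule in isolation: a target may come from cache only when caching is enabled, the target is cachable, and neither its dependencies nor its buildenv are dirty; the plain-dict data model below is illustrative, not ybt's.

def cache_usable(target, targets, no_build_cache=False):
    if no_build_cache or not target['cachable']:
        return False
    if any(targets[dep]['is_dirty'] for dep in target['deps']):
        return False
    buildenv = target.get('buildenv')
    if buildenv and targets[buildenv]['is_dirty']:
        return False
    return True

targets = {
    'lib': {'cachable': True, 'deps': [], 'is_dirty': True},
    'app': {'cachable': True, 'deps': ['lib'], 'is_dirty': False},
}
print(cache_usable(targets['app'], targets))  # False: dependency 'lib' is dirty
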
3,824 | FPGAwars/apio | apio/commands/examples.py | cli | def cli(ctx, list, dir, files, project_dir, sayno):
"""Manage verilog examples.\n
Install with `apio install examples`"""
exit_code = 0
if list:
exit_code = Examples().list_examples()
elif dir:
exit_code = Examples().copy_example_dir(dir, project_dir, sayno)
elif files:
exit_code = Examples().copy_example_files(files, project_dir, sayno)
else:
click.secho(ctx.get_help())
click.secho(Examples().examples_of_use_cad())
ctx.exit(exit_code) | python | def cli(ctx, list, dir, files, project_dir, sayno):
"""Manage verilog examples.\n
Install with `apio install examples`"""
exit_code = 0
if list:
exit_code = Examples().list_examples()
elif dir:
exit_code = Examples().copy_example_dir(dir, project_dir, sayno)
elif files:
exit_code = Examples().copy_example_files(files, project_dir, sayno)
else:
click.secho(ctx.get_help())
click.secho(Examples().examples_of_use_cad())
ctx.exit(exit_code) | ['def', 'cli', '(', 'ctx', ',', 'list', ',', 'dir', ',', 'files', ',', 'project_dir', ',', 'sayno', ')', ':', 'exit_code', '=', '0', 'if', 'list', ':', 'exit_code', '=', 'Examples', '(', ')', '.', 'list_examples', '(', ')', 'elif', 'dir', ':', 'exit_code', '=', 'Examples', '(', ')', '.', 'copy_example_dir', '(', 'dir', ',', 'project_dir', ',', 'sayno', ')', 'elif', 'files', ':', 'exit_code', '=', 'Examples', '(', ')', '.', 'copy_example_files', '(', 'files', ',', 'project_dir', ',', 'sayno', ')', 'else', ':', 'click', '.', 'secho', '(', 'ctx', '.', 'get_help', '(', ')', ')', 'click', '.', 'secho', '(', 'Examples', '(', ')', '.', 'examples_of_use_cad', '(', ')', ')', 'ctx', '.', 'exit', '(', 'exit_code', ')'] | Manage verilog examples.\n
Install with `apio install examples` | ['Manage', 'verilog', 'examples', '.', '\\', 'n', 'Install', 'with', 'apio', 'install', 'examples'] | train | https://github.com/FPGAwars/apio/blob/5c6310f11a061a760764c6b5847bfb431dc3d0bc/apio/commands/examples.py#L29-L45 |
3,825 | 7sDream/zhihu-py3 | zhihu/question.py | Question.follower_num | def follower_num(self):
"""获取问题关注人数.
:return: 问题关注人数
:rtype: int
"""
follower_num_block = self.soup.find('div', class_='zg-gray-normal')
# When no one follows the question the corresponding block cannot be found, so return 0 directly (thanks to Zhihu user 段晓晨 for pointing this out)
if follower_num_block is None or follower_num_block.strong is None:
return 0
return int(follower_num_block.strong.text) | python | def follower_num(self):
"""获取问题关注人数.
:return: 问题关注人数
:rtype: int
"""
follower_num_block = self.soup.find('div', class_='zg-gray-normal')
# When no one follows the question the corresponding block cannot be found, so return 0 directly (thanks to Zhihu user 段晓晨 for pointing this out)
if follower_num_block is None or follower_num_block.strong is None:
return 0
return int(follower_num_block.strong.text) | ['def', 'follower_num', '(', 'self', ')', ':', 'follower_num_block', '=', 'self', '.', 'soup', '.', 'find', '(', "'div'", ',', 'class_', '=', "'zg-gray-normal'", ')', '# 无人关注时 找不到对应block,直接返回0 (感谢知乎用户 段晓晨 提出此问题)', 'if', 'follower_num_block', 'is', 'None', 'or', 'follower_num_block', '.', 'strong', 'is', 'None', ':', 'return', '0', 'return', 'int', '(', 'follower_num_block', '.', 'strong', '.', 'text', ')'] | 获取问题关注人数.
:return: 问题关注人数
:rtype: int | ['获取问题关注人数', '.'] | train | https://github.com/7sDream/zhihu-py3/blob/bcb4aa8325f8b54d3b44bd0bdc959edd9761fcfc/zhihu/question.py#L136-L146 |
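
The scraping step in isolation with BeautifulSoup; the HTML snippet is made up, and only the CSS class and the missing-block fallback mirror the method above.

from bs4 import BeautifulSoup

html = '<div class="zg-gray-normal">Followers: <strong>42</strong></div>'
soup = BeautifulSoup(html, 'html.parser')

block = soup.find('div', class_='zg-gray-normal')
followers = 0 if block is None or block.strong is None else int(block.strong.text)
print(followers)  # 42
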
3,826 | improbable-research/keanu | keanu-python/keanu/vertex/generated.py | IntegerMax | def IntegerMax(left: vertex_constructor_param_types, right: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
"""
Finds the maximum between two vertices
:param left: one of the vertices to find the maximum of
:param right: one of the vertices to find the maximum of
"""
return Integer(context.jvm_view().IntegerMaxVertex, label, cast_to_integer_vertex(left), cast_to_integer_vertex(right)) | python | def IntegerMax(left: vertex_constructor_param_types, right: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
"""
Finds the maximum between two vertices
:param left: one of the vertices to find the maximum of
:param right: one of the vertices to find the maximum of
"""
return Integer(context.jvm_view().IntegerMaxVertex, label, cast_to_integer_vertex(left), cast_to_integer_vertex(right)) | ['def', 'IntegerMax', '(', 'left', ':', 'vertex_constructor_param_types', ',', 'right', ':', 'vertex_constructor_param_types', ',', 'label', ':', 'Optional', '[', 'str', ']', '=', 'None', ')', '->', 'Vertex', ':', 'return', 'Integer', '(', 'context', '.', 'jvm_view', '(', ')', '.', 'IntegerMaxVertex', ',', 'label', ',', 'cast_to_integer_vertex', '(', 'left', ')', ',', 'cast_to_integer_vertex', '(', 'right', ')', ')'] | Finds the maximum between two vertices
:param left: one of the vertices to find the maximum of
:param right: one of the vertices to find the maximum of | ['Finds', 'the', 'maximum', 'between', 'two', 'vertices', ':', 'param', 'left', ':', 'one', 'of', 'the', 'vertices', 'to', 'find', 'the', 'maximum', 'of', ':', 'param', 'right', ':', 'one', 'of', 'the', 'vertices', 'to', 'find', 'the', 'maximum', 'of'] | train | https://github.com/improbable-research/keanu/blob/73189a8f569078e156168e795f82c7366c59574b/keanu-python/keanu/vertex/generated.py#L744-L751 |
3,827 | linkedin/pyexchange | pyexchange/base/soap.py | ExchangeServiceSOAP._xpath_to_dict | def _xpath_to_dict(self, element, property_map, namespace_map):
"""
property_map = {
u'name' : { u'xpath' : u't:Mailbox/t:Name'},
u'email' : { u'xpath' : u't:Mailbox/t:EmailAddress'},
u'response' : { u'xpath' : u't:ResponseType'},
u'last_response': { u'xpath' : u't:LastResponseTime', u'cast': u'datetime'},
}
This runs the given xpath on the node and returns a dictionary
"""
result = {}
log.info(etree.tostring(element, pretty_print=True))
for key in property_map:
item = property_map[key]
log.info(u'Pulling xpath {xpath} into key {key}'.format(key=key, xpath=item[u'xpath']))
nodes = element.xpath(item[u'xpath'], namespaces=namespace_map)
if nodes:
result_for_node = []
for node in nodes:
cast_as = item.get(u'cast', None)
if cast_as == u'datetime':
result_for_node.append(self._parse_date(node.text))
elif cast_as == u'date_only_naive':
result_for_node.append(self._parse_date_only_naive(node.text))
elif cast_as == u'int':
result_for_node.append(int(node.text))
elif cast_as == u'bool':
if node.text.lower() == u'true':
result_for_node.append(True)
else:
result_for_node.append(False)
else:
result_for_node.append(node.text)
if not result_for_node:
result[key] = None
elif len(result_for_node) == 1:
result[key] = result_for_node[0]
else:
result[key] = result_for_node
return result | python | def _xpath_to_dict(self, element, property_map, namespace_map):
"""
property_map = {
u'name' : { u'xpath' : u't:Mailbox/t:Name'},
u'email' : { u'xpath' : u't:Mailbox/t:EmailAddress'},
u'response' : { u'xpath' : u't:ResponseType'},
u'last_response': { u'xpath' : u't:LastResponseTime', u'cast': u'datetime'},
}
This runs the given xpath on the node and returns a dictionary
"""
result = {}
log.info(etree.tostring(element, pretty_print=True))
for key in property_map:
item = property_map[key]
log.info(u'Pulling xpath {xpath} into key {key}'.format(key=key, xpath=item[u'xpath']))
nodes = element.xpath(item[u'xpath'], namespaces=namespace_map)
if nodes:
result_for_node = []
for node in nodes:
cast_as = item.get(u'cast', None)
if cast_as == u'datetime':
result_for_node.append(self._parse_date(node.text))
elif cast_as == u'date_only_naive':
result_for_node.append(self._parse_date_only_naive(node.text))
elif cast_as == u'int':
result_for_node.append(int(node.text))
elif cast_as == u'bool':
if node.text.lower() == u'true':
result_for_node.append(True)
else:
result_for_node.append(False)
else:
result_for_node.append(node.text)
if not result_for_node:
result[key] = None
elif len(result_for_node) == 1:
result[key] = result_for_node[0]
else:
result[key] = result_for_node
return result | ['def', '_xpath_to_dict', '(', 'self', ',', 'element', ',', 'property_map', ',', 'namespace_map', ')', ':', 'result', '=', '{', '}', 'log', '.', 'info', '(', 'etree', '.', 'tostring', '(', 'element', ',', 'pretty_print', '=', 'True', ')', ')', 'for', 'key', 'in', 'property_map', ':', 'item', '=', 'property_map', '[', 'key', ']', 'log', '.', 'info', '(', "u'Pulling xpath {xpath} into key {key}'", '.', 'format', '(', 'key', '=', 'key', ',', 'xpath', '=', 'item', '[', "u'xpath'", ']', ')', ')', 'nodes', '=', 'element', '.', 'xpath', '(', 'item', '[', "u'xpath'", ']', ',', 'namespaces', '=', 'namespace_map', ')', 'if', 'nodes', ':', 'result_for_node', '=', '[', ']', 'for', 'node', 'in', 'nodes', ':', 'cast_as', '=', 'item', '.', 'get', '(', "u'cast'", ',', 'None', ')', 'if', 'cast_as', '==', "u'datetime'", ':', 'result_for_node', '.', 'append', '(', 'self', '.', '_parse_date', '(', 'node', '.', 'text', ')', ')', 'elif', 'cast_as', '==', "u'date_only_naive'", ':', 'result_for_node', '.', 'append', '(', 'self', '.', '_parse_date_only_naive', '(', 'node', '.', 'text', ')', ')', 'elif', 'cast_as', '==', "u'int'", ':', 'result_for_node', '.', 'append', '(', 'int', '(', 'node', '.', 'text', ')', ')', 'elif', 'cast_as', '==', "u'bool'", ':', 'if', 'node', '.', 'text', '.', 'lower', '(', ')', '==', "u'true'", ':', 'result_for_node', '.', 'append', '(', 'True', ')', 'else', ':', 'result_for_node', '.', 'append', '(', 'False', ')', 'else', ':', 'result_for_node', '.', 'append', '(', 'node', '.', 'text', ')', 'if', 'not', 'result_for_node', ':', 'result', '[', 'key', ']', '=', 'None', 'elif', 'len', '(', 'result_for_node', ')', '==', '1', ':', 'result', '[', 'key', ']', '=', 'result_for_node', '[', '0', ']', 'else', ':', 'result', '[', 'key', ']', '=', 'result_for_node', 'return', 'result'] | property_map = {
u'name' : { u'xpath' : u't:Mailbox/t:Name'},
u'email' : { u'xpath' : u't:Mailbox/t:EmailAddress'},
u'response' : { u'xpath' : u't:ResponseType'},
u'last_response': { u'xpath' : u't:LastResponseTime', u'cast': u'datetime'},
}
This runs the given xpath on the node and returns a dictionary | ['property_map', '=', '{', 'u', 'name', ':', '{', 'u', 'xpath', ':', 'u', 't', ':', 'Mailbox', '/', 't', ':', 'Name', '}', 'u', 'email', ':', '{', 'u', 'xpath', ':', 'u', 't', ':', 'Mailbox', '/', 't', ':', 'EmailAddress', '}', 'u', 'response', ':', '{', 'u', 'xpath', ':', 'u', 't', ':', 'ResponseType', '}', 'u', 'last_response', ':', '{', 'u', 'xpath', ':', 'u', 't', ':', 'LastResponseTime', 'u', 'cast', ':', 'u', 'datetime', '}', '}'] | train | https://github.com/linkedin/pyexchange/blob/d568f4edd326adb451b915ddf66cf1a37820e3ca/pyexchange/base/soap.py#L83-L132 |
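
A simplified sketch of the xpath-to-dict mapping on a toy document with lxml; the property map, casting and namespace handling are reduced to the essentials and are not Exchange-specific.

from lxml import etree

element = etree.fromstring('<attendee><name>Ada</name><count>3</count></attendee>')
property_map = {
    'name':  {'xpath': 'name'},
    'count': {'xpath': 'count', 'cast': 'int'},
}

result = {}
for key, item in property_map.items():
    nodes = element.xpath(item['xpath'])
    if not nodes:
        result[key] = None
        continue
    values = [int(n.text) if item.get('cast') == 'int' else n.text for n in nodes]
    result[key] = values[0] if len(values) == 1 else values

print(result)  # {'name': 'Ada', 'count': 3}
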
3,828 | devassistant/devassistant | devassistant/gui/run_window.py | RunLoggingHandler.emit | def emit(self, record):
"""
Function inserts log messages to list_view
"""
msg = record.getMessage()
list_store = self.list_view.get_model()
Gdk.threads_enter()
if msg:
# Underline URLs in the record message
msg = replace_markup_chars(record.getMessage())
record.msg = URL_FINDER.sub(r'<u>\1</u>', msg)
self.parent.debug_logs['logs'].append(record)
# During execution if level is bigger than DEBUG
# then GUI shows the message.
event_type = getattr(record, 'event_type', '')
if event_type:
if event_type == 'dep_installation_start':
switch_cursor(Gdk.CursorType.WATCH, self.parent.run_window)
list_store.append([format_entry(record)])
if event_type == 'dep_installation_end':
switch_cursor(Gdk.CursorType.ARROW, self.parent.run_window)
if not self.parent.debugging:
# We will show only INFO messages and messages that have no dep_ event_type
if int(record.levelno) > 10:
if event_type == "dep_check" or event_type == "dep_found":
list_store.append([format_entry(record)])
elif not event_type.startswith("dep_"):
list_store.append([format_entry(record, colorize=True)])
if self.parent.debugging:
if event_type != "cmd_retcode":
list_store.append([format_entry(record, show_level=True, colorize=True)])
Gdk.threads_leave() | python | def emit(self, record):
"""
Function inserts log messages to list_view
"""
msg = record.getMessage()
list_store = self.list_view.get_model()
Gdk.threads_enter()
if msg:
# Underline URLs in the record message
msg = replace_markup_chars(record.getMessage())
record.msg = URL_FINDER.sub(r'<u>\1</u>', msg)
self.parent.debug_logs['logs'].append(record)
# During execution if level is bigger than DEBUG
# then GUI shows the message.
event_type = getattr(record, 'event_type', '')
if event_type:
if event_type == 'dep_installation_start':
switch_cursor(Gdk.CursorType.WATCH, self.parent.run_window)
list_store.append([format_entry(record)])
if event_type == 'dep_installation_end':
switch_cursor(Gdk.CursorType.ARROW, self.parent.run_window)
if not self.parent.debugging:
# We will show only INFO messages and messages that have no dep_ event_type
if int(record.levelno) > 10:
if event_type == "dep_check" or event_type == "dep_found":
list_store.append([format_entry(record)])
elif not event_type.startswith("dep_"):
list_store.append([format_entry(record, colorize=True)])
if self.parent.debugging:
if event_type != "cmd_retcode":
list_store.append([format_entry(record, show_level=True, colorize=True)])
Gdk.threads_leave() | ['def', 'emit', '(', 'self', ',', 'record', ')', ':', 'msg', '=', 'record', '.', 'getMessage', '(', ')', 'list_store', '=', 'self', '.', 'list_view', '.', 'get_model', '(', ')', 'Gdk', '.', 'threads_enter', '(', ')', 'if', 'msg', ':', '# Underline URLs in the record message', 'msg', '=', 'replace_markup_chars', '(', 'record', '.', 'getMessage', '(', ')', ')', 'record', '.', 'msg', '=', 'URL_FINDER', '.', 'sub', '(', "r'<u>\\1</u>'", ',', 'msg', ')', 'self', '.', 'parent', '.', 'debug_logs', '[', "'logs'", ']', '.', 'append', '(', 'record', ')', '# During execution if level is bigger then DEBUG', '# then GUI shows the message.', 'event_type', '=', 'getattr', '(', 'record', ',', "'event_type'", ',', "''", ')', 'if', 'event_type', ':', 'if', 'event_type', '==', "'dep_installation_start'", ':', 'switch_cursor', '(', 'Gdk', '.', 'CursorType', '.', 'WATCH', ',', 'self', '.', 'parent', '.', 'run_window', ')', 'list_store', '.', 'append', '(', '[', 'format_entry', '(', 'record', ')', ']', ')', 'if', 'event_type', '==', "'dep_installation_end'", ':', 'switch_cursor', '(', 'Gdk', '.', 'CursorType', '.', 'ARROW', ',', 'self', '.', 'parent', '.', 'run_window', ')', 'if', 'not', 'self', '.', 'parent', '.', 'debugging', ':', '# We will show only INFO messages and messages who have no dep_ event_type', 'if', 'int', '(', 'record', '.', 'levelno', ')', '>', '10', ':', 'if', 'event_type', '==', '"dep_check"', 'or', 'event_type', '==', '"dep_found"', ':', 'list_store', '.', 'append', '(', '[', 'format_entry', '(', 'record', ')', ']', ')', 'elif', 'not', 'event_type', '.', 'startswith', '(', '"dep_"', ')', ':', 'list_store', '.', 'append', '(', '[', 'format_entry', '(', 'record', ',', 'colorize', '=', 'True', ')', ']', ')', 'if', 'self', '.', 'parent', '.', 'debugging', ':', 'if', 'event_type', '!=', '"cmd_retcode"', ':', 'list_store', '.', 'append', '(', '[', 'format_entry', '(', 'record', ',', 'show_level', '=', 'True', ',', 'colorize', '=', 'True', ')', ']', ')', 'Gdk', '.', 'threads_leave', '(', ')'] | Function inserts log messages to list_view | ['Function', 'inserts', 'log', 'messages', 'to', 'list_view'] | train | https://github.com/devassistant/devassistant/blob/2dbfeaa666a64127263664d18969c55d19ecc83e/devassistant/gui/run_window.py#L62-L93 |
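
The core idea reduced to standard logging: a Handler whose emit() pushes formatted records into a GUI model, with a plain list standing in for the Gtk ListStore; the names below are illustrative.

import logging

class ListHandler(logging.Handler):
    def __init__(self, store):
        super(ListHandler, self).__init__()
        self.store = store

    def emit(self, record):
        # Only surface records above DEBUG, as the GUI handler above does.
        if record.levelno > logging.DEBUG:
            self.store.append(self.format(record))

store = []
logger = logging.getLogger('run_window_demo')
logger.addHandler(ListHandler(store))
logger.warning('dependency installation started')
print(store)  # ['dependency installation started']
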
3,829 | frejanordsiek/GeminiMotorDrive | GeminiMotorDrive/__init__.py | GeminiG6._get_parameter | def _get_parameter(self, name, tp, timeout=1.0, max_retries=2):
""" Gets the specified drive parameter.
Gets a parameter from the drive. Only supports ``bool``,
``int``, and ``float`` parameters.
Parameters
----------
name : str
Name of the parameter to check. It is always the command to
set it but without the value.
tp : type {bool, int, float}
The type of the parameter.
timeout : number, optional
Optional timeout in seconds to use when reading the
response. A negative value or ``None`` indicates that an
infinite timeout should be used.
max_retries : int, optional
Maximum number of retries to do per command in the case of
errors.
Returns
-------
value : bool, int, or float
The value of the specified parameter.
Raises
------
TypeError
If 'tp' is not an allowed type (``bool``, ``int``,
``float``).
CommandError
If the command to retrieve the parameter returned an error.
ValueError
If the value returned to the drive cannot be converted to
the proper type.
See Also
--------
_set_parameter : Set a parameter.
"""
# Raise a TypeError if tp isn't one of the valid types.
if tp not in (bool, int, float):
raise TypeError('Only supports bool, int, and float; not '
+ str(tp))
# Sending a command of name queries the state for that
# parameter. The response will have name preceeded by an '*' and
# then followed by a number which will have to be converted.
response = self.driver.send_command(name, timeout=timeout,
immediate=True,
max_retries=max_retries)
# If the response has an error, there are no response lines, or
# the first response line isn't '*'+name; then there was an
# error and an exception needs to be thrown.
if self.driver.command_error(response) \
or len(response[4]) == 0 \
or not response[4][0].startswith('*' + name):
raise CommandError("Couldn't retrieve parameter "
+ name)
# Extract the string representation of the value, which is after
# the '*'+name.
value_str = response[4][0][(len(name)+1):]
# Convert the value string to the appropriate type and return
# it. Throw an error if it is not supported.
if tp == bool:
return (value_str == '1')
elif tp == int:
return int(value_str)
elif tp == float:
return float(value_str) | python | def _get_parameter(self, name, tp, timeout=1.0, max_retries=2):
""" Gets the specified drive parameter.
Gets a parameter from the drive. Only supports ``bool``,
``int``, and ``float`` parameters.
Parameters
----------
name : str
Name of the parameter to check. It is always the command to
set it but without the value.
tp : type {bool, int, float}
The type of the parameter.
timeout : number, optional
Optional timeout in seconds to use when reading the
response. A negative value or ``None`` indicates that an
infinite timeout should be used.
max_retries : int, optional
Maximum number of retries to do per command in the case of
errors.
Returns
-------
value : bool, int, or float
The value of the specified parameter.
Raises
------
TypeError
If 'tp' is not an allowed type (``bool``, ``int``,
``float``).
CommandError
If the command to retrieve the parameter returned an error.
ValueError
If the value returned to the drive cannot be converted to
the proper type.
See Also
--------
_set_parameter : Set a parameter.
"""
# Raise a TypeError if tp isn't one of the valid types.
if tp not in (bool, int, float):
raise TypeError('Only supports bool, int, and float; not '
+ str(tp))
# Sending a command of name queries the state for that
# parameter. The response will have name preceeded by an '*' and
# then followed by a number which will have to be converted.
response = self.driver.send_command(name, timeout=timeout,
immediate=True,
max_retries=max_retries)
# If the response has an error, there are no response lines, or
# the first response line isn't '*'+name; then there was an
# error and an exception needs to be thrown.
if self.driver.command_error(response) \
or len(response[4]) == 0 \
or not response[4][0].startswith('*' + name):
raise CommandError("Couldn't retrieve parameter "
+ name)
# Extract the string representation of the value, which is after
# the '*'+name.
value_str = response[4][0][(len(name)+1):]
# Convert the value string to the appropriate type and return
# it. Throw an error if it is not supported.
if tp == bool:
return (value_str == '1')
elif tp == int:
return int(value_str)
elif tp == float:
return float(value_str) | ['def', '_get_parameter', '(', 'self', ',', 'name', ',', 'tp', ',', 'timeout', '=', '1.0', ',', 'max_retries', '=', '2', ')', ':', "# Raise a TypeError if tp isn't one of the valid types.", 'if', 'tp', 'not', 'in', '(', 'bool', ',', 'int', ',', 'float', ')', ':', 'raise', 'TypeError', '(', "'Only supports bool, int, and float; not '", '+', 'str', '(', 'tp', ')', ')', '# Sending a command of name queries the state for that', "# parameter. The response will have name preceeded by an '*' and", '# then followed by a number which will have to be converted.', 'response', '=', 'self', '.', 'driver', '.', 'send_command', '(', 'name', ',', 'timeout', '=', 'timeout', ',', 'immediate', '=', 'True', ',', 'max_retries', '=', 'max_retries', ')', '# If the response has an error, there are no response lines, or', "# the first response line isn't '*'+name; then there was an", '# error and an exception needs to be thrown.', 'if', 'self', '.', 'driver', '.', 'command_error', '(', 'response', ')', 'or', 'len', '(', 'response', '[', '4', ']', ')', '==', '0', 'or', 'not', 'response', '[', '4', ']', '[', '0', ']', '.', 'startswith', '(', "'*'", '+', 'name', ')', ':', 'raise', 'CommandError', '(', "'Couldn'", "'t retrieve parameter '", '+', 'name', ')', '# Extract the string representation of the value, which is after', "# the '*'+name.", 'value_str', '=', 'response', '[', '4', ']', '[', '0', ']', '[', '(', 'len', '(', 'name', ')', '+', '1', ')', ':', ']', '# Convert the value string to the appropriate type and return', '# it. Throw an error if it is not supported.', 'if', 'tp', '==', 'bool', ':', 'return', '(', 'value_str', '==', "'1'", ')', 'elif', 'tp', '==', 'int', ':', 'return', 'int', '(', 'value_str', ')', 'elif', 'tp', '==', 'float', ':', 'return', 'float', '(', 'value_str', ')'] | Gets the specified drive parameter.
Gets a parameter from the drive. Only supports ``bool``,
``int``, and ``float`` parameters.
Parameters
----------
name : str
Name of the parameter to check. It is always the command to
set it but without the value.
tp : type {bool, int, float}
The type of the parameter.
timeout : number, optional
Optional timeout in seconds to use when reading the
response. A negative value or ``None`` indicates that an
infinite timeout should be used.
max_retries : int, optional
Maximum number of retries to do per command in the case of
errors.
Returns
-------
value : bool, int, or float
The value of the specified parameter.
Raises
------
TypeError
If 'tp' is not an allowed type (``bool``, ``int``,
``float``).
CommandError
If the command to retrieve the parameter returned an error.
ValueError
If the value returned to the drive cannot be converted to
the proper type.
See Also
--------
_set_parameter : Set a parameter. | ['Gets', 'the', 'specified', 'drive', 'parameter', '.'] | train | https://github.com/frejanordsiek/GeminiMotorDrive/blob/8de347ffb91228fbfe3832098b4996fa0141d8f1/GeminiMotorDrive/__init__.py#L145-L219 |
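
The response-parsing step on its own: strip the echoed '*' + name prefix and cast the remainder; the parameter names and reply strings below are invented examples, not verified Gemini commands.

def parse_parameter_reply(name, reply, tp):
    # The drive echoes '*' + name followed by the value, e.g. '*ERES4000'.
    if not reply.startswith('*' + name):
        raise ValueError("Couldn't retrieve parameter " + name)
    value_str = reply[len(name) + 1:]
    if tp is bool:
        return value_str == '1'
    return tp(value_str)

print(parse_parameter_reply('ERES', '*ERES4000', int))   # 4000
print(parse_parameter_reply('DMODE', '*DMODE1', bool))   # True
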
3,830 | opennode/waldur-core | waldur_core/logging/views.py | AlertViewSet.list | def list(self, request, *args, **kwargs):
"""
To get a list of alerts, run **GET** against */api/alerts/* as authenticated user.
Alert severity field can take one of these values: "Error", "Warning", "Info", "Debug".
Field scope will contain a link to the object that caused the alert.
Context - a dictionary that contains information about all objects related to the alert.
Alerts can be filtered by:
- ?severity=<severity> (can be list)
- ?alert_type=<alert_type> (can be list)
- ?scope=<url> concrete alert scope
- ?scope_type=<string> name of scope type (Ex.: instance, service_project_link, project...)
DEPRECATED use ?content_type instead
- ?created_from=<timestamp>
- ?created_to=<timestamp>
- ?closed_from=<timestamp>
- ?closed_to=<timestamp>
- ?from=<timestamp> - filter alerts that was active from given date
- ?to=<timestamp> - filter alerts that was active to given date
- ?opened - if this argument is in GET request endpoint will return only alerts that are not closed
- ?closed - if this argument is in GET request endpoint will return only alerts that are closed
- ?aggregate=aggregate_model_name (default: 'customer'. Have to be from list: 'customer', project')
- ?uuid=uuid_of_aggregate_model_object (not required. If this parameter will be defined - result will contain only
object with given uuid)
- ?acknowledged=True|False - show only acknowledged (non-acknowledged) alerts
- ?content_type=<string> name of scope content type in format <app_name>.<scope_type>
(Ex.: structure.project, openstack.instance...)
- ?exclude_features=<feature> (can be list) - exclude alert from output if its type corresponds to one of the given features
Alerts can be ordered by:
-?o=severity - order by severity
-?o=created - order by creation time
.. code-block:: http
GET /api/alerts/
Accept: application/json
Content-Type: application/json
Authorization: Token c84d653b9ec92c6cbac41c706593e66f567a7fa4
Host: example.com
[
{
"url": "http://example.com/api/alerts/e80e48a4e58b48ff9a1320a0aa0d68ab/",
"uuid": "e80e48a4e58b48ff9a1320a0aa0d68ab",
"alert_type": "first_alert",
"message": "message#1",
"severity": "Debug",
"scope": "http://example.com/api/instances/9d1d7e03b0d14fd0b42b5f649dfa3de5/",
"created": "2015-05-29T14:24:27.342Z",
"closed": null,
"context": {
'customer_abbreviation': 'customer_abbreviation',
'customer_contact_details': 'customer details',
'customer_name': 'Customer name',
'customer_uuid': '53c6e86406e349faa7924f4c865b15ab',
'quota_limit': '131072.0',
'quota_name': 'ram',
'quota_usage': '131071',
'quota_uuid': 'f6ae2f7ca86f4e2f9bb64de1015a2815',
'scope_name': 'project X',
'scope_uuid': '0238d71ee1934bd2839d4e71e5f9b91a'
}
"acknowledged": true,
}
]
"""
return super(AlertViewSet, self).list(request, *args, **kwargs) | python | def list(self, request, *args, **kwargs):
"""
To get a list of alerts, run **GET** against */api/alerts/* as authenticated user.
Alert severity field can take one of these values: "Error", "Warning", "Info", "Debug".
Field scope will contain a link to the object that caused the alert.
Context - a dictionary that contains information about all objects related to the alert.
Alerts can be filtered by:
- ?severity=<severity> (can be list)
- ?alert_type=<alert_type> (can be list)
- ?scope=<url> concrete alert scope
- ?scope_type=<string> name of scope type (Ex.: instance, service_project_link, project...)
DEPRECATED use ?content_type instead
- ?created_from=<timestamp>
- ?created_to=<timestamp>
- ?closed_from=<timestamp>
- ?closed_to=<timestamp>
- ?from=<timestamp> - filter alerts that was active from given date
- ?to=<timestamp> - filter alerts that was active to given date
- ?opened - if this argument is in GET request endpoint will return only alerts that are not closed
- ?closed - if this argument is in GET request endpoint will return only alerts that are closed
- ?aggregate=aggregate_model_name (default: 'customer'. Have to be from list: 'customer', project')
- ?uuid=uuid_of_aggregate_model_object (not required. If this parameter will be defined - result will contain only
object with given uuid)
- ?acknowledged=True|False - show only acknowledged (non-acknowledged) alerts
- ?content_type=<string> name of scope content type in format <app_name>.<scope_type>
(Ex.: structure.project, openstack.instance...)
- ?exclude_features=<feature> (can be list) - exclude alert from output if its type corresponds to one of the given features
Alerts can be ordered by:
-?o=severity - order by severity
-?o=created - order by creation time
.. code-block:: http
GET /api/alerts/
Accept: application/json
Content-Type: application/json
Authorization: Token c84d653b9ec92c6cbac41c706593e66f567a7fa4
Host: example.com
[
{
"url": "http://example.com/api/alerts/e80e48a4e58b48ff9a1320a0aa0d68ab/",
"uuid": "e80e48a4e58b48ff9a1320a0aa0d68ab",
"alert_type": "first_alert",
"message": "message#1",
"severity": "Debug",
"scope": "http://example.com/api/instances/9d1d7e03b0d14fd0b42b5f649dfa3de5/",
"created": "2015-05-29T14:24:27.342Z",
"closed": null,
"context": {
'customer_abbreviation': 'customer_abbreviation',
'customer_contact_details': 'customer details',
'customer_name': 'Customer name',
'customer_uuid': '53c6e86406e349faa7924f4c865b15ab',
'quota_limit': '131072.0',
'quota_name': 'ram',
'quota_usage': '131071',
'quota_uuid': 'f6ae2f7ca86f4e2f9bb64de1015a2815',
'scope_name': 'project X',
'scope_uuid': '0238d71ee1934bd2839d4e71e5f9b91a'
}
"acknowledged": true,
}
]
"""
return super(AlertViewSet, self).list(request, *args, **kwargs) | ['def', 'list', '(', 'self', ',', 'request', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'return', 'super', '(', 'AlertViewSet', ',', 'self', ')', '.', 'list', '(', 'request', ',', '*', 'args', ',', '*', '*', 'kwargs', ')'] | To get a list of alerts, run **GET** against */api/alerts/* as authenticated user.
Alert severity field can take one of these values: "Error", "Warning", "Info", "Debug".
Field scope will contain a link to the object that caused the alert.
Context - a dictionary that contains information about all objects related to the alert.
Alerts can be filtered by:
- ?severity=<severity> (can be list)
- ?alert_type=<alert_type> (can be list)
- ?scope=<url> concrete alert scope
- ?scope_type=<string> name of scope type (Ex.: instance, service_project_link, project...)
DEPRECATED use ?content_type instead
- ?created_from=<timestamp>
- ?created_to=<timestamp>
- ?closed_from=<timestamp>
- ?closed_to=<timestamp>
- ?from=<timestamp> - filter alerts that was active from given date
- ?to=<timestamp> - filter alerts that was active to given date
- ?opened - if this argument is in GET request endpoint will return only alerts that are not closed
- ?closed - if this argument is in GET request endpoint will return only alerts that are closed
- ?aggregate=aggregate_model_name (default: 'customer'. Have to be from list: 'customer', project')
- ?uuid=uuid_of_aggregate_model_object (not required. If this parameter will be defined - result will contain only
object with given uuid)
- ?acknowledged=True|False - show only acknowledged (non-acknowledged) alerts
- ?content_type=<string> name of scope content type in format <app_name>.<scope_type>
(Ex.: structure.project, openstack.instance...)
- ?exclude_features=<feature> (can be list) - exclude alert from output if its type corresponds to one of the given features
Alerts can be ordered by:
-?o=severity - order by severity
-?o=created - order by creation time
.. code-block:: http
GET /api/alerts/
Accept: application/json
Content-Type: application/json
Authorization: Token c84d653b9ec92c6cbac41c706593e66f567a7fa4
Host: example.com
[
{
"url": "http://example.com/api/alerts/e80e48a4e58b48ff9a1320a0aa0d68ab/",
"uuid": "e80e48a4e58b48ff9a1320a0aa0d68ab",
"alert_type": "first_alert",
"message": "message#1",
"severity": "Debug",
"scope": "http://example.com/api/instances/9d1d7e03b0d14fd0b42b5f649dfa3de5/",
"created": "2015-05-29T14:24:27.342Z",
"closed": null,
"context": {
'customer_abbreviation': 'customer_abbreviation',
'customer_contact_details': 'customer details',
'customer_name': 'Customer name',
'customer_uuid': '53c6e86406e349faa7924f4c865b15ab',
'quota_limit': '131072.0',
'quota_name': 'ram',
'quota_usage': '131071',
'quota_uuid': 'f6ae2f7ca86f4e2f9bb64de1015a2815',
'scope_name': 'project X',
'scope_uuid': '0238d71ee1934bd2839d4e71e5f9b91a'
}
"acknowledged": true,
}
] | ['To', 'get', 'a', 'list', 'of', 'alerts', 'run', '**', 'GET', '**', 'against', '*', '/', 'api', '/', 'alerts', '/', '*', 'as', 'authenticated', 'user', '.'] | train | https://github.com/opennode/waldur-core/blob/d6c17a9592bb6c49c33567542eef8d099605a46a/waldur_core/logging/views.py#L161-L230 |
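
A hedged client-side sketch of hitting this endpoint with a couple of the documented filters; the host and token are the placeholder values from the docstring's own example, not a live deployment.

import requests

response = requests.get(
    'http://example.com/api/alerts/',
    params={'severity': 'Error', 'opened': '', 'o': 'created'},
    headers={'Authorization': 'Token c84d653b9ec92c6cbac41c706593e66f567a7fa4'},
)
for alert in response.json():
    print(alert['severity'], alert['alert_type'], alert['message'])
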
3,831 | empymod/empymod | empymod/model.py | dipole | def dipole(src, rec, depth, res, freqtime, signal=None, ab=11, aniso=None,
epermH=None, epermV=None, mpermH=None, mpermV=None, xdirect=False,
ht='fht', htarg=None, ft='sin', ftarg=None, opt=None, loop=None,
verb=2):
r"""Return the electromagnetic field due to a dipole source.
Calculate the electromagnetic frequency- or time-domain field due to
infinitesimal small electric or magnetic dipole source(s), measured by
infinitesimal small electric or magnetic dipole receiver(s); sources and
receivers are directed along the principal directions x, y, or z, and all
sources are at the same depth, as well as all receivers are at the same
depth.
Use the functions ``bipole`` to calculate dipoles with arbitrary angles or
bipoles of finite length and arbitrary angle.
The function ``dipole`` could be replaced by ``bipole`` (all there is to do
is translate ``ab`` into ``msrc``, ``mrec``, ``azimuth``'s and ``dip``'s).
However, ``dipole`` is kept separately to serve as an example of a simple
modelling routine that can serve as a template.
See Also
--------
bipole : Electromagnetic field due to an electromagnetic source.
fem : Electromagnetic frequency-domain response.
tem : Electromagnetic time-domain response.
Parameters
----------
src, rec : list of floats or arrays
Source and receiver coordinates (m): [x, y, z].
The x- and y-coordinates can be arrays, z is a single value.
The x- and y-coordinates must have the same dimension.
Sources or receivers placed on a layer interface are considered in the
upper layer.
depth : list
Absolute layer interfaces z (m); #depth = #res - 1
(excluding +/- infinity).
res : array_like
Horizontal resistivities rho_h (Ohm.m); #res = #depth + 1.
Alternatively, res can be a dictionary. See the main manual of empymod
too see how to exploit this hook to re-calculate etaH, etaV, zetaH, and
zetaV, which can be used to, for instance, use the Cole-Cole model for
IP.
freqtime : array_like
Frequencies f (Hz) if ``signal`` == None, else times t (s); (f, t > 0).
signal : {None, 0, 1, -1}, optional
Source signal, default is None:
- None: Frequency-domain response
- -1 : Switch-off time-domain response
- 0 : Impulse time-domain response
- +1 : Switch-on time-domain response
ab : int, optional
Source-receiver configuration, defaults to 11.
+---------------+-------+------+------+------+------+------+------+
| | electric source | magnetic source |
+===============+=======+======+======+======+======+======+======+
| | **x**| **y**| **z**| **x**| **y**| **z**|
+---------------+-------+------+------+------+------+------+------+
| | **x** | 11 | 12 | 13 | 14 | 15 | 16 |
+ **electric** +-------+------+------+------+------+------+------+
| | **y** | 21 | 22 | 23 | 24 | 25 | 26 |
+ **receiver** +-------+------+------+------+------+------+------+
| | **z** | 31 | 32 | 33 | 34 | 35 | 36 |
+---------------+-------+------+------+------+------+------+------+
| | **x** | 41 | 42 | 43 | 44 | 45 | 46 |
+ **magnetic** +-------+------+------+------+------+------+------+
| | **y** | 51 | 52 | 53 | 54 | 55 | 56 |
+ **receiver** +-------+------+------+------+------+------+------+
| | **z** | 61 | 62 | 63 | 64 | 65 | 66 |
+---------------+-------+------+------+------+------+------+------+
aniso : array_like, optional
Anisotropies lambda = sqrt(rho_v/rho_h) (-); #aniso = #res.
Defaults to ones.
epermH, epermV : array_like, optional
Relative horizontal/vertical electric permittivities
epsilon_h/epsilon_v (-);
#epermH = #epermV = #res. Default is ones.
mpermH, mpermV : array_like, optional
Relative horizontal/vertical magnetic permeabilities mu_h/mu_v (-);
#mpermH = #mpermV = #res. Default is ones.
xdirect : bool or None, optional
Direct field calculation (only if src and rec are in the same layer):
- If True, direct field is calculated analytically in the frequency
domain.
- If False, direct field is calculated in the wavenumber domain.
- If None, direct field is excluded from the calculation, and only
reflected fields are returned (secondary field).
Defaults to False.
ht : {'fht', 'qwe', 'quad'}, optional
Flag to choose either the *Digital Linear Filter* method (FHT, *Fast
Hankel Transform*), the *Quadrature-With-Extrapolation* (QWE), or a
simple *Quadrature* (QUAD) for the Hankel transform. Defaults to
'fht'.
htarg : dict or list, optional
Depends on the value for ``ht``:
- If ``ht`` = 'fht': [fhtfilt, pts_per_dec]:
- fhtfilt: string of filter name in ``empymod.filters`` or
the filter method itself.
(default: ``empymod.filters.key_201_2009()``)
- pts_per_dec: points per decade; (default: 0)
- If 0: Standard DLF.
- If < 0: Lagged Convolution DLF.
- If > 0: Splined DLF
- If ``ht`` = 'qwe': [rtol, atol, nquad, maxint, pts_per_dec,
diff_quad, a, b, limit]:
- rtol: relative tolerance (default: 1e-12)
- atol: absolute tolerance (default: 1e-30)
- nquad: order of Gaussian quadrature (default: 51)
- maxint: maximum number of partial integral intervals
(default: 40)
- pts_per_dec: points per decade; (default: 0)
- If 0, no interpolation is used.
- If > 0, interpolation is used.
- diff_quad: criteria when to swap to QUAD (only relevant if
opt='spline') (default: 100)
- a: lower limit for QUAD (default: first interval from QWE)
- b: upper limit for QUAD (default: last interval from QWE)
- limit: limit for quad (default: maxint)
- If ``ht`` = 'quad': [atol, rtol, limit, lmin, lmax, pts_per_dec]:
- rtol: relative tolerance (default: 1e-12)
- atol: absolute tolerance (default: 1e-20)
- limit: An upper bound on the number of subintervals used in
the adaptive algorithm (default: 500)
- lmin: Minimum wavenumber (default 1e-6)
- lmax: Maximum wavenumber (default 0.1)
- pts_per_dec: points per decade (default: 40)
The values can be provided as dict with the keywords, or as list.
However, if provided as list, you have to follow the order given above.
A few examples, assuming ``ht`` = ``qwe``:
- Only changing rtol:
{'rtol': 1e-4} or [1e-4] or 1e-4
- Changing rtol and nquad:
{'rtol': 1e-4, 'nquad': 101} or [1e-4, '', 101]
- Only changing diff_quad:
{'diffquad': 10} or ['', '', '', '', '', 10]
ft : {'sin', 'cos', 'qwe', 'fftlog', 'fft'}, optional
Only used if ``signal`` != None. Flag to choose either the Digital
Linear Filter method (Sine- or Cosine-Filter), the
Quadrature-With-Extrapolation (QWE), the FFTLog, or the FFT for the
Fourier transform. Defaults to 'sin'.
ftarg : dict or list, optional
Only used if ``signal`` !=None. Depends on the value for ``ft``:
- If ``ft`` = 'sin' or 'cos': [fftfilt, pts_per_dec]:
- fftfilt: string of filter name in ``empymod.filters`` or
the filter method itself.
(Default: ``empymod.filters.key_201_CosSin_2012()``)
- pts_per_dec: points per decade; (default: -1)
- If 0: Standard DLF.
- If < 0: Lagged Convolution DLF.
- If > 0: Splined DLF
- If ``ft`` = 'qwe': [rtol, atol, nquad, maxint, pts_per_dec]:
- rtol: relative tolerance (default: 1e-8)
- atol: absolute tolerance (default: 1e-20)
- nquad: order of Gaussian quadrature (default: 21)
- maxint: maximum number of partial integral intervals
(default: 200)
- pts_per_dec: points per decade (default: 20)
- diff_quad: criteria when to swap to QUAD (default: 100)
- a: lower limit for QUAD (default: first interval from QWE)
- b: upper limit for QUAD (default: last interval from QWE)
- limit: limit for quad (default: maxint)
- If ``ft`` = 'fftlog': [pts_per_dec, add_dec, q]:
- pts_per_dec: samples per decade (default: 10)
- add_dec: additional decades [left, right] (default: [-2, 1])
- q: exponent of power law bias (default: 0); -1 <= q <= 1
- If ``ft`` = 'fft': [dfreq, nfreq, ntot]:
- dfreq: Linear step-size of frequencies (default: 0.002)
- nfreq: Number of frequencies (default: 2048)
- ntot: Total number for FFT; difference between nfreq and
ntot is padded with zeroes. This number is ideally a
power of 2, e.g. 2048 or 4096 (default: nfreq).
- pts_per_dec : points per decade (default: None)
Padding can sometimes improve the result, not always. The
default samples from 0.002 Hz - 4.096 Hz. If pts_per_dec is set
to an integer, calculated frequencies are logarithmically
spaced with the given number per decade, and then interpolated
to yield the required frequencies for the FFT.
The values can be provided as dict with the keywords, or as list.
However, if provided as list, you have to follow the order given above.
See ``htarg`` for a few examples.
opt : {None, 'parallel'}, optional
Optimization flag. Defaults to None:
- None: Normal case, no parallelization nor interpolation is used.
- If 'parallel', the package ``numexpr`` is used to evaluate the
most expensive statements. Always check if it actually improves
performance for a specific problem. It can speed up the
calculation for big arrays, but will most likely be slower for
small arrays. It will use all available cores for these specific
statements, which all contain ``Gamma`` in one way or another,
which has dimensions (#frequencies, #offsets, #layers, #lambdas),
therefore can grow pretty big. The module ``numexpr`` uses by
default all available cores up to a maximum of 8. You can change
this behaviour to your desired number of threads ``nthreads``
with ``numexpr.set_num_threads(nthreads)``.
- The value 'spline' is deprecated and will be removed. See
``htarg`` instead for the interpolated versions.
The option 'parallel' only affects speed and memory usage, whereas
'spline' also affects precision! Please read the note in the *README*
documentation for more information.
loop : {None, 'freq', 'off'}, optional
Define if to calculate everything vectorized or if to loop over
frequencies ('freq') or over offsets ('off'), default is None. It
always loops over frequencies if ``ht = 'qwe'`` or if ``opt =
'spline'``. Calculating everything vectorized is fast for few offsets
OR for few frequencies. However, if you calculate many frequencies for
many offsets, it might be faster to loop over frequencies. Only
comparing the different versions will yield the answer for your
specific problem at hand!
verb : {0, 1, 2, 3, 4}, optional
Level of verbosity, default is 2:
- 0: Print nothing.
- 1: Print warnings.
- 2: Print additional runtime and kernel calls
- 3: Print additional start/stop, condensed parameter information.
- 4: Print additional full parameter information
Returns
-------
EM : ndarray, (nfreq, nrec, nsrc)
Frequency- or time-domain EM field (depending on ``signal``):
- If rec is electric, returns E [V/m].
- If rec is magnetic, returns B [T] (not H [A/m]!).
However, source and receiver are normalised. So for instance in the
electric case the source strength is 1 A and its length is 1 m. So the
electric field could also be written as [V/(A.m2)].
The shape of EM is (nfreq, nrec, nsrc). However, single dimensions
are removed.
Examples
--------
>>> import numpy as np
>>> from empymod import dipole
>>> src = [0, 0, 100]
>>> rec = [np.arange(1, 11)*500, np.zeros(10), 200]
>>> depth = [0, 300, 1000, 1050]
>>> res = [1e20, .3, 1, 50, 1]
>>> EMfield = dipole(src, rec, depth, res, freqtime=1, verb=0)
>>> print(EMfield)
[ 1.68809346e-10 -3.08303130e-10j -8.77189179e-12 -3.76920235e-11j
-3.46654704e-12 -4.87133683e-12j -3.60159726e-13 -1.12434417e-12j
1.87807271e-13 -6.21669759e-13j 1.97200208e-13 -4.38210489e-13j
1.44134842e-13 -3.17505260e-13j 9.92770406e-14 -2.33950871e-13j
6.75287598e-14 -1.74922886e-13j 4.62724887e-14 -1.32266600e-13j]
"""
# === 1. LET'S START ============
t0 = printstartfinish(verb)
# === 2. CHECK INPUT ============
# Backwards compatibility
htarg, opt = spline_backwards_hankel(ht, htarg, opt)
# Check times and Fourier Transform arguments, get required frequencies
# (freq = freqtime if ``signal=None``)
if signal is not None:
time, freq, ft, ftarg = check_time(freqtime, signal, ft, ftarg, verb)
else:
freq = freqtime
# Check layer parameters
model = check_model(depth, res, aniso, epermH, epermV, mpermH, mpermV,
xdirect, verb)
depth, res, aniso, epermH, epermV, mpermH, mpermV, isfullspace = model
# Check frequency => get etaH, etaV, zetaH, and zetaV
frequency = check_frequency(freq, res, aniso, epermH, epermV, mpermH,
mpermV, verb)
freq, etaH, etaV, zetaH, zetaV = frequency
# Update etaH/etaV and zetaH/zetaV according to user-provided model
if isinstance(res, dict) and 'func_eta' in res:
etaH, etaV = res['func_eta'](res, locals())
if isinstance(res, dict) and 'func_zeta' in res:
zetaH, zetaV = res['func_zeta'](res, locals())
# Check Hankel transform parameters
ht, htarg = check_hankel(ht, htarg, verb)
# Check optimization
use_ne_eval, loop_freq, loop_off = check_opt(opt, loop, ht, htarg, verb)
# Check src-rec configuration
# => Get flags if src or rec or both are magnetic (msrc, mrec)
ab_calc, msrc, mrec = check_ab(ab, verb)
# Check src and rec
src, nsrc = check_dipole(src, 'src', verb)
rec, nrec = check_dipole(rec, 'rec', verb)
# Get offsets and angles (off, angle)
off, angle = get_off_ang(src, rec, nsrc, nrec, verb)
# Get layer number in which src and rec reside (lsrc/lrec)
lsrc, zsrc = get_layer_nr(src, depth)
lrec, zrec = get_layer_nr(rec, depth)
# === 3. EM-FIELD CALCULATION ============
# Collect variables for fem
inp = (ab_calc, off, angle, zsrc, zrec, lsrc, lrec, depth, freq, etaH,
etaV, zetaH, zetaV, xdirect, isfullspace, ht, htarg, use_ne_eval,
msrc, mrec, loop_freq, loop_off)
EM, kcount, conv = fem(*inp)
# In case of QWE/QUAD, print Warning if not converged
conv_warning(conv, htarg, 'Hankel', verb)
# Do f->t transform if required
if signal is not None:
EM, conv = tem(EM, off, freq, time, signal, ft, ftarg)
# In case of QWE/QUAD, print Warning if not converged
conv_warning(conv, ftarg, 'Fourier', verb)
# Reshape for number of sources
EM = np.squeeze(EM.reshape((-1, nrec, nsrc), order='F'))
# === 4. FINISHED ============
printstartfinish(verb, t0, kcount)
return EM | python | def dipole(src, rec, depth, res, freqtime, signal=None, ab=11, aniso=None,
epermH=None, epermV=None, mpermH=None, mpermV=None, xdirect=False,
ht='fht', htarg=None, ft='sin', ftarg=None, opt=None, loop=None,
verb=2):
r"""Return the electromagnetic field due to a dipole source.
Calculate the electromagnetic frequency- or time-domain field due to
infinitesimal small electric or magnetic dipole source(s), measured by
infinitesimal small electric or magnetic dipole receiver(s); sources and
receivers are directed along the principal directions x, y, or z, and all
sources are at the same depth, as well as all receivers are at the same
depth.
Use the function ``bipole`` to calculate dipoles with arbitrary angles or
bipoles of finite length and arbitrary angle.
The function ``dipole`` could be replaced by ``bipole`` (all there is to do
is translate ``ab`` into ``msrc``, ``mrec``, ``azimuth``'s and ``dip``'s).
However, ``dipole`` is kept separately to serve as an example of a simple
modelling routine that can serve as a template.
See Also
--------
bipole : Electromagnetic field due to an electromagnetic source.
fem : Electromagnetic frequency-domain response.
tem : Electromagnetic time-domain response.
Parameters
----------
src, rec : list of floats or arrays
Source and receiver coordinates (m): [x, y, z].
The x- and y-coordinates can be arrays, z is a single value.
The x- and y-coordinates must have the same dimension.
Sources or receivers placed on a layer interface are considered in the
upper layer.
depth : list
Absolute layer interfaces z (m); #depth = #res - 1
(excluding +/- infinity).
res : array_like
Horizontal resistivities rho_h (Ohm.m); #res = #depth + 1.
Alternatively, res can be a dictionary. See the main manual of empymod
to see how to exploit this hook to re-calculate etaH, etaV, zetaH, and
zetaV, which can be used to, for instance, use the Cole-Cole model for
IP.
freqtime : array_like
Frequencies f (Hz) if ``signal`` == None, else times t (s); (f, t > 0).
signal : {None, 0, 1, -1}, optional
Source signal, default is None:
- None: Frequency-domain response
- -1 : Switch-off time-domain response
- 0 : Impulse time-domain response
- +1 : Switch-on time-domain response
ab : int, optional
Source-receiver configuration, defaults to 11.
+---------------+-------+------+------+------+------+------+------+
| | electric source | magnetic source |
+===============+=======+======+======+======+======+======+======+
| | **x**| **y**| **z**| **x**| **y**| **z**|
+---------------+-------+------+------+------+------+------+------+
| | **x** | 11 | 12 | 13 | 14 | 15 | 16 |
+ **electric** +-------+------+------+------+------+------+------+
| | **y** | 21 | 22 | 23 | 24 | 25 | 26 |
+ **receiver** +-------+------+------+------+------+------+------+
| | **z** | 31 | 32 | 33 | 34 | 35 | 36 |
+---------------+-------+------+------+------+------+------+------+
| | **x** | 41 | 42 | 43 | 44 | 45 | 46 |
+ **magnetic** +-------+------+------+------+------+------+------+
| | **y** | 51 | 52 | 53 | 54 | 55 | 56 |
+ **receiver** +-------+------+------+------+------+------+------+
| | **z** | 61 | 62 | 63 | 64 | 65 | 66 |
+---------------+-------+------+------+------+------+------+------+
aniso : array_like, optional
Anisotropies lambda = sqrt(rho_v/rho_h) (-); #aniso = #res.
Defaults to ones.
epermH, epermV : array_like, optional
Relative horizontal/vertical electric permittivities
epsilon_h/epsilon_v (-);
#epermH = #epermV = #res. Default is ones.
mpermH, mpermV : array_like, optional
Relative horizontal/vertical magnetic permeabilities mu_h/mu_v (-);
#mpermH = #mpermV = #res. Default is ones.
xdirect : bool or None, optional
Direct field calculation (only if src and rec are in the same layer):
- If True, direct field is calculated analytically in the frequency
domain.
- If False, direct field is calculated in the wavenumber domain.
- If None, direct field is excluded from the calculation, and only
reflected fields are returned (secondary field).
Defaults to False.
ht : {'fht', 'qwe', 'quad'}, optional
Flag to choose either the *Digital Linear Filter* method (FHT, *Fast
Hankel Transform*), the *Quadrature-With-Extrapolation* (QWE), or a
simple *Quadrature* (QUAD) for the Hankel transform. Defaults to
'fht'.
htarg : dict or list, optional
Depends on the value for ``ht``:
- If ``ht`` = 'fht': [fhtfilt, pts_per_dec]:
- fhtfilt: string of filter name in ``empymod.filters`` or
the filter method itself.
(default: ``empymod.filters.key_201_2009()``)
- pts_per_dec: points per decade; (default: 0)
- If 0: Standard DLF.
- If < 0: Lagged Convolution DLF.
- If > 0: Splined DLF
- If ``ht`` = 'qwe': [rtol, atol, nquad, maxint, pts_per_dec,
diff_quad, a, b, limit]:
- rtol: relative tolerance (default: 1e-12)
- atol: absolute tolerance (default: 1e-30)
- nquad: order of Gaussian quadrature (default: 51)
- maxint: maximum number of partial integral intervals
(default: 40)
- pts_per_dec: points per decade; (default: 0)
- If 0, no interpolation is used.
- If > 0, interpolation is used.
- diff_quad: criteria when to swap to QUAD (only relevant if
opt='spline') (default: 100)
- a: lower limit for QUAD (default: first interval from QWE)
- b: upper limit for QUAD (default: last interval from QWE)
- limit: limit for quad (default: maxint)
- If ``ht`` = 'quad': [atol, rtol, limit, lmin, lmax, pts_per_dec]:
- rtol: relative tolerance (default: 1e-12)
- atol: absolute tolerance (default: 1e-20)
- limit: An upper bound on the number of subintervals used in
the adaptive algorithm (default: 500)
- lmin: Minimum wavenumber (default 1e-6)
- lmax: Maximum wavenumber (default 0.1)
- pts_per_dec: points per decade (default: 40)
The values can be provided as dict with the keywords, or as list.
However, if provided as list, you have to follow the order given above.
A few examples, assuming ``ht`` = ``qwe``:
- Only changing rtol:
{'rtol': 1e-4} or [1e-4] or 1e-4
- Changing rtol and nquad:
{'rtol': 1e-4, 'nquad': 101} or [1e-4, '', 101]
- Only changing diff_quad:
{'diffquad': 10} or ['', '', '', '', '', 10]
ft : {'sin', 'cos', 'qwe', 'fftlog', 'fft'}, optional
Only used if ``signal`` != None. Flag to choose either the Digital
Linear Filter method (Sine- or Cosine-Filter), the
Quadrature-With-Extrapolation (QWE), the FFTLog, or the FFT for the
Fourier transform. Defaults to 'sin'.
ftarg : dict or list, optional
Only used if ``signal`` !=None. Depends on the value for ``ft``:
- If ``ft`` = 'sin' or 'cos': [fftfilt, pts_per_dec]:
- fftfilt: string of filter name in ``empymod.filters`` or
the filter method itself.
(Default: ``empymod.filters.key_201_CosSin_2012()``)
- pts_per_dec: points per decade; (default: -1)
- If 0: Standard DLF.
- If < 0: Lagged Convolution DLF.
- If > 0: Splined DLF
- If ``ft`` = 'qwe': [rtol, atol, nquad, maxint, pts_per_dec]:
- rtol: relative tolerance (default: 1e-8)
- atol: absolute tolerance (default: 1e-20)
- nquad: order of Gaussian quadrature (default: 21)
- maxint: maximum number of partial integral intervals
(default: 200)
- pts_per_dec: points per decade (default: 20)
- diff_quad: criteria when to swap to QUAD (default: 100)
- a: lower limit for QUAD (default: first interval from QWE)
- b: upper limit for QUAD (default: last interval from QWE)
- limit: limit for quad (default: maxint)
- If ``ft`` = 'fftlog': [pts_per_dec, add_dec, q]:
- pts_per_dec: samples per decade (default: 10)
- add_dec: additional decades [left, right] (default: [-2, 1])
- q: exponent of power law bias (default: 0); -1 <= q <= 1
- If ``ft`` = 'fft': [dfreq, nfreq, ntot]:
- dfreq: Linear step-size of frequencies (default: 0.002)
- nfreq: Number of frequencies (default: 2048)
- ntot: Total number for FFT; difference between nfreq and
ntot is padded with zeroes. This number is ideally a
power of 2, e.g. 2048 or 4096 (default: nfreq).
- pts_per_dec : points per decade (default: None)
Padding can sometimes improve the result, not always. The
default samples from 0.002 Hz - 4.096 Hz. If pts_per_dec is set
to an integer, calculated frequencies are logarithmically
spaced with the given number per decade, and then interpolated
to yield the required frequencies for the FFT.
The values can be provided as dict with the keywords, or as list.
However, if provided as list, you have to follow the order given above.
See ``htarg`` for a few examples.
opt : {None, 'parallel'}, optional
Optimization flag. Defaults to None:
- None: Normal case, no parallelization nor interpolation is used.
- If 'parallel', the package ``numexpr`` is used to evaluate the
most expensive statements. Always check if it actually improves
performance for a specific problem. It can speed up the
calculation for big arrays, but will most likely be slower for
small arrays. It will use all available cores for these specific
statements, which all contain ``Gamma`` in one way or another,
which has dimensions (#frequencies, #offsets, #layers, #lambdas),
therefore can grow pretty big. The module ``numexpr`` uses by
default all available cores up to a maximum of 8. You can change
this behaviour to your desired number of threads ``nthreads``
with ``numexpr.set_num_threads(nthreads)``.
- The value 'spline' is deprecated and will be removed. See
``htarg`` instead for the interpolated versions.
The option 'parallel' only affects speed and memory usage, whereas
'spline' also affects precision! Please read the note in the *README*
documentation for more information.
loop : {None, 'freq', 'off'}, optional
Define if to calculate everything vectorized or if to loop over
frequencies ('freq') or over offsets ('off'), default is None. It
always loops over frequencies if ``ht = 'qwe'`` or if ``opt =
'spline'``. Calculating everything vectorized is fast for few offsets
OR for few frequencies. However, if you calculate many frequencies for
many offsets, it might be faster to loop over frequencies. Only
comparing the different versions will yield the answer for your
specific problem at hand!
verb : {0, 1, 2, 3, 4}, optional
Level of verbosity, default is 2:
- 0: Print nothing.
- 1: Print warnings.
- 2: Print additional runtime and kernel calls
- 3: Print additional start/stop, condensed parameter information.
- 4: Print additional full parameter information
Returns
-------
EM : ndarray, (nfreq, nrec, nsrc)
Frequency- or time-domain EM field (depending on ``signal``):
- If rec is electric, returns E [V/m].
- If rec is magnetic, returns B [T] (not H [A/m]!).
However, source and receiver are normalised. So for instance in the
electric case the source strength is 1 A and its length is 1 m. So the
electric field could also be written as [V/(A.m2)].
The shape of EM is (nfreq, nrec, nsrc). However, single dimensions
are removed.
Examples
--------
>>> import numpy as np
>>> from empymod import dipole
>>> src = [0, 0, 100]
>>> rec = [np.arange(1, 11)*500, np.zeros(10), 200]
>>> depth = [0, 300, 1000, 1050]
>>> res = [1e20, .3, 1, 50, 1]
>>> EMfield = dipole(src, rec, depth, res, freqtime=1, verb=0)
>>> print(EMfield)
[ 1.68809346e-10 -3.08303130e-10j -8.77189179e-12 -3.76920235e-11j
-3.46654704e-12 -4.87133683e-12j -3.60159726e-13 -1.12434417e-12j
1.87807271e-13 -6.21669759e-13j 1.97200208e-13 -4.38210489e-13j
1.44134842e-13 -3.17505260e-13j 9.92770406e-14 -2.33950871e-13j
6.75287598e-14 -1.74922886e-13j 4.62724887e-14 -1.32266600e-13j]
"""
# === 1. LET'S START ============
t0 = printstartfinish(verb)
# === 2. CHECK INPUT ============
# Backwards compatibility
htarg, opt = spline_backwards_hankel(ht, htarg, opt)
# Check times and Fourier Transform arguments, get required frequencies
# (freq = freqtime if ``signal=None``)
if signal is not None:
time, freq, ft, ftarg = check_time(freqtime, signal, ft, ftarg, verb)
else:
freq = freqtime
# Check layer parameters
model = check_model(depth, res, aniso, epermH, epermV, mpermH, mpermV,
xdirect, verb)
depth, res, aniso, epermH, epermV, mpermH, mpermV, isfullspace = model
# Check frequency => get etaH, etaV, zetaH, and zetaV
frequency = check_frequency(freq, res, aniso, epermH, epermV, mpermH,
mpermV, verb)
freq, etaH, etaV, zetaH, zetaV = frequency
# Update etaH/etaV and zetaH/zetaV according to user-provided model
if isinstance(res, dict) and 'func_eta' in res:
etaH, etaV = res['func_eta'](res, locals())
if isinstance(res, dict) and 'func_zeta' in res:
zetaH, zetaV = res['func_zeta'](res, locals())
# Check Hankel transform parameters
ht, htarg = check_hankel(ht, htarg, verb)
# Check optimization
use_ne_eval, loop_freq, loop_off = check_opt(opt, loop, ht, htarg, verb)
# Check src-rec configuration
# => Get flags if src or rec or both are magnetic (msrc, mrec)
ab_calc, msrc, mrec = check_ab(ab, verb)
# Check src and rec
src, nsrc = check_dipole(src, 'src', verb)
rec, nrec = check_dipole(rec, 'rec', verb)
# Get offsets and angles (off, angle)
off, angle = get_off_ang(src, rec, nsrc, nrec, verb)
# Get layer number in which src and rec reside (lsrc/lrec)
lsrc, zsrc = get_layer_nr(src, depth)
lrec, zrec = get_layer_nr(rec, depth)
# === 3. EM-FIELD CALCULATION ============
# Collect variables for fem
inp = (ab_calc, off, angle, zsrc, zrec, lsrc, lrec, depth, freq, etaH,
etaV, zetaH, zetaV, xdirect, isfullspace, ht, htarg, use_ne_eval,
msrc, mrec, loop_freq, loop_off)
EM, kcount, conv = fem(*inp)
# In case of QWE/QUAD, print Warning if not converged
conv_warning(conv, htarg, 'Hankel', verb)
# Do f->t transform if required
if signal is not None:
EM, conv = tem(EM, off, freq, time, signal, ft, ftarg)
# In case of QWE/QUAD, print Warning if not converged
conv_warning(conv, ftarg, 'Fourier', verb)
# Reshape for number of sources
EM = np.squeeze(EM.reshape((-1, nrec, nsrc), order='F'))
# === 4. FINISHED ============
printstartfinish(verb, t0, kcount)
return EM | ['def', 'dipole', '(', 'src', ',', 'rec', ',', 'depth', ',', 'res', ',', 'freqtime', ',', 'signal', '=', 'None', ',', 'ab', '=', '11', ',', 'aniso', '=', 'None', ',', 'epermH', '=', 'None', ',', 'epermV', '=', 'None', ',', 'mpermH', '=', 'None', ',', 'mpermV', '=', 'None', ',', 'xdirect', '=', 'False', ',', 'ht', '=', "'fht'", ',', 'htarg', '=', 'None', ',', 'ft', '=', "'sin'", ',', 'ftarg', '=', 'None', ',', 'opt', '=', 'None', ',', 'loop', '=', 'None', ',', 'verb', '=', '2', ')', ':', "# === 1. LET'S START ============", 't0', '=', 'printstartfinish', '(', 'verb', ')', '# === 2. CHECK INPUT ============', '# Backwards compatibility', 'htarg', ',', 'opt', '=', 'spline_backwards_hankel', '(', 'ht', ',', 'htarg', ',', 'opt', ')', '# Check times and Fourier Transform arguments, get required frequencies', '# (freq = freqtime if ``signal=None``)', 'if', 'signal', 'is', 'not', 'None', ':', 'time', ',', 'freq', ',', 'ft', ',', 'ftarg', '=', 'check_time', '(', 'freqtime', ',', 'signal', ',', 'ft', ',', 'ftarg', ',', 'verb', ')', 'else', ':', 'freq', '=', 'freqtime', '# Check layer parameters', 'model', '=', 'check_model', '(', 'depth', ',', 'res', ',', 'aniso', ',', 'epermH', ',', 'epermV', ',', 'mpermH', ',', 'mpermV', ',', 'xdirect', ',', 'verb', ')', 'depth', ',', 'res', ',', 'aniso', ',', 'epermH', ',', 'epermV', ',', 'mpermH', ',', 'mpermV', ',', 'isfullspace', '=', 'model', '# Check frequency => get etaH, etaV, zetaH, and zetaV', 'frequency', '=', 'check_frequency', '(', 'freq', ',', 'res', ',', 'aniso', ',', 'epermH', ',', 'epermV', ',', 'mpermH', ',', 'mpermV', ',', 'verb', ')', 'freq', ',', 'etaH', ',', 'etaV', ',', 'zetaH', ',', 'zetaV', '=', 'frequency', '# Update etaH/etaV and zetaH/zetaV according to user-provided model', 'if', 'isinstance', '(', 'res', ',', 'dict', ')', 'and', "'func_eta'", 'in', 'res', ':', 'etaH', ',', 'etaV', '=', 'res', '[', "'func_eta'", ']', '(', 'res', ',', 'locals', '(', ')', ')', 'if', 'isinstance', '(', 'res', ',', 'dict', ')', 'and', "'func_zeta'", 'in', 'res', ':', 'zetaH', ',', 'zetaV', '=', 'res', '[', "'func_zeta'", ']', '(', 'res', ',', 'locals', '(', ')', ')', '# Check Hankel transform parameters', 'ht', ',', 'htarg', '=', 'check_hankel', '(', 'ht', ',', 'htarg', ',', 'verb', ')', '# Check optimization', 'use_ne_eval', ',', 'loop_freq', ',', 'loop_off', '=', 'check_opt', '(', 'opt', ',', 'loop', ',', 'ht', ',', 'htarg', ',', 'verb', ')', '# Check src-rec configuration', '# => Get flags if src or rec or both are magnetic (msrc, mrec)', 'ab_calc', ',', 'msrc', ',', 'mrec', '=', 'check_ab', '(', 'ab', ',', 'verb', ')', '# Check src and rec', 'src', ',', 'nsrc', '=', 'check_dipole', '(', 'src', ',', "'src'", ',', 'verb', ')', 'rec', ',', 'nrec', '=', 'check_dipole', '(', 'rec', ',', "'rec'", ',', 'verb', ')', '# Get offsets and angles (off, angle)', 'off', ',', 'angle', '=', 'get_off_ang', '(', 'src', ',', 'rec', ',', 'nsrc', ',', 'nrec', ',', 'verb', ')', '# Get layer number in which src and rec reside (lsrc/lrec)', 'lsrc', ',', 'zsrc', '=', 'get_layer_nr', '(', 'src', ',', 'depth', ')', 'lrec', ',', 'zrec', '=', 'get_layer_nr', '(', 'rec', ',', 'depth', ')', '# === 3. 
EM-FIELD CALCULATION ============', '# Collect variables for fem', 'inp', '=', '(', 'ab_calc', ',', 'off', ',', 'angle', ',', 'zsrc', ',', 'zrec', ',', 'lsrc', ',', 'lrec', ',', 'depth', ',', 'freq', ',', 'etaH', ',', 'etaV', ',', 'zetaH', ',', 'zetaV', ',', 'xdirect', ',', 'isfullspace', ',', 'ht', ',', 'htarg', ',', 'use_ne_eval', ',', 'msrc', ',', 'mrec', ',', 'loop_freq', ',', 'loop_off', ')', 'EM', ',', 'kcount', ',', 'conv', '=', 'fem', '(', '*', 'inp', ')', '# In case of QWE/QUAD, print Warning if not converged', 'conv_warning', '(', 'conv', ',', 'htarg', ',', "'Hankel'", ',', 'verb', ')', '# Do f->t transform if required', 'if', 'signal', 'is', 'not', 'None', ':', 'EM', ',', 'conv', '=', 'tem', '(', 'EM', ',', 'off', ',', 'freq', ',', 'time', ',', 'signal', ',', 'ft', ',', 'ftarg', ')', '# In case of QWE/QUAD, print Warning if not converged', 'conv_warning', '(', 'conv', ',', 'ftarg', ',', "'Fourier'", ',', 'verb', ')', '# Reshape for number of sources', 'EM', '=', 'np', '.', 'squeeze', '(', 'EM', '.', 'reshape', '(', '(', '-', '1', ',', 'nrec', ',', 'nsrc', ')', ',', 'order', '=', "'F'", ')', ')', '# === 4. FINISHED ============', 'printstartfinish', '(', 'verb', ',', 't0', ',', 'kcount', ')', 'return', 'EM'] | r"""Return the electromagnetic field due to a dipole source.
Calculate the electromagnetic frequency- or time-domain field due to
infinitesimal small electric or magnetic dipole source(s), measured by
infinitesimal small electric or magnetic dipole receiver(s); sources and
receivers are directed along the principal directions x, y, or z, and all
sources are at the same depth, as well as all receivers are at the same
depth.
Use the function ``bipole`` to calculate dipoles with arbitrary angles or
bipoles of finite length and arbitrary angle.
The function ``dipole`` could be replaced by ``bipole`` (all there is to do
is translate ``ab`` into ``msrc``, ``mrec``, ``azimuth``'s and ``dip``'s).
However, ``dipole`` is kept separately to serve as an example of a simple
modelling routine that can serve as a template.
See Also
--------
bipole : Electromagnetic field due to an electromagnetic source.
fem : Electromagnetic frequency-domain response.
tem : Electromagnetic time-domain response.
Parameters
----------
src, rec : list of floats or arrays
Source and receiver coordinates (m): [x, y, z].
The x- and y-coordinates can be arrays, z is a single value.
The x- and y-coordinates must have the same dimension.
Sources or receivers placed on a layer interface are considered in the
upper layer.
depth : list
Absolute layer interfaces z (m); #depth = #res - 1
(excluding +/- infinity).
res : array_like
Horizontal resistivities rho_h (Ohm.m); #res = #depth + 1.
Alternatively, res can be a dictionary. See the main manual of empymod
to see how to exploit this hook to re-calculate etaH, etaV, zetaH, and
zetaV, which can be used to, for instance, use the Cole-Cole model for
IP.
freqtime : array_like
Frequencies f (Hz) if ``signal`` == None, else times t (s); (f, t > 0).
signal : {None, 0, 1, -1}, optional
Source signal, default is None:
- None: Frequency-domain response
- -1 : Switch-off time-domain response
- 0 : Impulse time-domain response
- +1 : Switch-on time-domain response
ab : int, optional
Source-receiver configuration, defaults to 11.
+---------------+-------+------+------+------+------+------+------+
| | electric source | magnetic source |
+===============+=======+======+======+======+======+======+======+
| | **x**| **y**| **z**| **x**| **y**| **z**|
+---------------+-------+------+------+------+------+------+------+
| | **x** | 11 | 12 | 13 | 14 | 15 | 16 |
+ **electric** +-------+------+------+------+------+------+------+
| | **y** | 21 | 22 | 23 | 24 | 25 | 26 |
+ **receiver** +-------+------+------+------+------+------+------+
| | **z** | 31 | 32 | 33 | 34 | 35 | 36 |
+---------------+-------+------+------+------+------+------+------+
| | **x** | 41 | 42 | 43 | 44 | 45 | 46 |
+ **magnetic** +-------+------+------+------+------+------+------+
| | **y** | 51 | 52 | 53 | 54 | 55 | 56 |
+ **receiver** +-------+------+------+------+------+------+------+
| | **z** | 61 | 62 | 63 | 64 | 65 | 66 |
+---------------+-------+------+------+------+------+------+------+
aniso : array_like, optional
Anisotropies lambda = sqrt(rho_v/rho_h) (-); #aniso = #res.
Defaults to ones.
epermH, epermV : array_like, optional
Relative horizontal/vertical electric permittivities
epsilon_h/epsilon_v (-);
#epermH = #epermV = #res. Default is ones.
mpermH, mpermV : array_like, optional
Relative horizontal/vertical magnetic permeabilities mu_h/mu_v (-);
#mpermH = #mpermV = #res. Default is ones.
xdirect : bool or None, optional
Direct field calculation (only if src and rec are in the same layer):
- If True, direct field is calculated analytically in the frequency
domain.
- If False, direct field is calculated in the wavenumber domain.
- If None, direct field is excluded from the calculation, and only
reflected fields are returned (secondary field).
Defaults to False.
ht : {'fht', 'qwe', 'quad'}, optional
Flag to choose either the *Digital Linear Filter* method (FHT, *Fast
Hankel Transform*), the *Quadrature-With-Extrapolation* (QWE), or a
simple *Quadrature* (QUAD) for the Hankel transform. Defaults to
'fht'.
htarg : dict or list, optional
Depends on the value for ``ht``:
- If ``ht`` = 'fht': [fhtfilt, pts_per_dec]:
- fhtfilt: string of filter name in ``empymod.filters`` or
the filter method itself.
(default: ``empymod.filters.key_201_2009()``)
- pts_per_dec: points per decade; (default: 0)
- If 0: Standard DLF.
- If < 0: Lagged Convolution DLF.
- If > 0: Splined DLF
- If ``ht`` = 'qwe': [rtol, atol, nquad, maxint, pts_per_dec,
diff_quad, a, b, limit]:
- rtol: relative tolerance (default: 1e-12)
- atol: absolute tolerance (default: 1e-30)
- nquad: order of Gaussian quadrature (default: 51)
- maxint: maximum number of partial integral intervals
(default: 40)
- pts_per_dec: points per decade; (default: 0)
- If 0, no interpolation is used.
- If > 0, interpolation is used.
- diff_quad: criteria when to swap to QUAD (only relevant if
opt='spline') (default: 100)
- a: lower limit for QUAD (default: first interval from QWE)
- b: upper limit for QUAD (default: last interval from QWE)
- limit: limit for quad (default: maxint)
- If ``ht`` = 'quad': [atol, rtol, limit, lmin, lmax, pts_per_dec]:
- rtol: relative tolerance (default: 1e-12)
- atol: absolute tolerance (default: 1e-20)
- limit: An upper bound on the number of subintervals used in
the adaptive algorithm (default: 500)
- lmin: Minimum wavenumber (default 1e-6)
- lmax: Maximum wavenumber (default 0.1)
- pts_per_dec: points per decade (default: 40)
The values can be provided as dict with the keywords, or as list.
However, if provided as list, you have to follow the order given above.
A few examples, assuming ``ht`` = ``qwe``:
- Only changing rtol:
{'rtol': 1e-4} or [1e-4] or 1e-4
- Changing rtol and nquad:
{'rtol': 1e-4, 'nquad': 101} or [1e-4, '', 101]
- Only changing diff_quad:
{'diffquad': 10} or ['', '', '', '', '', 10]
ft : {'sin', 'cos', 'qwe', 'fftlog', 'fft'}, optional
Only used if ``signal`` != None. Flag to choose either the Digital
Linear Filter method (Sine- or Cosine-Filter), the
Quadrature-With-Extrapolation (QWE), the FFTLog, or the FFT for the
Fourier transform. Defaults to 'sin'.
ftarg : dict or list, optional
Only used if ``signal`` !=None. Depends on the value for ``ft``:
- If ``ft`` = 'sin' or 'cos': [fftfilt, pts_per_dec]:
- fftfilt: string of filter name in ``empymod.filters`` or
the filter method itself.
(Default: ``empymod.filters.key_201_CosSin_2012()``)
- pts_per_dec: points per decade; (default: -1)
- If 0: Standard DLF.
- If < 0: Lagged Convolution DLF.
- If > 0: Splined DLF
- If ``ft`` = 'qwe': [rtol, atol, nquad, maxint, pts_per_dec]:
- rtol: relative tolerance (default: 1e-8)
- atol: absolute tolerance (default: 1e-20)
- nquad: order of Gaussian quadrature (default: 21)
- maxint: maximum number of partial integral intervals
(default: 200)
- pts_per_dec: points per decade (default: 20)
- diff_quad: criteria when to swap to QUAD (default: 100)
- a: lower limit for QUAD (default: first interval from QWE)
- b: upper limit for QUAD (default: last interval from QWE)
- limit: limit for quad (default: maxint)
- If ``ft`` = 'fftlog': [pts_per_dec, add_dec, q]:
- pts_per_dec: samples per decade (default: 10)
- add_dec: additional decades [left, right] (default: [-2, 1])
- q: exponent of power law bias (default: 0); -1 <= q <= 1
- If ``ft`` = 'fft': [dfreq, nfreq, ntot]:
- dfreq: Linear step-size of frequencies (default: 0.002)
- nfreq: Number of frequencies (default: 2048)
- ntot: Total number for FFT; difference between nfreq and
ntot is padded with zeroes. This number is ideally a
power of 2, e.g. 2048 or 4096 (default: nfreq).
- pts_per_dec : points per decade (default: None)
Padding can sometimes improve the result, not always. The
default samples from 0.002 Hz - 4.096 Hz. If pts_per_dec is set
to an integer, calculated frequencies are logarithmically
spaced with the given number per decade, and then interpolated
to yield the required frequencies for the FFT.
The values can be provided as dict with the keywords, or as list.
However, if provided as list, you have to follow the order given above.
See ``htarg`` for a few examples.
opt : {None, 'parallel'}, optional
Optimization flag. Defaults to None:
- None: Normal case, no parallelization nor interpolation is used.
- If 'parallel', the package ``numexpr`` is used to evaluate the
most expensive statements. Always check if it actually improves
performance for a specific problem. It can speed up the
calculation for big arrays, but will most likely be slower for
small arrays. It will use all available cores for these specific
statements, which all contain ``Gamma`` in one way or another,
which has dimensions (#frequencies, #offsets, #layers, #lambdas),
therefore can grow pretty big. The module ``numexpr`` uses by
default all available cores up to a maximum of 8. You can change
this behaviour to your desired number of threads ``nthreads``
with ``numexpr.set_num_threads(nthreads)``.
- The value 'spline' is deprecated and will be removed. See
``htarg`` instead for the interpolated versions.
The option 'parallel' only affects speed and memory usage, whereas
'spline' also affects precision! Please read the note in the *README*
documentation for more information.
loop : {None, 'freq', 'off'}, optional
Define if to calculate everything vectorized or if to loop over
frequencies ('freq') or over offsets ('off'), default is None. It
always loops over frequencies if ``ht = 'qwe'`` or if ``opt =
'spline'``. Calculating everything vectorized is fast for few offsets
OR for few frequencies. However, if you calculate many frequencies for
many offsets, it might be faster to loop over frequencies. Only
comparing the different versions will yield the answer for your
specific problem at hand!
verb : {0, 1, 2, 3, 4}, optional
Level of verbosity, default is 2:
- 0: Print nothing.
- 1: Print warnings.
- 2: Print additional runtime and kernel calls
- 3: Print additional start/stop, condensed parameter information.
- 4: Print additional full parameter information
Returns
-------
EM : ndarray, (nfreq, nrec, nsrc)
Frequency- or time-domain EM field (depending on ``signal``):
- If rec is electric, returns E [V/m].
- If rec is magnetic, returns B [T] (not H [A/m]!).
However, source and receiver are normalised. So for instance in the
electric case the source strength is 1 A and its length is 1 m. So the
electric field could also be written as [V/(A.m2)].
The shape of EM is (nfreq, nrec, nsrc). However, single dimensions
are removed.
Examples
--------
>>> import numpy as np
>>> from empymod import dipole
>>> src = [0, 0, 100]
>>> rec = [np.arange(1, 11)*500, np.zeros(10), 200]
>>> depth = [0, 300, 1000, 1050]
>>> res = [1e20, .3, 1, 50, 1]
>>> EMfield = dipole(src, rec, depth, res, freqtime=1, verb=0)
>>> print(EMfield)
[ 1.68809346e-10 -3.08303130e-10j -8.77189179e-12 -3.76920235e-11j
-3.46654704e-12 -4.87133683e-12j -3.60159726e-13 -1.12434417e-12j
1.87807271e-13 -6.21669759e-13j 1.97200208e-13 -4.38210489e-13j
1.44134842e-13 -3.17505260e-13j 9.92770406e-14 -2.33950871e-13j
6.75287598e-14 -1.74922886e-13j 4.62724887e-14 -1.32266600e-13j] | ['r', 'Return', 'the', 'electromagnetic', 'field', 'due', 'to', 'a', 'dipole', 'source', '.'] | train | https://github.com/empymod/empymod/blob/4a78ca4191ed4b4d42d019ce715a9a3889dba1bc/empymod/model.py#L593-L960 |
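The Examples section above covers only the frequency domain; a time-domain companion sketch (the times and the impulse-response choice ``signal=0`` are illustrative, reusing the same model):
>>> import numpy as np
>>> from empymod import dipole
>>> src = [0, 0, 100]
>>> rec = [np.arange(1, 11)*500, np.zeros(10), 200]
>>> depth = [0, 300, 1000, 1050]
>>> res = [1e20, .3, 1, 50, 1]
>>> times = np.logspace(-2, 1, 5)                # 0.01 s to 10 s
>>> EMimpulse = dipole(src, rec, depth, res, freqtime=times, signal=0, verb=0)
>>> EMimpulse.shape   # -> (5, 10): (ntimes, nrec) once the single-source dimension is squeezed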
3,832 | rochacbruno/dynaconf | dynaconf/contrib/flask_dynaconf.py | DynaconfConfig.get | def get(self, key, default=None):
"""Gets config from dynaconf variables
if the variable does not exist in dynaconf, try getting it from
`app.config` to support runtime settings."""
return self._settings.get(key, Config.get(self, key, default)) | python | def get(self, key, default=None):
"""Gets config from dynaconf variables
if the variable does not exist in dynaconf, try getting it from
`app.config` to support runtime settings."""
return self._settings.get(key, Config.get(self, key, default)) | ['def', 'get', '(', 'self', ',', 'key', ',', 'default', '=', 'None', ')', ':', 'return', 'self', '.', '_settings', '.', 'get', '(', 'key', ',', 'Config', '.', 'get', '(', 'self', ',', 'key', ',', 'default', ')', ')'] | Gets config from dynaconf variables
if the variable does not exist in dynaconf, try getting it from
`app.config` to support runtime settings. | ['Gets', 'config', 'from', 'dynaconf', 'variables', 'if', 'variables', 'does', 'not', 'exists', 'in', 'dynaconf', 'try', 'getting', 'from', 'app', '.', 'config', 'to', 'support', 'runtime', 'settings', '.'] | train | https://github.com/rochacbruno/dynaconf/blob/5a7cc8f8252251cbdf4f4112965801f9dfe2831d/dynaconf/contrib/flask_dynaconf.py#L130-L134 |
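A brief usage sketch for ``DynaconfConfig.get`` (the ``FlaskDynaconf`` wiring and the setting names are assumptions for illustration; the import path follows the module path shown above):
>>> from flask import Flask
>>> from dynaconf.contrib.flask_dynaconf import FlaskDynaconf
>>> app = Flask(__name__)
>>> FlaskDynaconf(app)                    # assumed to swap app.config for the dynaconf-backed DynaconfConfig
>>> app.config['RUNTIME_ONLY'] = 'set at runtime'
>>> app.config.get('DEBUG', False)        # resolved from dynaconf settings first
>>> app.config.get('RUNTIME_ONLY')        # falls back to the plain Flask config dict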
3,833 | bodylabs/lace | lace/arcball.py | ArcBallT.drag | def drag(self, NewPt):
# //Mouse drag, calculate rotation (Point2fT Quat4fT)
""" drag (Point2fT mouse_coord) -> new_quaternion_rotation_vec
"""
X = 0
Y = 1
Z = 2
W = 3
self.m_EnVec = self._mapToSphere(NewPt)
# //Compute the vector perpendicular to the begin and end vectors
# Perp = Vector3fT()
Perp = Vector3fCross(self.m_StVec, self.m_EnVec)
NewRot = Quat4fT()
# //Compute the length of the perpendicular vector
if Vector3fLength(Perp) > Epsilon: # //if its non-zero
# //We're ok, so return the perpendicular vector as the transform after all
NewRot[X] = Perp[X]
NewRot[Y] = Perp[Y]
NewRot[Z] = Perp[Z]
# //In the quaternion values, w is cosine(theta / 2), where theta is rotation angle
NewRot[W] = Vector3fDot(self.m_StVec, self.m_EnVec)
else: # //if its zero
# //The begin and end vectors coincide, so return a quaternion of zero matrix (no rotation)
NewRot[X] = NewRot[Y] = NewRot[Z] = NewRot[W] = 0.0
return NewRot | python | def drag(self, NewPt):
# //Mouse drag, calculate rotation (Point2fT Quat4fT)
""" drag (Point2fT mouse_coord) -> new_quaternion_rotation_vec
"""
X = 0
Y = 1
Z = 2
W = 3
self.m_EnVec = self._mapToSphere(NewPt)
# //Compute the vector perpendicular to the begin and end vectors
# Perp = Vector3fT()
Perp = Vector3fCross(self.m_StVec, self.m_EnVec)
NewRot = Quat4fT()
# //Compute the length of the perpendicular vector
if Vector3fLength(Perp) > Epsilon: # //if its non-zero
# //We're ok, so return the perpendicular vector as the transform after all
NewRot[X] = Perp[X]
NewRot[Y] = Perp[Y]
NewRot[Z] = Perp[Z]
# //In the quaternion values, w is cosine(theta / 2), where theta is rotation angle
NewRot[W] = Vector3fDot(self.m_StVec, self.m_EnVec)
else: # //if its zero
# //The begin and end vectors coincide, so return a quaternion of zero matrix (no rotation)
NewRot[X] = NewRot[Y] = NewRot[Z] = NewRot[W] = 0.0
return NewRot | ['def', 'drag', '(', 'self', ',', 'NewPt', ')', ':', '# //Mouse drag, calculate rotation (Point2fT Quat4fT)', 'X', '=', '0', 'Y', '=', '1', 'Z', '=', '2', 'W', '=', '3', 'self', '.', 'm_EnVec', '=', 'self', '.', '_mapToSphere', '(', 'NewPt', ')', '# //Compute the vector perpendicular to the begin and end vectors', '# Perp = Vector3fT()', 'Perp', '=', 'Vector3fCross', '(', 'self', '.', 'm_StVec', ',', 'self', '.', 'm_EnVec', ')', 'NewRot', '=', 'Quat4fT', '(', ')', '# //Compute the length of the perpendicular vector', 'if', 'Vector3fLength', '(', 'Perp', ')', '>', 'Epsilon', ':', '# //if its non-zero', "# //We're ok, so return the perpendicular vector as the transform after all", 'NewRot', '[', 'X', ']', '=', 'Perp', '[', 'X', ']', 'NewRot', '[', 'Y', ']', '=', 'Perp', '[', 'Y', ']', 'NewRot', '[', 'Z', ']', '=', 'Perp', '[', 'Z', ']', '# //In the quaternion values, w is cosine(theta / 2), where theta is rotation angle', 'NewRot', '[', 'W', ']', '=', 'Vector3fDot', '(', 'self', '.', 'm_StVec', ',', 'self', '.', 'm_EnVec', ')', 'else', ':', '# //if its zero', '# //The begin and end vectors coincide, so return a quaternion of zero matrix (no rotation)', 'NewRot', '[', 'X', ']', '=', 'NewRot', '[', 'Y', ']', '=', 'NewRot', '[', 'Z', ']', '=', 'NewRot', '[', 'W', ']', '=', '0.0', 'return', 'NewRot'] | drag (Point2fT mouse_coord) -> new_quaternion_rotation_vec | ['drag', '(', 'Point2fT', 'mouse_coord', ')', '-', '>', 'new_quaternion_rotation_vec'] | train | https://github.com/bodylabs/lace/blob/b68f4a60a4cac66c0607ffbae38ef9d07d37f459/lace/arcball.py#L70-L98 |
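A sketch of the intended click-then-drag flow (the constructor arguments and the ``click`` method that records ``m_StVec`` are assumptions based on the conventional NeHe-style arcball API; only ``drag`` is shown above):
>>> import numpy as np
>>> from lace.arcball import ArcBallT
>>> ball = ArcBallT(640, 480)                    # assumed: viewport width and height
>>> ball.click(np.array([320.0, 240.0]))         # assumed mouse-down hook; sets the start vector m_StVec
>>> quat = ball.drag(np.array([340.0, 250.0]))   # rotation quaternion as [x, y, z, w]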
3,834 | InfoAgeTech/django-core | django_core/views/response.py | JSONHybridDeleteView.delete | def delete(self, request, *args, **kwargs):
"""
Calls the delete() method on the fetched object and then
redirects to the success URL.
"""
self.object = self.get_object()
success_url = self.get_success_url()
self.object.delete()
if self.request.is_ajax():
return JSONResponseMixin.render_to_response(self, context={})
return HttpResponseRedirect(success_url) | python | def delete(self, request, *args, **kwargs):
"""
Calls the delete() method on the fetched object and then
redirects to the success URL.
"""
self.object = self.get_object()
success_url = self.get_success_url()
self.object.delete()
if self.request.is_ajax():
return JSONResponseMixin.render_to_response(self, context={})
return HttpResponseRedirect(success_url) | ['def', 'delete', '(', 'self', ',', 'request', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'self', '.', 'object', '=', 'self', '.', 'get_object', '(', ')', 'success_url', '=', 'self', '.', 'get_success_url', '(', ')', 'self', '.', 'object', '.', 'delete', '(', ')', 'if', 'self', '.', 'request', '.', 'is_ajax', '(', ')', ':', 'return', 'JSONResponseMixin', '.', 'render_to_response', '(', 'self', ',', 'context', '=', '{', '}', ')', 'return', 'HttpResponseRedirect', '(', 'success_url', ')'] | Calls the delete() method on the fetched object and then
redirects to the success URL. | ['Calls', 'the', 'delete', '()', 'method', 'on', 'the', 'fetched', 'object', 'and', 'then', 'redirects', 'to', 'the', 'success', 'URL', '.'] | train | https://github.com/InfoAgeTech/django-core/blob/9664a145473b75120bf71e1644e9c8086e7e8955/django_core/views/response.py#L166-L178 |
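A minimal wiring sketch (the model, view and URL names are illustrative; the view otherwise follows Django's standard ``DeleteView`` contract):
# illustrative only
from django_core.views.response import JSONHybridDeleteView
from myapp.models import Widget   # hypothetical model

class WidgetDeleteView(JSONHybridDeleteView):
    model = Widget
    success_url = '/widgets/'
# AJAX callers receive an empty JSON body; non-AJAX requests are redirected to success_url.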
3,835 | python-diamond/Diamond | src/collectors/conntrack/conntrack.py | ConnTrackCollector.get_default_config_help | def get_default_config_help(self):
"""
Return help text for collector configuration
"""
config_help = super(ConnTrackCollector, self).get_default_config_help()
config_help.update({
"dir": "Directories with files of interest, comma seperated",
"files": "List of files to collect statistics from",
})
return config_help | python | def get_default_config_help(self):
"""
Return help text for collector configuration
"""
config_help = super(ConnTrackCollector, self).get_default_config_help()
config_help.update({
"dir": "Directories with files of interest, comma seperated",
"files": "List of files to collect statistics from",
})
return config_help | ['def', 'get_default_config_help', '(', 'self', ')', ':', 'config_help', '=', 'super', '(', 'ConnTrackCollector', ',', 'self', ')', '.', 'get_default_config_help', '(', ')', 'config_help', '.', 'update', '(', '{', '"dir"', ':', '"Directories with files of interest, comma seperated"', ',', '"files"', ':', '"List of files to collect statistics from"', ',', '}', ')', 'return', 'config_help'] | Return help text for collector configuration | ['Return', 'help', 'text', 'for', 'collector', 'configuration'] | train | https://github.com/python-diamond/Diamond/blob/0f3eb04327d6d3ed5e53a9967d6c9d2c09714a47/src/collectors/conntrack/conntrack.py#L22-L31 |
3,836 | tradenity/python-sdk | tradenity/resources/payment_token.py | PaymentToken.get_payment_token_by_id | def get_payment_token_by_id(cls, payment_token_id, **kwargs):
"""Find PaymentToken
Return single instance of PaymentToken by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_payment_token_by_id(payment_token_id, async=True)
>>> result = thread.get()
:param async bool
:param str payment_token_id: ID of paymentToken to return (required)
:return: PaymentToken
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._get_payment_token_by_id_with_http_info(payment_token_id, **kwargs)
else:
(data) = cls._get_payment_token_by_id_with_http_info(payment_token_id, **kwargs)
return data | python | def get_payment_token_by_id(cls, payment_token_id, **kwargs):
"""Find PaymentToken
Return single instance of PaymentToken by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_payment_token_by_id(payment_token_id, async=True)
>>> result = thread.get()
:param async bool
:param str payment_token_id: ID of paymentToken to return (required)
:return: PaymentToken
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._get_payment_token_by_id_with_http_info(payment_token_id, **kwargs)
else:
(data) = cls._get_payment_token_by_id_with_http_info(payment_token_id, **kwargs)
return data | ['def', 'get_payment_token_by_id', '(', 'cls', ',', 'payment_token_id', ',', '*', '*', 'kwargs', ')', ':', 'kwargs', '[', "'_return_http_data_only'", ']', '=', 'True', 'if', 'kwargs', '.', 'get', '(', "'async'", ')', ':', 'return', 'cls', '.', '_get_payment_token_by_id_with_http_info', '(', 'payment_token_id', ',', '*', '*', 'kwargs', ')', 'else', ':', '(', 'data', ')', '=', 'cls', '.', '_get_payment_token_by_id_with_http_info', '(', 'payment_token_id', ',', '*', '*', 'kwargs', ')', 'return', 'data'] | Find PaymentToken
Return single instance of PaymentToken by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_payment_token_by_id(payment_token_id, async=True)
>>> result = thread.get()
:param async bool
:param str payment_token_id: ID of paymentToken to return (required)
:return: PaymentToken
If the method is called asynchronously,
returns the request thread. | ['Find', 'PaymentToken'] | train | https://github.com/tradenity/python-sdk/blob/d13fbe23f4d6ff22554c6d8d2deaf209371adaf1/tradenity/resources/payment_token.py#L474-L494 |
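A condensed usage sketch combining the two calling modes from the docstring (the token id is illustrative; note that ``async`` as a keyword argument only parses on Python versions before 3.7):
>>> from tradenity.resources.payment_token import PaymentToken
>>> token = PaymentToken.get_payment_token_by_id('5a1b2c3d')             # synchronous call
>>> thread = PaymentToken.get_payment_token_by_id('5a1b2c3d', async=True)
>>> token = thread.get()                                                 # resolve the async result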
3,837 | spacetelescope/synphot_refactor | synphot/models.py | _get_meta | def _get_meta(model):
"""Return metadata of a model.
Model could be a real model or evaluated metadata."""
if isinstance(model, Model):
w = model.meta
else:
w = model # Already metadata
return w | python | def _get_meta(model):
"""Return metadata of a model.
Model could be a real model or evaluated metadata."""
if isinstance(model, Model):
w = model.meta
else:
w = model # Already metadata
return w | ['def', '_get_meta', '(', 'model', ')', ':', 'if', 'isinstance', '(', 'model', ',', 'Model', ')', ':', 'w', '=', 'model', '.', 'meta', 'else', ':', 'w', '=', 'model', '# Already metadata', 'return', 'w'] | Return metadata of a model.
Model could be a real model or evaluated metadata. | ['Return', 'metadata', 'of', 'a', 'model', '.', 'Model', 'could', 'be', 'a', 'real', 'model', 'or', 'evaluated', 'metadata', '.'] | train | https://github.com/spacetelescope/synphot_refactor/blob/9c064f3cff0c41dd8acadc0f67c6350931275b9f/synphot/models.py#L744-L751 |
3,838 | RedHatInsights/insights-core | insights/parsers/dmidecode.py | parse_dmidecode | def parse_dmidecode(dmidecode_content, pythonic_keys=False):
"""
Returns a dictionary of dmidecode information parsed from a dmidecode list
(i.e. from context.content)
This method will attempt to handle leading spaces rather than tabs.
"""
if len(dmidecode_content) < 3:
return {}
section = None
obj = {}
current = {}
buf = "\n".join(dmidecode_content).strip()
# Some versions of DMIDecode have an extra
# level of indentation, as well as slightly
# different configuration for each section.
if "\tDMI type" in buf:
pat = re.compile("^\t", flags=re.MULTILINE)
buf = pat.sub("", buf)
buf = buf.replace("\nDMI type", "DMI type")
buf = buf.replace("\nHandle", "\n\nHandle")
buf = buf.replace("\n\t\t", "\t")
def fix_key(k):
return k.lower().replace(" ", "_") if pythonic_keys else k
for line in buf.splitlines():
nbline = line.strip()
if section:
if not nbline:
# There may be some sections with the same name, such as:
# processor_information
if section in obj:
obj[section].append(current)
else:
obj[section] = [current]
current = {}
section = key = None
continue
elif line.startswith("\t"):
if ":" in line:
key, value = nbline.split(":", 1)
key = fix_key(key)
value = value.strip()
if "\t" in value:
current[key] = list(filter(None, value.split("\t")))
else:
current[key] = value
else:
section = None
if not section:
# Ignore 'Table at 0xBFFCB000' and similar.
if not ('Table' in nbline or 'table' in nbline):
section = fix_key(nbline)
if section in obj:
obj[section].append(current)
else:
obj[section] = [current]
return obj | python | def parse_dmidecode(dmidecode_content, pythonic_keys=False):
"""
Returns a dictionary of dmidecode information parsed from a dmidecode list
(i.e. from context.content)
This method will attempt to handle leading spaces rather than tabs.
"""
if len(dmidecode_content) < 3:
return {}
section = None
obj = {}
current = {}
buf = "\n".join(dmidecode_content).strip()
# Some versions of DMIDecode have an extra
# level of indentation, as well as slightly
# different configuration for each section.
if "\tDMI type" in buf:
pat = re.compile("^\t", flags=re.MULTILINE)
buf = pat.sub("", buf)
buf = buf.replace("\nDMI type", "DMI type")
buf = buf.replace("\nHandle", "\n\nHandle")
buf = buf.replace("\n\t\t", "\t")
def fix_key(k):
return k.lower().replace(" ", "_") if pythonic_keys else k
for line in buf.splitlines():
nbline = line.strip()
if section:
if not nbline:
# There may be some sections with the same name, such as:
# processor_information
if section in obj:
obj[section].append(current)
else:
obj[section] = [current]
current = {}
section = key = None
continue
elif line.startswith("\t"):
if ":" in line:
key, value = nbline.split(":", 1)
key = fix_key(key)
value = value.strip()
if "\t" in value:
current[key] = list(filter(None, value.split("\t")))
else:
current[key] = value
else:
section = None
if not section:
# Ignore 'Table at 0xBFFCB000' and similar.
if not ('Table' in nbline or 'table' in nbline):
section = fix_key(nbline)
if section in obj:
obj[section].append(current)
else:
obj[section] = [current]
return obj | ['def', 'parse_dmidecode', '(', 'dmidecode_content', ',', 'pythonic_keys', '=', 'False', ')', ':', 'if', 'len', '(', 'dmidecode_content', ')', '<', '3', ':', 'return', '{', '}', 'section', '=', 'None', 'obj', '=', '{', '}', 'current', '=', '{', '}', 'buf', '=', '"\\n"', '.', 'join', '(', 'dmidecode_content', ')', '.', 'strip', '(', ')', '# Some versions of DMIDecode have an extra', '# level of indentation, as well as slightly', '# different configuration for each section.', 'if', '"\\tDMI type"', 'in', 'buf', ':', 'pat', '=', 're', '.', 'compile', '(', '"^\\t"', ',', 'flags', '=', 're', '.', 'MULTILINE', ')', 'buf', '=', 'pat', '.', 'sub', '(', '""', ',', 'buf', ')', 'buf', '=', 'buf', '.', 'replace', '(', '"\\nDMI type"', ',', '"DMI type"', ')', 'buf', '=', 'buf', '.', 'replace', '(', '"\\nHandle"', ',', '"\\n\\nHandle"', ')', 'buf', '=', 'buf', '.', 'replace', '(', '"\\n\\t\\t"', ',', '"\\t"', ')', 'def', 'fix_key', '(', 'k', ')', ':', 'return', 'k', '.', 'lower', '(', ')', '.', 'replace', '(', '" "', ',', '"_"', ')', 'if', 'pythonic_keys', 'else', 'k', 'for', 'line', 'in', 'buf', '.', 'splitlines', '(', ')', ':', 'nbline', '=', 'line', '.', 'strip', '(', ')', 'if', 'section', ':', 'if', 'not', 'nbline', ':', '# There maybe some sections with the same name, such as:', '# processor_information', 'if', 'section', 'in', 'obj', ':', 'obj', '[', 'section', ']', '.', 'append', '(', 'current', ')', 'else', ':', 'obj', '[', 'section', ']', '=', '[', 'current', ']', 'current', '=', '{', '}', 'section', '=', 'key', '=', 'None', 'continue', 'elif', 'line', '.', 'startswith', '(', '"\\t"', ')', ':', 'if', '":"', 'in', 'line', ':', 'key', ',', 'value', '=', 'nbline', '.', 'split', '(', '":"', ',', '1', ')', 'key', '=', 'fix_key', '(', 'key', ')', 'value', '=', 'value', '.', 'strip', '(', ')', 'if', '"\\t"', 'in', 'value', ':', 'current', '[', 'key', ']', '=', 'list', '(', 'filter', '(', 'None', ',', 'value', '.', 'split', '(', '"\\t"', ')', ')', ')', 'else', ':', 'current', '[', 'key', ']', '=', 'value', 'else', ':', 'section', '=', 'None', 'if', 'not', 'section', ':', "# Ignore 'Table at 0xBFFCB000' and similar.", 'if', 'not', '(', "'Table'", 'in', 'nbline', 'or', "'table'", 'in', 'nbline', ')', ':', 'section', '=', 'fix_key', '(', 'nbline', ')', 'if', 'section', 'in', 'obj', ':', 'obj', '[', 'section', ']', '.', 'append', '(', 'current', ')', 'else', ':', 'obj', '[', 'section', ']', '=', '[', 'current', ']', 'return', 'obj'] | Returns a dictionary of dmidecode information parsed from a dmidecode list
(i.e. from context.content)
This method will attempt to handle leading spaces rather than tabs. | ['Returns', 'a', 'dictionary', 'of', 'dmidecode', 'information', 'parsed', 'from', 'a', 'dmidecode', 'list', '(', 'i', '.', 'e', '.', 'from', 'context', '.', 'content', ')'] | train | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/parsers/dmidecode.py#L147-L215 |
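A small round-trip sketch (the dmidecode lines are a trimmed, illustrative sample):
>>> content = [
...     'Handle 0x0001, DMI type 1, 27 bytes',
...     'System Information',
...     '\tManufacturer: Dell Inc.',
...     '\tProduct Name: PowerEdge R720',
... ]
>>> parsed = parse_dmidecode(content, pythonic_keys=True)
>>> parsed['system_information'][0]['manufacturer']   # -> 'Dell Inc.'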
3,839 | tensorflow/tensorboard | tensorboard/plugins/graph/graphs_plugin.py | GraphsPlugin.graph_impl | def graph_impl(self, run, tag, is_conceptual, limit_attr_size=None, large_attrs_key=None):
"""Result of the form `(body, mime_type)`, or `None` if no graph exists."""
if is_conceptual:
tensor_events = self._multiplexer.Tensors(run, tag)
# Take the first event if there are multiple events written from different
# steps.
keras_model_config = json.loads(tensor_events[0].tensor_proto.string_val[0])
graph = keras_util.keras_model_to_graph_def(keras_model_config)
elif tag:
tensor_events = self._multiplexer.Tensors(run, tag)
# Take the first event if there are multiple events written from different
# steps.
run_metadata = config_pb2.RunMetadata.FromString(
tensor_events[0].tensor_proto.string_val[0])
graph = graph_pb2.GraphDef()
for func_graph in run_metadata.function_graphs:
graph_util.combine_graph_defs(graph, func_graph.pre_optimization_graph)
else:
graph = self._multiplexer.Graph(run)
# This next line might raise a ValueError if the limit parameters
# are invalid (size is negative, size present but key absent, etc.).
process_graph.prepare_graph_for_ui(graph, limit_attr_size, large_attrs_key)
return (str(graph), 'text/x-protobuf') | python | def graph_impl(self, run, tag, is_conceptual, limit_attr_size=None, large_attrs_key=None):
"""Result of the form `(body, mime_type)`, or `None` if no graph exists."""
if is_conceptual:
tensor_events = self._multiplexer.Tensors(run, tag)
# Take the first event if there are multiple events written from different
# steps.
keras_model_config = json.loads(tensor_events[0].tensor_proto.string_val[0])
graph = keras_util.keras_model_to_graph_def(keras_model_config)
elif tag:
tensor_events = self._multiplexer.Tensors(run, tag)
# Take the first event if there are multiple events written from different
# steps.
run_metadata = config_pb2.RunMetadata.FromString(
tensor_events[0].tensor_proto.string_val[0])
graph = graph_pb2.GraphDef()
for func_graph in run_metadata.function_graphs:
graph_util.combine_graph_defs(graph, func_graph.pre_optimization_graph)
else:
graph = self._multiplexer.Graph(run)
# This next line might raise a ValueError if the limit parameters
# are invalid (size is negative, size present but key absent, etc.).
process_graph.prepare_graph_for_ui(graph, limit_attr_size, large_attrs_key)
return (str(graph), 'text/x-protobuf') | ['def', 'graph_impl', '(', 'self', ',', 'run', ',', 'tag', ',', 'is_conceptual', ',', 'limit_attr_size', '=', 'None', ',', 'large_attrs_key', '=', 'None', ')', ':', 'if', 'is_conceptual', ':', 'tensor_events', '=', 'self', '.', '_multiplexer', '.', 'Tensors', '(', 'run', ',', 'tag', ')', '# Take the first event if there are multiple events written from different', '# steps.', 'keras_model_config', '=', 'json', '.', 'loads', '(', 'tensor_events', '[', '0', ']', '.', 'tensor_proto', '.', 'string_val', '[', '0', ']', ')', 'graph', '=', 'keras_util', '.', 'keras_model_to_graph_def', '(', 'keras_model_config', ')', 'elif', 'tag', ':', 'tensor_events', '=', 'self', '.', '_multiplexer', '.', 'Tensors', '(', 'run', ',', 'tag', ')', '# Take the first event if there are multiple events written from different', '# steps.', 'run_metadata', '=', 'config_pb2', '.', 'RunMetadata', '.', 'FromString', '(', 'tensor_events', '[', '0', ']', '.', 'tensor_proto', '.', 'string_val', '[', '0', ']', ')', 'graph', '=', 'graph_pb2', '.', 'GraphDef', '(', ')', 'for', 'func_graph', 'in', 'run_metadata', '.', 'function_graphs', ':', 'graph_util', '.', 'combine_graph_defs', '(', 'graph', ',', 'func_graph', '.', 'pre_optimization_graph', ')', 'else', ':', 'graph', '=', 'self', '.', '_multiplexer', '.', 'Graph', '(', 'run', ')', '# This next line might raise a ValueError if the limit parameters', '# are invalid (size is negative, size present but key absent, etc.).', 'process_graph', '.', 'prepare_graph_for_ui', '(', 'graph', ',', 'limit_attr_size', ',', 'large_attrs_key', ')', 'return', '(', 'str', '(', 'graph', ')', ',', "'text/x-protobuf'", ')'] | Result of the form `(body, mime_type)`, or `None` if no graph exists. | ['Result', 'of', 'the', 'form', '(', 'body', 'mime_type', ')', 'or', 'None', 'if', 'no', 'graph', 'exists', '.'] | train | https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/graph/graphs_plugin.py#L145-L169 |
3,840 | log2timeline/plaso | plaso/multi_processing/psort.py | PsortEventHeap.PopEvent | def PopEvent(self):
"""Pops an event from the heap.
Returns:
tuple: containing:
str: identifier of the event MACB group or None if the event cannot
be grouped.
str: identifier of the event content.
EventObject: event.
"""
try:
macb_group_identifier, content_identifier, event = heapq.heappop(
self._heap)
if macb_group_identifier == '':
macb_group_identifier = None
return macb_group_identifier, content_identifier, event
except IndexError:
return None | python | def PopEvent(self):
"""Pops an event from the heap.
Returns:
tuple: containing:
str: identifier of the event MACB group or None if the event cannot
be grouped.
str: identifier of the event content.
EventObject: event.
"""
try:
macb_group_identifier, content_identifier, event = heapq.heappop(
self._heap)
if macb_group_identifier == '':
macb_group_identifier = None
return macb_group_identifier, content_identifier, event
except IndexError:
return None | ['def', 'PopEvent', '(', 'self', ')', ':', 'try', ':', 'macb_group_identifier', ',', 'content_identifier', ',', 'event', '=', 'heapq', '.', 'heappop', '(', 'self', '.', '_heap', ')', 'if', 'macb_group_identifier', '==', "''", ':', 'macb_group_identifier', '=', 'None', 'return', 'macb_group_identifier', ',', 'content_identifier', ',', 'event', 'except', 'IndexError', ':', 'return', 'None'] | Pops an event from the heap.
Returns:
tuple: containing:
str: identifier of the event MACB group or None if the event cannot
be grouped.
str: identifier of the event content.
EventObject: event. | ['Pops', 'an', 'event', 'from', 'the', 'heap', '.'] | train | https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/multi_processing/psort.py#L127-L146 |
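A push/pop round-trip sketch (``PushEvent`` and the ``some_event`` object are assumptions about the surrounding plaso API, shown only to situate ``PopEvent``):
>>> heap = PsortEventHeap()
>>> heap.PushEvent(some_event)   # assumed counterpart that pushes a previously extracted EventObject
>>> macb_group, content_id, event = heap.PopEvent()
>>> heap.PopEvent() is None      # an exhausted heap yields None
True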
3,841 | thespacedoctor/tastic | tastic/tastic.py | baseClass.notes | def notes(self):
"""*list of the notes assoicated with this object*
**Usage:**
The document, project and task objects can all contain notes.
.. code-block:: python
docNotes = doc.notes
projectNotes = aProject.notes
taskNotes = aTask.notes
"""
return self._get_object(
regex=re.compile(
r'((?<=\n)|(?<=^))(?P<title>\S(?<!-)((?!(: +@|: *\n|: *$)).)*)\s*?(\n|$)(?P<tagString>&&&)?(?P<content>&&&)?', re.UNICODE),
objectType="note",
content=None
) | python | def notes(self):
"""*list of the notes assoicated with this object*
**Usage:**
The document, project and task objects can all contain notes.
.. code-block:: python
docNotes = doc.notes
projectNotes = aProject.notes
taskNotes = aTask.notes
"""
return self._get_object(
regex=re.compile(
r'((?<=\n)|(?<=^))(?P<title>\S(?<!-)((?!(: +@|: *\n|: *$)).)*)\s*?(\n|$)(?P<tagString>&&&)?(?P<content>&&&)?', re.UNICODE),
objectType="note",
content=None
) | ['def', 'notes', '(', 'self', ')', ':', 'return', 'self', '.', '_get_object', '(', 'regex', '=', 're', '.', 'compile', '(', "r'((?<=\\n)|(?<=^))(?P<title>\\S(?<!-)((?!(: +@|: *\\n|: *$)).)*)\\s*?(\\n|$)(?P<tagString>&&&)?(?P<content>&&&)?'", ',', 're', '.', 'UNICODE', ')', ',', 'objectType', '=', '"note"', ',', 'content', '=', 'None', ')'] | *list of the notes assoicated with this object*
**Usage:**
The document, project and task objects can all contain notes.
.. code-block:: python
docNotes = doc.notes
projectNotes = aProject.notes
taskNotes = aTask.notes | ['*', 'list', 'of', 'the', 'notes', 'assoicated', 'with', 'this', 'object', '*'] | train | https://github.com/thespacedoctor/tastic/blob/a0a16cf329a50057906ac3f696bb60b6fcee25e0/tastic/tastic.py#L189-L207 |
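A usage sketch tying the three docstring cases together (the ``document`` entry point, the filename, and the ``projects``/``tasks`` accessors are assumptions for illustration):
>>> from tastic.tastic import document
>>> doc = document("saturday-tasks.taskpaper")   # assumed constructor taking a TaskPaper filepath
>>> docNotes = doc.notes
>>> aProject = doc.projects[0]                   # assumed sibling accessor
>>> projectNotes = aProject.notes
>>> aTask = aProject.tasks[0]                    # assumed sibling accessor
>>> taskNotes = aTask.notes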
3,842 | evocell/rabifier | rabifier/utils.py | Pathfinder.get | def get(self, name):
""" Looks for a name in the path.
:param name: file name
:return: path to the file
"""
for d in self.paths:
if os.path.exists(d) and name in os.listdir(d):
return os.path.join(d, name)
logger.debug('File not found {}'.format(name))
return None | python | def get(self, name):
""" Looks for a name in the path.
:param name: file name
:return: path to the file
"""
for d in self.paths:
if os.path.exists(d) and name in os.listdir(d):
return os.path.join(d, name)
logger.debug('File not found {}'.format(name))
return None | ['def', 'get', '(', 'self', ',', 'name', ')', ':', 'for', 'd', 'in', 'self', '.', 'paths', ':', 'if', 'os', '.', 'path', '.', 'exists', '(', 'd', ')', 'and', 'name', 'in', 'os', '.', 'listdir', '(', 'd', ')', ':', 'return', 'os', '.', 'path', '.', 'join', '(', 'd', ',', 'name', ')', 'logger', '.', 'debug', '(', "'File not found {}'", '.', 'format', '(', 'name', ')', ')', 'return', 'None'] | Looks for a name in the path.
:param name: file name
:return: path to the file | ['Looks', 'for', 'a', 'name', 'in', 'the', 'path', '.'] | train | https://github.com/evocell/rabifier/blob/a5be3d516517e555bde463b94f06aeed106d19b8/rabifier/utils.py#L62-L73 |
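The `Pathfinder.get` record above simply walks an ordered list of directories and returns the first match. A minimal standalone sketch of the same lookup, with placeholder directory names and without depending on rabifier itself:

.. code-block:: python

    import logging
    import os

    logger = logging.getLogger(__name__)

    def find_in_paths(name, paths):
        """Return the full path of `name` in the first directory that contains it, else None."""
        for d in paths:
            # Skip directories that do not exist, mirroring the os.path.exists() guard above.
            if os.path.exists(d) and name in os.listdir(d):
                return os.path.join(d, name)
        logger.debug('File not found {}'.format(name))
        return None

    # Example: look for "data.fasta" in the current directory, then /tmp (both placeholders).
    print(find_in_paths('data.fasta', [os.getcwd(), '/tmp']))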
3,843 | mdiener/grace | grace/py27/slimit/mangler.py | mangle | def mangle(tree, toplevel=False):
"""Mangle names.
Args:
toplevel: defaults to False. Defines if global
scope should be mangled or not.
"""
sym_table = SymbolTable()
visitor = ScopeTreeVisitor(sym_table)
visitor.visit(tree)
fill_scope_references(tree)
mangle_scope_tree(sym_table.globals, toplevel)
mangler = NameManglerVisitor()
mangler.visit(tree) | python | def mangle(tree, toplevel=False):
"""Mangle names.
Args:
toplevel: defaults to False. Defines if global
scope should be mangled or not.
"""
sym_table = SymbolTable()
visitor = ScopeTreeVisitor(sym_table)
visitor.visit(tree)
fill_scope_references(tree)
mangle_scope_tree(sym_table.globals, toplevel)
mangler = NameManglerVisitor()
mangler.visit(tree) | ['def', 'mangle', '(', 'tree', ',', 'toplevel', '=', 'False', ')', ':', 'sym_table', '=', 'SymbolTable', '(', ')', 'visitor', '=', 'ScopeTreeVisitor', '(', 'sym_table', ')', 'visitor', '.', 'visit', '(', 'tree', ')', 'fill_scope_references', '(', 'tree', ')', 'mangle_scope_tree', '(', 'sym_table', '.', 'globals', ',', 'toplevel', ')', 'mangler', '=', 'NameManglerVisitor', '(', ')', 'mangler', '.', 'visit', '(', 'tree', ')'] | Mangle names.
Args:
toplevel: defaults to False. Defines if global
scope should be mangled or not. | ['Mangle', 'names', '.'] | train | https://github.com/mdiener/grace/blob/2dab13a2cf636da5da989904c5885166fc94d36d/grace/py27/slimit/mangler.py#L36-L51 |
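A short usage sketch for the `mangle` pass above. It assumes the upstream slimit package (which this vendored copy mirrors) is installed and that its `Parser` and `to_ecma()` helpers behave as in slimit's documentation:

.. code-block:: python

    from slimit.parser import Parser
    from slimit.mangler import mangle

    # Parse some JavaScript, mangle local names, and print the rewritten source.
    tree = Parser().parse(
        'function add(first, second) { var total = first + second; return total; }')
    mangle(tree, toplevel=False)   # toplevel=False leaves global names such as `add` untouched
    print(tree.to_ecma())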
3,844 | umutbozkurt/django-rest-framework-mongoengine | rest_framework_mongoengine/fields.py | DocumentField.to_representation | def to_representation(self, obj):
""" convert value to representation.
DRF ModelField uses ``value_to_string`` for this purpose. Mongoengine fields do not have such method.
This implementation uses ``django.utils.encoding.smart_text`` to convert everything to text, while keeping json-safe types intact.
NB: The argument is whole object, instead of attribute value. This is upstream feature.
Probably because the field can be represented by a complicated method with nontrivial way to extract data.
"""
value = self.model_field.__get__(obj, None)
return smart_text(value, strings_only=True) | python | def to_representation(self, obj):
""" convert value to representation.
DRF ModelField uses ``value_to_string`` for this purpose. Mongoengine fields do not have such method.
This implementation uses ``django.utils.encoding.smart_text`` to convert everything to text, while keeping json-safe types intact.
NB: The argument is whole object, instead of attribute value. This is upstream feature.
Probably because the field can be represented by a complicated method with nontrivial way to extract data.
"""
value = self.model_field.__get__(obj, None)
return smart_text(value, strings_only=True) | ['def', 'to_representation', '(', 'self', ',', 'obj', ')', ':', 'value', '=', 'self', '.', 'model_field', '.', '__get__', '(', 'obj', ',', 'None', ')', 'return', 'smart_text', '(', 'value', ',', 'strings_only', '=', 'True', ')'] | convert value to representation.
DRF ModelField uses ``value_to_string`` for this purpose. Mongoengine fields do not have such method.
This implementation uses ``django.utils.encoding.smart_text`` to convert everything to text, while keeping json-safe types intact.
NB: The argument is whole object, instead of attribute value. This is upstream feature.
Probably because the field can be represented by a complicated method with nontrivial way to extract data. | ['convert', 'value', 'to', 'representation', '.'] | train | https://github.com/umutbozkurt/django-rest-framework-mongoengine/blob/2fe6de53907b31a5e8b742e4c6b728942b5fa4f0/rest_framework_mongoengine/fields.py#L58-L69 |
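The key detail above is `strings_only=True`: json-safe values pass through untouched while everything else is coerced to text. A rough standalone illustration of that behaviour (not Django's actual implementation) is:

.. code-block:: python

    import datetime
    import decimal

    # Types that a strings_only conversion leaves alone; everything else becomes text.
    _JSON_SAFE = (type(None), bool, int, float, decimal.Decimal,
                  datetime.datetime, datetime.date, datetime.time)

    def to_text_keeping_json_safe(value):
        return value if isinstance(value, _JSON_SAFE) else str(value)

    print(to_text_keeping_json_safe(42))        # 42, still an int
    print(to_text_keeping_json_safe(None))      # None, unchanged
    print(to_text_keeping_json_safe(object()))  # coerced to a string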
3,845 | PyGithub/PyGithub | github/MainClass.py | Github.get_rate_limit | def get_rate_limit(self):
"""
Rate limit status for different resources (core/search/graphql).
:calls: `GET /rate_limit <http://developer.github.com/v3/rate_limit>`_
:rtype: :class:`github.RateLimit.RateLimit`
"""
headers, data = self.__requester.requestJsonAndCheck(
'GET',
'/rate_limit'
)
return RateLimit.RateLimit(self.__requester, headers, data["resources"], True) | python | def get_rate_limit(self):
"""
Rate limit status for different resources (core/search/graphql).
:calls: `GET /rate_limit <http://developer.github.com/v3/rate_limit>`_
:rtype: :class:`github.RateLimit.RateLimit`
"""
headers, data = self.__requester.requestJsonAndCheck(
'GET',
'/rate_limit'
)
return RateLimit.RateLimit(self.__requester, headers, data["resources"], True) | ['def', 'get_rate_limit', '(', 'self', ')', ':', 'headers', ',', 'data', '=', 'self', '.', '__requester', '.', 'requestJsonAndCheck', '(', "'GET'", ',', "'/rate_limit'", ')', 'return', 'RateLimit', '.', 'RateLimit', '(', 'self', '.', '__requester', ',', 'headers', ',', 'data', '[', '"resources"', ']', ',', 'True', ')'] | Rate limit status for different resources (core/search/graphql).
:calls: `GET /rate_limit <http://developer.github.com/v3/rate_limit>`_
:rtype: :class:`github.RateLimit.RateLimit` | ['Rate', 'limit', 'status', 'for', 'different', 'resources', '(', 'core', '/', 'search', '/', 'graphql', ')', '.'] | train | https://github.com/PyGithub/PyGithub/blob/f716df86bbe7dc276c6596699fa9712b61ef974c/github/MainClass.py#L172-L183 |
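A short usage sketch for the method above; the token is a placeholder, and the per-resource attributes (`core`, `search`) are assumed to follow the resources returned by the endpoint:

.. code-block:: python

    from github import Github

    gh = Github("YOUR_ACCESS_TOKEN")      # placeholder token
    limits = gh.get_rate_limit()

    # Each resource bucket exposes limit / remaining / reset.
    print(limits.core.limit, limits.core.remaining, limits.core.reset)
    print(limits.search.remaining)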
3,846 | carpedm20/ndrive | ndrive/models.py | ndrive.getList | def getList(self, dummy = 56184, orgresource = '/', type = 1, dept = 0, sort = 'name', order = 'asc', startnum = 0, pagingrow = 1000):
"""GetList
Args:
dummy: ???
orgresource: Directory path to get the file list
ex) /Picture/
type: 1 => only directories with idxfolder property
2 => only files
3 => directories and files with thumbnail info
ex) viewHeight, viewWidth for Image file
4 => only directories except idxfolder
5 => directories and files without thumbnail info
depth: Dept for file list
sort: name => 이름
file => file type, 종류
length => size of file, 크기
date => edited date, 수정한 날짜
credate => creation date, 올린 날짜
protect => protect or not, 중요 표시
order: Order by (asc, desc)
startnum: ???
pagingrow: start index ?
Returns:
FileInfo list: List of files for a path
False: Failed to get list
"""
url = nurls['getList']
data = {'userid': self.user_id,
'useridx': self.useridx,
'dummy': dummy,
'orgresource': orgresource,
'type': type,
'dept': dept,
'sort': sort,
'order': order,
'startnum': startnum,
'pagingrow': pagingrow,
}
r = self.session.post(url = url, data = data)
try:
j = json.loads(r.text)
except:
print '[*] Success checkUpload: 0 result'
return []
if j['message'] != 'success':
print '[*] Error checkUpload: ' + j['message']
return False
else:
files = []
for i in j['resultvalue']:
f = FileInfo()
f.protect = i['protect']
f.resourceno = i['resourceno']
f.copyright = i['copyright']
f.subfoldercnt = i['subfoldercnt']
f.resourcetype = i['resourcetype']
f.fileuploadstatus = i['fileuploadstatus']
f.prority = i['priority']
f.filelink = i['filelink']
f.href = i['href']
f.thumbnailpath = i['thumbnailpath']
f.sharedinfo = i['sharedinfo']
f.getlastmodified = i['getlastmodified']
f.shareno = i['shareno']
f.lastmodifieduser = i['lastmodifieduser']
f.getcontentlength = i['getcontentlength']
f.lastaccessed = i['lastaccessed']
f.virusstatus = i['virusstatus']
f.idxfolder = i['idxfolder']
f.creationdate = i['creationdate']
f.nocache = i['nocache']
f.viewWidth = i['viewWidth']
f.viewHeight = i['viewHeight']
f.setJson(j['resultvalue'])
files.append(f)
return files | python | def getList(self, dummy = 56184, orgresource = '/', type = 1, dept = 0, sort = 'name', order = 'asc', startnum = 0, pagingrow = 1000):
"""GetList
Args:
dummy: ???
orgresource: Directory path to get the file list
ex) /Picture/
type: 1 => only directories with idxfolder property
2 => only files
3 => directories and files with thumbnail info
ex) viewHeight, viewWidth for Image file
4 => only directories except idxfolder
5 => directories and files without thumbnail info
depth: Dept for file list
sort: name => 이름
file => file type, 종류
length => size of file, 크기
date => edited date, 수정한 날짜
credate => creation date, 올린 날짜
protect => protect or not, 중요 표시
order: Order by (asc, desc)
startnum: ???
pagingrow: start index ?
Returns:
FileInfo list: List of files for a path
False: Failed to get list
"""
url = nurls['getList']
data = {'userid': self.user_id,
'useridx': self.useridx,
'dummy': dummy,
'orgresource': orgresource,
'type': type,
'dept': dept,
'sort': sort,
'order': order,
'startnum': startnum,
'pagingrow': pagingrow,
}
r = self.session.post(url = url, data = data)
try:
j = json.loads(r.text)
except:
print '[*] Success checkUpload: 0 result'
return []
if j['message'] != 'success':
print '[*] Error checkUpload: ' + j['message']
return False
else:
files = []
for i in j['resultvalue']:
f = FileInfo()
f.protect = i['protect']
f.resourceno = i['resourceno']
f.copyright = i['copyright']
f.subfoldercnt = i['subfoldercnt']
f.resourcetype = i['resourcetype']
f.fileuploadstatus = i['fileuploadstatus']
f.prority = i['priority']
f.filelink = i['filelink']
f.href = i['href']
f.thumbnailpath = i['thumbnailpath']
f.sharedinfo = i['sharedinfo']
f.getlastmodified = i['getlastmodified']
f.shareno = i['shareno']
f.lastmodifieduser = i['lastmodifieduser']
f.getcontentlength = i['getcontentlength']
f.lastaccessed = i['lastaccessed']
f.virusstatus = i['virusstatus']
f.idxfolder = i['idxfolder']
f.creationdate = i['creationdate']
f.nocache = i['nocache']
f.viewWidth = i['viewWidth']
f.viewHeight = i['viewHeight']
f.setJson(j['resultvalue'])
files.append(f)
return files | ['def', 'getList', '(', 'self', ',', 'dummy', '=', '56184', ',', 'orgresource', '=', "'/'", ',', 'type', '=', '1', ',', 'dept', '=', '0', ',', 'sort', '=', "'name'", ',', 'order', '=', "'asc'", ',', 'startnum', '=', '0', ',', 'pagingrow', '=', '1000', ')', ':', 'url', '=', 'nurls', '[', "'getList'", ']', 'data', '=', '{', "'userid'", ':', 'self', '.', 'user_id', ',', "'useridx'", ':', 'self', '.', 'useridx', ',', "'dummy'", ':', 'dummy', ',', "'orgresource'", ':', 'orgresource', ',', "'type'", ':', 'type', ',', "'dept'", ':', 'dept', ',', "'sort'", ':', 'sort', ',', "'order'", ':', 'order', ',', "'startnum'", ':', 'startnum', ',', "'pagingrow'", ':', 'pagingrow', ',', '}', 'r', '=', 'self', '.', 'session', '.', 'post', '(', 'url', '=', 'url', ',', 'data', '=', 'data', ')', 'try', ':', 'j', '=', 'json', '.', 'loads', '(', 'r', '.', 'text', ')', 'except', ':', 'print', "'[*] Success checkUpload: 0 result'", 'return', '[', ']', 'if', 'j', '[', "'message'", ']', '!=', "'success'", ':', 'print', "'[*] Error checkUpload: '", '+', 'j', '[', "'message'", ']', 'return', 'False', 'else', ':', 'files', '=', '[', ']', 'for', 'i', 'in', 'j', '[', "'resultvalue'", ']', ':', 'f', '=', 'FileInfo', '(', ')', 'f', '.', 'protect', '=', 'i', '[', "'protect'", ']', 'f', '.', 'resourceno', '=', 'i', '[', "'resourceno'", ']', 'f', '.', 'copyright', '=', 'i', '[', "'copyright'", ']', 'f', '.', 'subfoldercnt', '=', 'i', '[', "'subfoldercnt'", ']', 'f', '.', 'resourcetype', '=', 'i', '[', "'resourcetype'", ']', 'f', '.', 'fileuploadstatus', '=', 'i', '[', "'fileuploadstatus'", ']', 'f', '.', 'prority', '=', 'i', '[', "'priority'", ']', 'f', '.', 'filelink', '=', 'i', '[', "'filelink'", ']', 'f', '.', 'href', '=', 'i', '[', "'href'", ']', 'f', '.', 'thumbnailpath', '=', 'i', '[', "'thumbnailpath'", ']', 'f', '.', 'sharedinfo', '=', 'i', '[', "'sharedinfo'", ']', 'f', '.', 'getlastmodified', '=', 'i', '[', "'getlastmodified'", ']', 'f', '.', 'shareno', '=', 'i', '[', "'shareno'", ']', 'f', '.', 'lastmodifieduser', '=', 'i', '[', "'lastmodifieduser'", ']', 'f', '.', 'getcontentlength', '=', 'i', '[', "'getcontentlength'", ']', 'f', '.', 'lastaccessed', '=', 'i', '[', "'lastaccessed'", ']', 'f', '.', 'virusstatus', '=', 'i', '[', "'virusstatus'", ']', 'f', '.', 'idxfolder', '=', 'i', '[', "'idxfolder'", ']', 'f', '.', 'creationdate', '=', 'i', '[', "'creationdate'", ']', 'f', '.', 'nocache', '=', 'i', '[', "'nocache'", ']', 'f', '.', 'viewWidth', '=', 'i', '[', "'viewWidth'", ']', 'f', '.', 'viewHeight', '=', 'i', '[', "'viewHeight'", ']', 'f', '.', 'setJson', '(', 'j', '[', "'resultvalue'", ']', ')', 'files', '.', 'append', '(', 'f', ')', 'return', 'files'] | GetList
Args:
dummy: ???
orgresource: Directory path to get the file list
ex) /Picture/
type: 1 => only directories with idxfolder property
2 => only files
3 => directories and files with thumbnail info
ex) viewHeight, viewWidth for Image file
4 => only directories except idxfolder
5 => directories and files without thumbnail info
depth: Dept for file list
sort: name => 이름
file => file type, 종류
length => size of file, 크기
date => edited date, 수정한 날짜
credate => creation date, 올린 날짜
protect => protect or not, 중요 표시
order: Order by (asc, desc)
startnum: ???
pagingrow: start index ?
Returns:
FileInfo list: List of files for a path
False: Failed to get list | ['GetList'] | train | https://github.com/carpedm20/ndrive/blob/ac58eaf8a8d46292ad752bb38047f65838b8ad2b/ndrive/models.py#L426-L519 |
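A hedged usage sketch for `getList`; the credentials are placeholders, the `login` call is assumed from the project's README rather than from the record above, and only parameters documented above are used:

.. code-block:: python

    from ndrive import ndrive

    nd = ndrive()
    nd.login("YOUR_NAVER_ID", "YOUR_PASSWORD")   # placeholders; a real Naver login is required

    # type=3 lists directories and files with thumbnail info, newest first.
    files = nd.getList(orgresource='/Picture/', type=3, sort='date', order='desc')
    if files:
        for f in files:
            print(f.href, f.getcontentlength)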
3,847 | BlueBrain/NeuroM | neurom/check/neuron_checks.py | has_no_narrow_start | def has_no_narrow_start(neuron, frac=0.9):
'''Check if neurites have a narrow start
Arguments:
neuron(Neuron): The neuron object to test
frac(float): Ratio that the second point must be smaller than the first
Returns:
CheckResult with a list of all first segments of neurites with a narrow start
'''
bad_ids = [(neurite.root_node.id, [neurite.root_node.points[1]])
for neurite in neuron.neurites
if neurite.root_node.points[1][COLS.R] < frac * neurite.root_node.points[2][COLS.R]]
return CheckResult(len(bad_ids) == 0, bad_ids) | python | def has_no_narrow_start(neuron, frac=0.9):
'''Check if neurites have a narrow start
Arguments:
neuron(Neuron): The neuron object to test
frac(float): Ratio that the second point must be smaller than the first
Returns:
CheckResult with a list of all first segments of neurites with a narrow start
'''
bad_ids = [(neurite.root_node.id, [neurite.root_node.points[1]])
for neurite in neuron.neurites
if neurite.root_node.points[1][COLS.R] < frac * neurite.root_node.points[2][COLS.R]]
return CheckResult(len(bad_ids) == 0, bad_ids) | ['def', 'has_no_narrow_start', '(', 'neuron', ',', 'frac', '=', '0.9', ')', ':', 'bad_ids', '=', '[', '(', 'neurite', '.', 'root_node', '.', 'id', ',', '[', 'neurite', '.', 'root_node', '.', 'points', '[', '1', ']', ']', ')', 'for', 'neurite', 'in', 'neuron', '.', 'neurites', 'if', 'neurite', '.', 'root_node', '.', 'points', '[', '1', ']', '[', 'COLS', '.', 'R', ']', '<', 'frac', '*', 'neurite', '.', 'root_node', '.', 'points', '[', '2', ']', '[', 'COLS', '.', 'R', ']', ']', 'return', 'CheckResult', '(', 'len', '(', 'bad_ids', ')', '==', '0', ',', 'bad_ids', ')'] | Check if neurites have a narrow start
Arguments:
neuron(Neuron): The neuron object to test
frac(float): Ratio that the second point must be smaller than the first
Returns:
CheckResult with a list of all first segments of neurites with a narrow start | ['Check', 'if', 'neurites', 'have', 'a', 'narrow', 'start'] | train | https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/check/neuron_checks.py#L253-L266 |
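A usage sketch for the check above; the morphology file name is a placeholder, and `load_neuron` plus the `CheckResult.status` / `CheckResult.info` attributes are assumed from NeuroM's public API of the same era:

.. code-block:: python

    import neurom as nm
    from neurom.check.neuron_checks import has_no_narrow_start

    neuron = nm.load_neuron('my_neuron.swc')      # placeholder morphology file

    result = has_no_narrow_start(neuron, frac=0.9)
    print(result.status)   # True when no neurite has a narrow start
    print(result.info)     # offending (root node id, [second point]) pairs otherwise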
3,848 | Telefonica/toolium | toolium/config_parser.py | ExtendedConfigParser.get_config_from_file | def get_config_from_file(conf_properties_files):
"""Reads properties files and saves them to a config object
:param conf_properties_files: semicolon-separated list of properties files
:returns: config object
"""
# Initialize the config object
config = ExtendedConfigParser()
logger = logging.getLogger(__name__)
# Configure properties (last files could override properties)
found = False
files_list = conf_properties_files.split(';')
for conf_properties_file in files_list:
result = config.read(conf_properties_file)
if len(result) == 0:
message = 'Properties config file not found: %s'
if len(files_list) == 1:
logger.error(message, conf_properties_file)
raise Exception(message % conf_properties_file)
else:
logger.debug(message, conf_properties_file)
else:
logger.debug('Reading properties from file: %s', conf_properties_file)
found = True
if not found:
message = 'Any of the properties config files has been found'
logger.error(message)
raise Exception(message)
return config | python | def get_config_from_file(conf_properties_files):
"""Reads properties files and saves them to a config object
:param conf_properties_files: semicolon-separated list of properties files
:returns: config object
"""
# Initialize the config object
config = ExtendedConfigParser()
logger = logging.getLogger(__name__)
# Configure properties (last files could override properties)
found = False
files_list = conf_properties_files.split(';')
for conf_properties_file in files_list:
result = config.read(conf_properties_file)
if len(result) == 0:
message = 'Properties config file not found: %s'
if len(files_list) == 1:
logger.error(message, conf_properties_file)
raise Exception(message % conf_properties_file)
else:
logger.debug(message, conf_properties_file)
else:
logger.debug('Reading properties from file: %s', conf_properties_file)
found = True
if not found:
message = 'Any of the properties config files has been found'
logger.error(message)
raise Exception(message)
return config | ['def', 'get_config_from_file', '(', 'conf_properties_files', ')', ':', '# Initialize the config object', 'config', '=', 'ExtendedConfigParser', '(', ')', 'logger', '=', 'logging', '.', 'getLogger', '(', '__name__', ')', '# Configure properties (last files could override properties)', 'found', '=', 'False', 'files_list', '=', 'conf_properties_files', '.', 'split', '(', "';'", ')', 'for', 'conf_properties_file', 'in', 'files_list', ':', 'result', '=', 'config', '.', 'read', '(', 'conf_properties_file', ')', 'if', 'len', '(', 'result', ')', '==', '0', ':', 'message', '=', "'Properties config file not found: %s'", 'if', 'len', '(', 'files_list', ')', '==', '1', ':', 'logger', '.', 'error', '(', 'message', ',', 'conf_properties_file', ')', 'raise', 'Exception', '(', 'message', '%', 'conf_properties_file', ')', 'else', ':', 'logger', '.', 'debug', '(', 'message', ',', 'conf_properties_file', ')', 'else', ':', 'logger', '.', 'debug', '(', "'Reading properties from file: %s'", ',', 'conf_properties_file', ')', 'found', '=', 'True', 'if', 'not', 'found', ':', 'message', '=', "'Any of the properties config files has been found'", 'logger', '.', 'error', '(', 'message', ')', 'raise', 'Exception', '(', 'message', ')', 'return', 'config'] | Reads properties files and saves them to a config object
:param conf_properties_files: semicolon-separated list of properties files
:returns: config object | ['Reads', 'properties', 'files', 'and', 'saves', 'them', 'to', 'a', 'config', 'object'] | train | https://github.com/Telefonica/toolium/blob/56847c243b3a98876df74c184b75e43f8810e475/toolium/config_parser.py#L99-L129 |
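A usage sketch for the loader above, assuming it is exposed as a static method as its self-less signature suggests. Both file names are placeholders; the argument is split on ';', later files override earlier ones, and at least one of them must exist or an exception is raised:

.. code-block:: python

    from toolium.config_parser import ExtendedConfigParser

    config = ExtendedConfigParser.get_config_from_file(
        'conf/properties.cfg;conf/local-properties.cfg')   # placeholder paths

    print(config.sections())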
3,849 | tensorflow/tensor2tensor | tensor2tensor/utils/decoding.py | decode_from_file | def decode_from_file(estimator,
filename,
hparams,
decode_hp,
decode_to_file=None,
checkpoint_path=None):
"""Compute predictions on entries in filename and write them out."""
if not decode_hp.batch_size:
decode_hp.batch_size = 32
tf.logging.info(
"decode_hp.batch_size not specified; default=%d" % decode_hp.batch_size)
# Inputs vocabulary is set to targets if there are no inputs in the problem,
# e.g., for language models where the inputs are just a prefix of targets.
p_hp = hparams.problem_hparams
has_input = "inputs" in p_hp.vocabulary
inputs_vocab_key = "inputs" if has_input else "targets"
inputs_vocab = p_hp.vocabulary[inputs_vocab_key]
targets_vocab = p_hp.vocabulary["targets"]
problem_name = FLAGS.problem
filename = _add_shard_to_filename(filename, decode_hp)
tf.logging.info("Performing decoding from file (%s)." % filename)
if has_input:
sorted_inputs, sorted_keys = _get_sorted_inputs(
filename, decode_hp.delimiter)
else:
sorted_inputs = _get_language_modeling_inputs(
filename, decode_hp.delimiter, repeat=decode_hp.num_decodes)
sorted_keys = range(len(sorted_inputs))
num_sentences = len(sorted_inputs)
num_decode_batches = (num_sentences - 1) // decode_hp.batch_size + 1
if estimator.config.use_tpu:
length = getattr(hparams, "length", 0) or hparams.max_length
batch_ids = []
for line in sorted_inputs:
if has_input:
ids = inputs_vocab.encode(line.strip()) + [1]
else:
ids = targets_vocab.encode(line)
if len(ids) < length:
ids.extend([0] * (length - len(ids)))
else:
ids = ids[:length]
batch_ids.append(ids)
np_ids = np.array(batch_ids, dtype=np.int32)
def input_fn(params):
batch_size = params["batch_size"]
dataset = tf.data.Dataset.from_tensor_slices({"inputs": np_ids})
dataset = dataset.map(
lambda ex: {"inputs": tf.reshape(ex["inputs"], (length, 1, 1))})
dataset = dataset.batch(batch_size)
return dataset
else:
def input_fn():
input_gen = _decode_batch_input_fn(
num_decode_batches, sorted_inputs,
inputs_vocab, decode_hp.batch_size,
decode_hp.max_input_size,
task_id=decode_hp.multiproblem_task_id, has_input=has_input)
gen_fn = make_input_fn_from_generator(input_gen)
example = gen_fn()
return _decode_input_tensor_to_features_dict(example, hparams)
decodes = []
result_iter = estimator.predict(input_fn, checkpoint_path=checkpoint_path)
start_time = time.time()
total_time_per_step = 0
total_cnt = 0
def timer(gen):
while True:
try:
start_time = time.time()
item = next(gen)
elapsed_time = time.time() - start_time
yield elapsed_time, item
except StopIteration:
break
for elapsed_time, result in timer(result_iter):
if decode_hp.return_beams:
beam_decodes = []
beam_scores = []
output_beams = np.split(result["outputs"], decode_hp.beam_size, axis=0)
scores = None
if "scores" in result:
if np.isscalar(result["scores"]):
result["scores"] = result["scores"].reshape(1)
scores = np.split(result["scores"], decode_hp.beam_size, axis=0)
for k, beam in enumerate(output_beams):
tf.logging.info("BEAM %d:" % k)
score = scores and scores[k]
_, decoded_outputs, _ = log_decode_results(
result["inputs"],
beam,
problem_name,
None,
inputs_vocab,
targets_vocab,
log_results=decode_hp.log_results,
skip_eos_postprocess=decode_hp.skip_eos_postprocess)
beam_decodes.append(decoded_outputs)
if decode_hp.write_beam_scores:
beam_scores.append(score)
if decode_hp.write_beam_scores:
decodes.append("\t".join([
"\t".join([d, "%.2f" % s])
for d, s in zip(beam_decodes, beam_scores)
]))
else:
decodes.append("\t".join(beam_decodes))
else:
_, decoded_outputs, _ = log_decode_results(
result["inputs"],
result["outputs"],
problem_name,
None,
inputs_vocab,
targets_vocab,
log_results=decode_hp.log_results,
skip_eos_postprocess=decode_hp.skip_eos_postprocess)
decodes.append(decoded_outputs)
total_time_per_step += elapsed_time
total_cnt += result["outputs"].shape[-1]
duration = time.time() - start_time
tf.logging.info("Elapsed Time: %5.5f" % duration)
tf.logging.info("Averaged Single Token Generation Time: %5.7f "
"(time %5.7f count %d)" %
(total_time_per_step / total_cnt,
total_time_per_step, total_cnt))
if decode_hp.batch_size == 1:
tf.logging.info("Inference time %.4f seconds "
"(Latency = %.4f ms/setences)" %
(duration, 1000.0*duration/num_sentences))
else:
tf.logging.info("Inference time %.4f seconds "
"(Throughput = %.4f sentences/second)" %
(duration, num_sentences/duration))
# If decode_to_file was provided use it as the output filename without change
# (except for adding shard_id if using more shards for decoding).
# Otherwise, use the input filename plus model, hp, problem, beam, alpha.
decode_filename = decode_to_file if decode_to_file else filename
if not decode_to_file:
decode_filename = _decode_filename(decode_filename, problem_name, decode_hp)
else:
decode_filename = _add_shard_to_filename(decode_filename, decode_hp)
tf.logging.info("Writing decodes into %s" % decode_filename)
outfile = tf.gfile.Open(decode_filename, "w")
for index in range(len(sorted_inputs)):
outfile.write("%s%s" % (decodes[sorted_keys[index]], decode_hp.delimiter))
outfile.flush()
outfile.close()
output_dir = os.path.join(estimator.model_dir, "decode")
tf.gfile.MakeDirs(output_dir)
run_postdecode_hooks(DecodeHookArgs(
estimator=estimator,
problem=hparams.problem,
output_dirs=[output_dir],
hparams=hparams,
decode_hparams=decode_hp,
predictions=list(result_iter)
), None) | python | def decode_from_file(estimator,
filename,
hparams,
decode_hp,
decode_to_file=None,
checkpoint_path=None):
"""Compute predictions on entries in filename and write them out."""
if not decode_hp.batch_size:
decode_hp.batch_size = 32
tf.logging.info(
"decode_hp.batch_size not specified; default=%d" % decode_hp.batch_size)
# Inputs vocabulary is set to targets if there are no inputs in the problem,
# e.g., for language models where the inputs are just a prefix of targets.
p_hp = hparams.problem_hparams
has_input = "inputs" in p_hp.vocabulary
inputs_vocab_key = "inputs" if has_input else "targets"
inputs_vocab = p_hp.vocabulary[inputs_vocab_key]
targets_vocab = p_hp.vocabulary["targets"]
problem_name = FLAGS.problem
filename = _add_shard_to_filename(filename, decode_hp)
tf.logging.info("Performing decoding from file (%s)." % filename)
if has_input:
sorted_inputs, sorted_keys = _get_sorted_inputs(
filename, decode_hp.delimiter)
else:
sorted_inputs = _get_language_modeling_inputs(
filename, decode_hp.delimiter, repeat=decode_hp.num_decodes)
sorted_keys = range(len(sorted_inputs))
num_sentences = len(sorted_inputs)
num_decode_batches = (num_sentences - 1) // decode_hp.batch_size + 1
if estimator.config.use_tpu:
length = getattr(hparams, "length", 0) or hparams.max_length
batch_ids = []
for line in sorted_inputs:
if has_input:
ids = inputs_vocab.encode(line.strip()) + [1]
else:
ids = targets_vocab.encode(line)
if len(ids) < length:
ids.extend([0] * (length - len(ids)))
else:
ids = ids[:length]
batch_ids.append(ids)
np_ids = np.array(batch_ids, dtype=np.int32)
def input_fn(params):
batch_size = params["batch_size"]
dataset = tf.data.Dataset.from_tensor_slices({"inputs": np_ids})
dataset = dataset.map(
lambda ex: {"inputs": tf.reshape(ex["inputs"], (length, 1, 1))})
dataset = dataset.batch(batch_size)
return dataset
else:
def input_fn():
input_gen = _decode_batch_input_fn(
num_decode_batches, sorted_inputs,
inputs_vocab, decode_hp.batch_size,
decode_hp.max_input_size,
task_id=decode_hp.multiproblem_task_id, has_input=has_input)
gen_fn = make_input_fn_from_generator(input_gen)
example = gen_fn()
return _decode_input_tensor_to_features_dict(example, hparams)
decodes = []
result_iter = estimator.predict(input_fn, checkpoint_path=checkpoint_path)
start_time = time.time()
total_time_per_step = 0
total_cnt = 0
def timer(gen):
while True:
try:
start_time = time.time()
item = next(gen)
elapsed_time = time.time() - start_time
yield elapsed_time, item
except StopIteration:
break
for elapsed_time, result in timer(result_iter):
if decode_hp.return_beams:
beam_decodes = []
beam_scores = []
output_beams = np.split(result["outputs"], decode_hp.beam_size, axis=0)
scores = None
if "scores" in result:
if np.isscalar(result["scores"]):
result["scores"] = result["scores"].reshape(1)
scores = np.split(result["scores"], decode_hp.beam_size, axis=0)
for k, beam in enumerate(output_beams):
tf.logging.info("BEAM %d:" % k)
score = scores and scores[k]
_, decoded_outputs, _ = log_decode_results(
result["inputs"],
beam,
problem_name,
None,
inputs_vocab,
targets_vocab,
log_results=decode_hp.log_results,
skip_eos_postprocess=decode_hp.skip_eos_postprocess)
beam_decodes.append(decoded_outputs)
if decode_hp.write_beam_scores:
beam_scores.append(score)
if decode_hp.write_beam_scores:
decodes.append("\t".join([
"\t".join([d, "%.2f" % s])
for d, s in zip(beam_decodes, beam_scores)
]))
else:
decodes.append("\t".join(beam_decodes))
else:
_, decoded_outputs, _ = log_decode_results(
result["inputs"],
result["outputs"],
problem_name,
None,
inputs_vocab,
targets_vocab,
log_results=decode_hp.log_results,
skip_eos_postprocess=decode_hp.skip_eos_postprocess)
decodes.append(decoded_outputs)
total_time_per_step += elapsed_time
total_cnt += result["outputs"].shape[-1]
duration = time.time() - start_time
tf.logging.info("Elapsed Time: %5.5f" % duration)
tf.logging.info("Averaged Single Token Generation Time: %5.7f "
"(time %5.7f count %d)" %
(total_time_per_step / total_cnt,
total_time_per_step, total_cnt))
if decode_hp.batch_size == 1:
tf.logging.info("Inference time %.4f seconds "
"(Latency = %.4f ms/setences)" %
(duration, 1000.0*duration/num_sentences))
else:
tf.logging.info("Inference time %.4f seconds "
"(Throughput = %.4f sentences/second)" %
(duration, num_sentences/duration))
# If decode_to_file was provided use it as the output filename without change
# (except for adding shard_id if using more shards for decoding).
# Otherwise, use the input filename plus model, hp, problem, beam, alpha.
decode_filename = decode_to_file if decode_to_file else filename
if not decode_to_file:
decode_filename = _decode_filename(decode_filename, problem_name, decode_hp)
else:
decode_filename = _add_shard_to_filename(decode_filename, decode_hp)
tf.logging.info("Writing decodes into %s" % decode_filename)
outfile = tf.gfile.Open(decode_filename, "w")
for index in range(len(sorted_inputs)):
outfile.write("%s%s" % (decodes[sorted_keys[index]], decode_hp.delimiter))
outfile.flush()
outfile.close()
output_dir = os.path.join(estimator.model_dir, "decode")
tf.gfile.MakeDirs(output_dir)
run_postdecode_hooks(DecodeHookArgs(
estimator=estimator,
problem=hparams.problem,
output_dirs=[output_dir],
hparams=hparams,
decode_hparams=decode_hp,
predictions=list(result_iter)
), None) | ['def', 'decode_from_file', '(', 'estimator', ',', 'filename', ',', 'hparams', ',', 'decode_hp', ',', 'decode_to_file', '=', 'None', ',', 'checkpoint_path', '=', 'None', ')', ':', 'if', 'not', 'decode_hp', '.', 'batch_size', ':', 'decode_hp', '.', 'batch_size', '=', '32', 'tf', '.', 'logging', '.', 'info', '(', '"decode_hp.batch_size not specified; default=%d"', '%', 'decode_hp', '.', 'batch_size', ')', '# Inputs vocabulary is set to targets if there are no inputs in the problem,', '# e.g., for language models where the inputs are just a prefix of targets.', 'p_hp', '=', 'hparams', '.', 'problem_hparams', 'has_input', '=', '"inputs"', 'in', 'p_hp', '.', 'vocabulary', 'inputs_vocab_key', '=', '"inputs"', 'if', 'has_input', 'else', '"targets"', 'inputs_vocab', '=', 'p_hp', '.', 'vocabulary', '[', 'inputs_vocab_key', ']', 'targets_vocab', '=', 'p_hp', '.', 'vocabulary', '[', '"targets"', ']', 'problem_name', '=', 'FLAGS', '.', 'problem', 'filename', '=', '_add_shard_to_filename', '(', 'filename', ',', 'decode_hp', ')', 'tf', '.', 'logging', '.', 'info', '(', '"Performing decoding from file (%s)."', '%', 'filename', ')', 'if', 'has_input', ':', 'sorted_inputs', ',', 'sorted_keys', '=', '_get_sorted_inputs', '(', 'filename', ',', 'decode_hp', '.', 'delimiter', ')', 'else', ':', 'sorted_inputs', '=', '_get_language_modeling_inputs', '(', 'filename', ',', 'decode_hp', '.', 'delimiter', ',', 'repeat', '=', 'decode_hp', '.', 'num_decodes', ')', 'sorted_keys', '=', 'range', '(', 'len', '(', 'sorted_inputs', ')', ')', 'num_sentences', '=', 'len', '(', 'sorted_inputs', ')', 'num_decode_batches', '=', '(', 'num_sentences', '-', '1', ')', '//', 'decode_hp', '.', 'batch_size', '+', '1', 'if', 'estimator', '.', 'config', '.', 'use_tpu', ':', 'length', '=', 'getattr', '(', 'hparams', ',', '"length"', ',', '0', ')', 'or', 'hparams', '.', 'max_length', 'batch_ids', '=', '[', ']', 'for', 'line', 'in', 'sorted_inputs', ':', 'if', 'has_input', ':', 'ids', '=', 'inputs_vocab', '.', 'encode', '(', 'line', '.', 'strip', '(', ')', ')', '+', '[', '1', ']', 'else', ':', 'ids', '=', 'targets_vocab', '.', 'encode', '(', 'line', ')', 'if', 'len', '(', 'ids', ')', '<', 'length', ':', 'ids', '.', 'extend', '(', '[', '0', ']', '*', '(', 'length', '-', 'len', '(', 'ids', ')', ')', ')', 'else', ':', 'ids', '=', 'ids', '[', ':', 'length', ']', 'batch_ids', '.', 'append', '(', 'ids', ')', 'np_ids', '=', 'np', '.', 'array', '(', 'batch_ids', ',', 'dtype', '=', 'np', '.', 'int32', ')', 'def', 'input_fn', '(', 'params', ')', ':', 'batch_size', '=', 'params', '[', '"batch_size"', ']', 'dataset', '=', 'tf', '.', 'data', '.', 'Dataset', '.', 'from_tensor_slices', '(', '{', '"inputs"', ':', 'np_ids', '}', ')', 'dataset', '=', 'dataset', '.', 'map', '(', 'lambda', 'ex', ':', '{', '"inputs"', ':', 'tf', '.', 'reshape', '(', 'ex', '[', '"inputs"', ']', ',', '(', 'length', ',', '1', ',', '1', ')', ')', '}', ')', 'dataset', '=', 'dataset', '.', 'batch', '(', 'batch_size', ')', 'return', 'dataset', 'else', ':', 'def', 'input_fn', '(', ')', ':', 'input_gen', '=', '_decode_batch_input_fn', '(', 'num_decode_batches', ',', 'sorted_inputs', ',', 'inputs_vocab', ',', 'decode_hp', '.', 'batch_size', ',', 'decode_hp', '.', 'max_input_size', ',', 'task_id', '=', 'decode_hp', '.', 'multiproblem_task_id', ',', 'has_input', '=', 'has_input', ')', 'gen_fn', '=', 'make_input_fn_from_generator', '(', 'input_gen', ')', 'example', '=', 'gen_fn', '(', ')', 'return', '_decode_input_tensor_to_features_dict', '(', 'example', ',', 'hparams', ')', 
'decodes', '=', '[', ']', 'result_iter', '=', 'estimator', '.', 'predict', '(', 'input_fn', ',', 'checkpoint_path', '=', 'checkpoint_path', ')', 'start_time', '=', 'time', '.', 'time', '(', ')', 'total_time_per_step', '=', '0', 'total_cnt', '=', '0', 'def', 'timer', '(', 'gen', ')', ':', 'while', 'True', ':', 'try', ':', 'start_time', '=', 'time', '.', 'time', '(', ')', 'item', '=', 'next', '(', 'gen', ')', 'elapsed_time', '=', 'time', '.', 'time', '(', ')', '-', 'start_time', 'yield', 'elapsed_time', ',', 'item', 'except', 'StopIteration', ':', 'break', 'for', 'elapsed_time', ',', 'result', 'in', 'timer', '(', 'result_iter', ')', ':', 'if', 'decode_hp', '.', 'return_beams', ':', 'beam_decodes', '=', '[', ']', 'beam_scores', '=', '[', ']', 'output_beams', '=', 'np', '.', 'split', '(', 'result', '[', '"outputs"', ']', ',', 'decode_hp', '.', 'beam_size', ',', 'axis', '=', '0', ')', 'scores', '=', 'None', 'if', '"scores"', 'in', 'result', ':', 'if', 'np', '.', 'isscalar', '(', 'result', '[', '"scores"', ']', ')', ':', 'result', '[', '"scores"', ']', '=', 'result', '[', '"scores"', ']', '.', 'reshape', '(', '1', ')', 'scores', '=', 'np', '.', 'split', '(', 'result', '[', '"scores"', ']', ',', 'decode_hp', '.', 'beam_size', ',', 'axis', '=', '0', ')', 'for', 'k', ',', 'beam', 'in', 'enumerate', '(', 'output_beams', ')', ':', 'tf', '.', 'logging', '.', 'info', '(', '"BEAM %d:"', '%', 'k', ')', 'score', '=', 'scores', 'and', 'scores', '[', 'k', ']', '_', ',', 'decoded_outputs', ',', '_', '=', 'log_decode_results', '(', 'result', '[', '"inputs"', ']', ',', 'beam', ',', 'problem_name', ',', 'None', ',', 'inputs_vocab', ',', 'targets_vocab', ',', 'log_results', '=', 'decode_hp', '.', 'log_results', ',', 'skip_eos_postprocess', '=', 'decode_hp', '.', 'skip_eos_postprocess', ')', 'beam_decodes', '.', 'append', '(', 'decoded_outputs', ')', 'if', 'decode_hp', '.', 'write_beam_scores', ':', 'beam_scores', '.', 'append', '(', 'score', ')', 'if', 'decode_hp', '.', 'write_beam_scores', ':', 'decodes', '.', 'append', '(', '"\\t"', '.', 'join', '(', '[', '"\\t"', '.', 'join', '(', '[', 'd', ',', '"%.2f"', '%', 's', ']', ')', 'for', 'd', ',', 's', 'in', 'zip', '(', 'beam_decodes', ',', 'beam_scores', ')', ']', ')', ')', 'else', ':', 'decodes', '.', 'append', '(', '"\\t"', '.', 'join', '(', 'beam_decodes', ')', ')', 'else', ':', '_', ',', 'decoded_outputs', ',', '_', '=', 'log_decode_results', '(', 'result', '[', '"inputs"', ']', ',', 'result', '[', '"outputs"', ']', ',', 'problem_name', ',', 'None', ',', 'inputs_vocab', ',', 'targets_vocab', ',', 'log_results', '=', 'decode_hp', '.', 'log_results', ',', 'skip_eos_postprocess', '=', 'decode_hp', '.', 'skip_eos_postprocess', ')', 'decodes', '.', 'append', '(', 'decoded_outputs', ')', 'total_time_per_step', '+=', 'elapsed_time', 'total_cnt', '+=', 'result', '[', '"outputs"', ']', '.', 'shape', '[', '-', '1', ']', 'duration', '=', 'time', '.', 'time', '(', ')', '-', 'start_time', 'tf', '.', 'logging', '.', 'info', '(', '"Elapsed Time: %5.5f"', '%', 'duration', ')', 'tf', '.', 'logging', '.', 'info', '(', '"Averaged Single Token Generation Time: %5.7f "', '"(time %5.7f count %d)"', '%', '(', 'total_time_per_step', '/', 'total_cnt', ',', 'total_time_per_step', ',', 'total_cnt', ')', ')', 'if', 'decode_hp', '.', 'batch_size', '==', '1', ':', 'tf', '.', 'logging', '.', 'info', '(', '"Inference time %.4f seconds "', '"(Latency = %.4f ms/setences)"', '%', '(', 'duration', ',', '1000.0', '*', 'duration', '/', 'num_sentences', ')', ')', 'else', ':', 'tf', '.', 'logging', 
'.', 'info', '(', '"Inference time %.4f seconds "', '"(Throughput = %.4f sentences/second)"', '%', '(', 'duration', ',', 'num_sentences', '/', 'duration', ')', ')', '# If decode_to_file was provided use it as the output filename without change', '# (except for adding shard_id if using more shards for decoding).', '# Otherwise, use the input filename plus model, hp, problem, beam, alpha.', 'decode_filename', '=', 'decode_to_file', 'if', 'decode_to_file', 'else', 'filename', 'if', 'not', 'decode_to_file', ':', 'decode_filename', '=', '_decode_filename', '(', 'decode_filename', ',', 'problem_name', ',', 'decode_hp', ')', 'else', ':', 'decode_filename', '=', '_add_shard_to_filename', '(', 'decode_filename', ',', 'decode_hp', ')', 'tf', '.', 'logging', '.', 'info', '(', '"Writing decodes into %s"', '%', 'decode_filename', ')', 'outfile', '=', 'tf', '.', 'gfile', '.', 'Open', '(', 'decode_filename', ',', '"w"', ')', 'for', 'index', 'in', 'range', '(', 'len', '(', 'sorted_inputs', ')', ')', ':', 'outfile', '.', 'write', '(', '"%s%s"', '%', '(', 'decodes', '[', 'sorted_keys', '[', 'index', ']', ']', ',', 'decode_hp', '.', 'delimiter', ')', ')', 'outfile', '.', 'flush', '(', ')', 'outfile', '.', 'close', '(', ')', 'output_dir', '=', 'os', '.', 'path', '.', 'join', '(', 'estimator', '.', 'model_dir', ',', '"decode"', ')', 'tf', '.', 'gfile', '.', 'MakeDirs', '(', 'output_dir', ')', 'run_postdecode_hooks', '(', 'DecodeHookArgs', '(', 'estimator', '=', 'estimator', ',', 'problem', '=', 'hparams', '.', 'problem', ',', 'output_dirs', '=', '[', 'output_dir', ']', ',', 'hparams', '=', 'hparams', ',', 'decode_hparams', '=', 'decode_hp', ',', 'predictions', '=', 'list', '(', 'result_iter', ')', ')', ',', 'None', ')'] | Compute predictions on entries in filename and write them out. | ['Compute', 'predictions', 'on', 'entries', 'in', 'filename', 'and', 'write', 'them', 'out', '.'] | train | https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/decoding.py#L394-L559 |
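One reusable piece of the function above is the inner `timer` generator, which wraps an iterator and reports how long each `next()` call took. A self-contained sketch of that pattern with a toy iterator standing in for `estimator.predict()`:

.. code-block:: python

    import time

    def timer(gen):
        """Yield (elapsed_seconds, item) for every item pulled from `gen`."""
        while True:
            try:
                start_time = time.time()
                item = next(gen)
                yield time.time() - start_time, item
            except StopIteration:
                break

    def slow_results():
        # Toy stand-in for estimator.predict(); each "prediction" takes a moment.
        for i in range(3):
            time.sleep(0.01)
            yield {"outputs": [i]}

    for elapsed, result in timer(slow_results()):
        print("%.4fs -> %s" % (elapsed, result))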
3,850 | StackStorm/pybind | pybind/slxos/v17r_1_01a/routing_system/ipv6/__init__.py | ipv6._set_receive | def _set_receive(self, v, load=False):
"""
Setter method for receive, mapped from YANG variable /routing_system/ipv6/receive (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_receive is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_receive() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=receive.receive, is_container='container', presence=False, yang_name="receive", rest_name="receive", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Receive ACL', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-access-list', defining_module='brocade-ipv6-access-list', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """receive must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=receive.receive, is_container='container', presence=False, yang_name="receive", rest_name="receive", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Receive ACL', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-access-list', defining_module='brocade-ipv6-access-list', yang_type='container', is_config=True)""",
})
self.__receive = t
if hasattr(self, '_set'):
self._set() | python | def _set_receive(self, v, load=False):
"""
Setter method for receive, mapped from YANG variable /routing_system/ipv6/receive (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_receive is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_receive() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=receive.receive, is_container='container', presence=False, yang_name="receive", rest_name="receive", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Receive ACL', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-access-list', defining_module='brocade-ipv6-access-list', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """receive must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=receive.receive, is_container='container', presence=False, yang_name="receive", rest_name="receive", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Receive ACL', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-access-list', defining_module='brocade-ipv6-access-list', yang_type='container', is_config=True)""",
})
self.__receive = t
if hasattr(self, '_set'):
self._set() | ['def', '_set_receive', '(', 'self', ',', 'v', ',', 'load', '=', 'False', ')', ':', 'if', 'hasattr', '(', 'v', ',', '"_utype"', ')', ':', 'v', '=', 'v', '.', '_utype', '(', 'v', ')', 'try', ':', 't', '=', 'YANGDynClass', '(', 'v', ',', 'base', '=', 'receive', '.', 'receive', ',', 'is_container', '=', "'container'", ',', 'presence', '=', 'False', ',', 'yang_name', '=', '"receive"', ',', 'rest_name', '=', '"receive"', ',', 'parent', '=', 'self', ',', 'path_helper', '=', 'self', '.', '_path_helper', ',', 'extmethods', '=', 'self', '.', '_extmethods', ',', 'register_paths', '=', 'True', ',', 'extensions', '=', '{', "u'tailf-common'", ':', '{', "u'info'", ':', "u'Receive ACL'", ',', "u'cli-incomplete-no'", ':', 'None', '}', '}', ',', 'namespace', '=', "'urn:brocade.com:mgmt:brocade-ipv6-access-list'", ',', 'defining_module', '=', "'brocade-ipv6-access-list'", ',', 'yang_type', '=', "'container'", ',', 'is_config', '=', 'True', ')', 'except', '(', 'TypeError', ',', 'ValueError', ')', ':', 'raise', 'ValueError', '(', '{', "'error-string'", ':', '"""receive must be of a type compatible with container"""', ',', "'defined-type'", ':', '"container"', ',', "'generated-type'", ':', '"""YANGDynClass(base=receive.receive, is_container=\'container\', presence=False, yang_name="receive", rest_name="receive", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u\'tailf-common\': {u\'info\': u\'Receive ACL\', u\'cli-incomplete-no\': None}}, namespace=\'urn:brocade.com:mgmt:brocade-ipv6-access-list\', defining_module=\'brocade-ipv6-access-list\', yang_type=\'container\', is_config=True)"""', ',', '}', ')', 'self', '.', '__receive', '=', 't', 'if', 'hasattr', '(', 'self', ',', "'_set'", ')', ':', 'self', '.', '_set', '(', ')'] | Setter method for receive, mapped from YANG variable /routing_system/ipv6/receive (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_receive is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_receive() directly. | ['Setter', 'method', 'for', 'receive', 'mapped', 'from', 'YANG', 'variable', '/', 'routing_system', '/', 'ipv6', '/', 'receive', '(', 'container', ')', 'If', 'this', 'variable', 'is', 'read', '-', 'only', '(', 'config', ':', 'false', ')', 'in', 'the', 'source', 'YANG', 'file', 'then', '_set_receive', 'is', 'considered', 'as', 'a', 'private', 'method', '.', 'Backends', 'looking', 'to', 'populate', 'this', 'variable', 'should', 'do', 'so', 'via', 'calling', 'thisObj', '.', '_set_receive', '()', 'directly', '.'] | train | https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/slxos/v17r_1_01a/routing_system/ipv6/__init__.py#L133-L154 |
3,851 | Azure/azure-event-hubs-python | azure/eventprocessorhost/eh_partition_pump.py | EventHubPartitionPump.clean_up_clients_async | async def clean_up_clients_async(self):
"""
Resets the pump; swallows all exceptions.
"""
if self.partition_receiver:
if self.eh_client:
await self.eh_client.stop_async()
self.partition_receiver = None
self.partition_receive_handler = None
self.eh_client = None | python | async def clean_up_clients_async(self):
"""
Resets the pump; swallows all exceptions.
"""
if self.partition_receiver:
if self.eh_client:
await self.eh_client.stop_async()
self.partition_receiver = None
self.partition_receive_handler = None
self.eh_client = None | ['async', 'def', 'clean_up_clients_async', '(', 'self', ')', ':', 'if', 'self', '.', 'partition_receiver', ':', 'if', 'self', '.', 'eh_client', ':', 'await', 'self', '.', 'eh_client', '.', 'stop_async', '(', ')', 'self', '.', 'partition_receiver', '=', 'None', 'self', '.', 'partition_receive_handler', '=', 'None', 'self', '.', 'eh_client', '=', 'None'] | Resets the pump swallows all exceptions. | ['Resets', 'the', 'pump', 'swallows', 'all', 'exceptions', '.'] | train | https://github.com/Azure/azure-event-hubs-python/blob/737c5f966557ada2cf10fa0d8f3c19671ae96348/azure/eventprocessorhost/eh_partition_pump.py#L81-L90 |
3,852 | pgmpy/pgmpy | pgmpy/factors/continuous/discretize.py | BaseDiscretizer.get_labels | def get_labels(self):
"""
Returns a list of strings representing the values about
which the discretization method calculates the probability
masses.
Default value is the points -
[low, low+step, low+2*step, ......... , high-step]
unless the method is overridden by a subclass.
Examples
--------
>>> from pgmpy.factors import ContinuousNode
>>> from pgmpy.discretize import BaseDiscretizer
>>> class ChildDiscretizer(BaseDiscretizer):
... def get_discrete_values(self):
... pass
>>> from scipy.stats import norm
>>> node = ContinuousNode(norm(0).pdf)
>>> child = ChildDiscretizer(node, -5, 5, 20)
>>> chld.get_labels()
['x=-5.0', 'x=-4.5', 'x=-4.0', 'x=-3.5', 'x=-3.0', 'x=-2.5',
'x=-2.0', 'x=-1.5', 'x=-1.0', 'x=-0.5', 'x=0.0', 'x=0.5', 'x=1.0',
'x=1.5', 'x=2.0', 'x=2.5', 'x=3.0', 'x=3.5', 'x=4.0', 'x=4.5']
"""
step = (self.high - self.low) / self.cardinality
labels = ['x={i}'.format(i=str(i)) for i in np.round(
np.arange(self.low, self.high, step), 3)]
return labels | python | def get_labels(self):
"""
Returns a list of strings representing the values about
which the discretization method calculates the probability
masses.
Default value is the points -
[low, low+step, low+2*step, ......... , high-step]
unless the method is overridden by a subclass.
Examples
--------
>>> from pgmpy.factors import ContinuousNode
>>> from pgmpy.discretize import BaseDiscretizer
>>> class ChildDiscretizer(BaseDiscretizer):
... def get_discrete_values(self):
... pass
>>> from scipy.stats import norm
>>> node = ContinuousNode(norm(0).pdf)
>>> child = ChildDiscretizer(node, -5, 5, 20)
>>> chld.get_labels()
['x=-5.0', 'x=-4.5', 'x=-4.0', 'x=-3.5', 'x=-3.0', 'x=-2.5',
'x=-2.0', 'x=-1.5', 'x=-1.0', 'x=-0.5', 'x=0.0', 'x=0.5', 'x=1.0',
'x=1.5', 'x=2.0', 'x=2.5', 'x=3.0', 'x=3.5', 'x=4.0', 'x=4.5']
"""
step = (self.high - self.low) / self.cardinality
labels = ['x={i}'.format(i=str(i)) for i in np.round(
np.arange(self.low, self.high, step), 3)]
return labels | ['def', 'get_labels', '(', 'self', ')', ':', 'step', '=', '(', 'self', '.', 'high', '-', 'self', '.', 'low', ')', '/', 'self', '.', 'cardinality', 'labels', '=', '[', "'x={i}'", '.', 'format', '(', 'i', '=', 'str', '(', 'i', ')', ')', 'for', 'i', 'in', 'np', '.', 'round', '(', 'np', '.', 'arange', '(', 'self', '.', 'low', ',', 'self', '.', 'high', ',', 'step', ')', ',', '3', ')', ']', 'return', 'labels'] | Returns a list of strings representing the values about
which the discretization method calculates the probability
masses.
Default value is the points -
[low, low+step, low+2*step, ......... , high-step]
unless the method is overridden by a subclass.
Examples
--------
>>> from pgmpy.factors import ContinuousNode
>>> from pgmpy.discretize import BaseDiscretizer
>>> class ChildDiscretizer(BaseDiscretizer):
... def get_discrete_values(self):
... pass
>>> from scipy.stats import norm
>>> node = ContinuousNode(norm(0).pdf)
>>> child = ChildDiscretizer(node, -5, 5, 20)
>>> chld.get_labels()
['x=-5.0', 'x=-4.5', 'x=-4.0', 'x=-3.5', 'x=-3.0', 'x=-2.5',
'x=-2.0', 'x=-1.5', 'x=-1.0', 'x=-0.5', 'x=0.0', 'x=0.5', 'x=1.0',
'x=1.5', 'x=2.0', 'x=2.5', 'x=3.0', 'x=3.5', 'x=4.0', 'x=4.5'] | ['Returns', 'a', 'list', 'of', 'strings', 'representing', 'the', 'values', 'about', 'which', 'the', 'discretization', 'method', 'calculates', 'the', 'probabilty', 'masses', '.'] | train | https://github.com/pgmpy/pgmpy/blob/9381a66aba3c3871d3ccd00672b148d17d63239e/pgmpy/factors/continuous/discretize.py#L67-L96 |
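The label grid above depends only on `low`, `high` and `cardinality`; a standalone sketch reproducing it (with float division, matching the docstring example) is:

.. code-block:: python

    import numpy as np

    def discretization_labels(low, high, cardinality):
        """Build ['x=low', 'x=low+step', ..., 'x=high-step'], rounded to 3 decimals."""
        step = (high - low) / cardinality
        return ['x={i}'.format(i=str(i)) for i in np.round(np.arange(low, high, step), 3)]

    print(discretization_labels(-5, 5, 20))
    # ['x=-5.0', 'x=-4.5', ..., 'x=4.0', 'x=4.5']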
3,853 | saltstack/salt | salt/modules/bridge.py | _bsd_bradd | def _bsd_bradd(br):
'''
Internal, creates the bridge
'''
kernel = __grains__['kernel']
ifconfig = _tool_path('ifconfig')
if not br:
return False
if __salt__['cmd.retcode']('{0} {1} create up'.format(ifconfig, br),
python_shell=False) != 0:
return False
# NetBSD is two cmds
if kernel == 'NetBSD':
brconfig = _tool_path('brconfig')
if __salt__['cmd.retcode']('{0} {1} up'.format(brconfig, br),
python_shell=False) != 0:
return False
return True | python | def _bsd_bradd(br):
'''
Internal, creates the bridge
'''
kernel = __grains__['kernel']
ifconfig = _tool_path('ifconfig')
if not br:
return False
if __salt__['cmd.retcode']('{0} {1} create up'.format(ifconfig, br),
python_shell=False) != 0:
return False
# NetBSD is two cmds
if kernel == 'NetBSD':
brconfig = _tool_path('brconfig')
if __salt__['cmd.retcode']('{0} {1} up'.format(brconfig, br),
python_shell=False) != 0:
return False
return True | ['def', '_bsd_bradd', '(', 'br', ')', ':', 'kernel', '=', '__grains__', '[', "'kernel'", ']', 'ifconfig', '=', '_tool_path', '(', "'ifconfig'", ')', 'if', 'not', 'br', ':', 'return', 'False', 'if', '__salt__', '[', "'cmd.retcode'", ']', '(', "'{0} {1} create up'", '.', 'format', '(', 'ifconfig', ',', 'br', ')', ',', 'python_shell', '=', 'False', ')', '!=', '0', ':', 'return', 'False', '# NetBSD is two cmds', 'if', 'kernel', '==', "'NetBSD'", ':', 'brconfig', '=', '_tool_path', '(', "'brconfig'", ')', 'if', '__salt__', '[', "'cmd.retcode'", ']', '(', "'{0} {1} up'", '.', 'format', '(', 'brconfig', ',', 'br', ')', ',', 'python_shell', '=', 'False', ')', '!=', '0', ':', 'return', 'False', 'return', 'True'] | Internal, creates the bridge | ['Internal', 'creates', 'the', 'bridge'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/bridge.py#L217-L238 |
3,854 | influxdata/influxdb-python | influxdb/client.py | InfluxDBClient.grant_privilege | def grant_privilege(self, privilege, database, username):
"""Grant a privilege on a database to a user.
:param privilege: the privilege to grant, one of 'read', 'write'
or 'all'. The string is case-insensitive
:type privilege: str
:param database: the database to grant the privilege on
:type database: str
:param username: the username to grant the privilege to
:type username: str
"""
text = "GRANT {0} ON {1} TO {2}".format(privilege,
quote_ident(database),
quote_ident(username))
self.query(text, method="POST") | python | def grant_privilege(self, privilege, database, username):
"""Grant a privilege on a database to a user.
:param privilege: the privilege to grant, one of 'read', 'write'
or 'all'. The string is case-insensitive
:type privilege: str
:param database: the database to grant the privilege on
:type database: str
:param username: the username to grant the privilege to
:type username: str
"""
text = "GRANT {0} ON {1} TO {2}".format(privilege,
quote_ident(database),
quote_ident(username))
self.query(text, method="POST") | ['def', 'grant_privilege', '(', 'self', ',', 'privilege', ',', 'database', ',', 'username', ')', ':', 'text', '=', '"GRANT {0} ON {1} TO {2}"', '.', 'format', '(', 'privilege', ',', 'quote_ident', '(', 'database', ')', ',', 'quote_ident', '(', 'username', ')', ')', 'self', '.', 'query', '(', 'text', ',', 'method', '=', '"POST"', ')'] | Grant a privilege on a database to a user.
:param privilege: the privilege to grant, one of 'read', 'write'
or 'all'. The string is case-insensitive
:type privilege: str
:param database: the database to grant the privilege on
:type database: str
:param username: the username to grant the privilege to
:type username: str | ['Grant', 'a', 'privilege', 'on', 'a', 'database', 'to', 'a', 'user', '.'] | train | https://github.com/influxdata/influxdb-python/blob/d5d12499f3755199d5eedd8b363450f1cf4073bd/influxdb/client.py#L890-L904 |
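A usage sketch for the method above; host, credentials, database and user names are placeholders, and the connection is assumed to already have the rights to grant privileges:

.. code-block:: python

    from influxdb import InfluxDBClient

    client = InfluxDBClient(host='localhost', port=8086,
                            username='admin', password='admin')   # placeholders

    client.create_database('mydb')
    client.create_user('alice', 'secret')
    client.grant_privilege('read', 'mydb', 'alice')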
3,855 | TC01/calcpkg | calcrepo/calcpkg.py | main | def main():
"""Core function for the script"""
commands = ['update', 'list', 'get', 'info', 'count', 'search', 'download']
parser = argparse.ArgumentParser(description="Command line access to software repositories for TI calculators, primarily ticalc.org and Cemetech")
parser.add_argument("action", metavar="ACTION", type=str, help="The calcpkg command to execute (count, get, info, list, update)")
parser.add_argument("string", metavar="STRING", type=str, help="The string to search for when using count, get, info, or list commands", nargs="?", default="")
parser.add_argument("-c", "--category", dest="category", help="Limit searching to a specified category", default="")
parser.add_argument("-e", "--extension", dest="extension", help="Limit searching to a specified file extension", default="")
parser.add_argument("-f", "--filename", dest="searchFiles", action="store_true", help="Search by archive filenames rather than descriptive package name")
parser.add_argument("-g", "--game", dest="game", action="store_true", help="Limit searching to games only")
parser.add_argument("-m", "--math", dest="math", action="store_true", help="Limit searching to math and science programs only")
parser.add_argument("-r", "--repository", dest="repo", help="Limit searching by one repository- default is to use all", default="")
parser.add_argument("-v", "--verbose", dest="verbose", action="store_true", help="Always provide verbose output")
parser.add_argument("-x", "--extract", dest="extract", action="store_true", help="After downloading, autoextract archive files when possible")
parser.add_argument("-y", "--assume-yes", dest="prompt", action="store_false", help="Never prompt for verification of command")
args = parser.parse_args()
#Verify that a valid command was specified
if not args.action in commands:
print "Error: Invalid action specified, action must be one of " + str(commands)
return
#args.category is special
if args.category != "":
category = "/" + args.category + "/"
else:
category = ""
#Initialize repositories; all behind-the-scene processing is done by plugins in calcrepo.repos
repositories = createRepoObjects()
if args.repo != "":
for repoName, repository in repositories.iteritems():
if repoName != args.repo:
repositories[repoName] = None
#Now, run commands for each repo
for name, repository in repositories.iteritems():
if repository != None:
repository.setRepoData(args.string, category, args.extension, args.math, args.game, args.searchFiles)
if args.action == "update":
repository.updateRepoIndexes(args.verbose)
elif (args.action == "list" or args.action == "search"):
repository.searchIndex()
elif (args.action == "get" or args.action == "download"):
repository.searchIndex()
repository.downloadFiles(args.prompt, args.extract)
elif args.action == "info":
repository.getFileInfos()
elif args.action == "count":
repository.countIndex() | python | def main():
"""Core function for the script"""
commands = ['update', 'list', 'get', 'info', 'count', 'search', 'download']
parser = argparse.ArgumentParser(description="Command line access to software repositories for TI calculators, primarily ticalc.org and Cemetech")
parser.add_argument("action", metavar="ACTION", type=str, help="The calcpkg command to execute (count, get, info, list, update)")
parser.add_argument("string", metavar="STRING", type=str, help="The string to search for when using count, get, info, or list commands", nargs="?", default="")
parser.add_argument("-c", "--category", dest="category", help="Limit searching to a specified category", default="")
parser.add_argument("-e", "--extension", dest="extension", help="Limit searching to a specified file extension", default="")
parser.add_argument("-f", "--filename", dest="searchFiles", action="store_true", help="Search by archive filenames rather than descriptive package name")
parser.add_argument("-g", "--game", dest="game", action="store_true", help="Limit searching to games only")
parser.add_argument("-m", "--math", dest="math", action="store_true", help="Limit searching to math and science programs only")
parser.add_argument("-r", "--repository", dest="repo", help="Limit searching by one repository- default is to use all", default="")
parser.add_argument("-v", "--verbose", dest="verbose", action="store_true", help="Always provide verbose output")
parser.add_argument("-x", "--extract", dest="extract", action="store_true", help="After downloading, autoextract archive files when possible")
parser.add_argument("-y", "--assume-yes", dest="prompt", action="store_false", help="Never prompt for verification of command")
args = parser.parse_args()
#Verify that a valid command was specified
if not args.action in commands:
print "Error: Invalid action specified, action must be one of " + str(commands)
return
#args.category is special
if args.category != "":
category = "/" + args.category + "/"
else:
category = ""
#Initialize repositories; all behind-the-scene processing is done by plugins in calcrepo.repos
repositories = createRepoObjects()
if args.repo != "":
for repoName, repository in repositories.iteritems():
if repoName != args.repo:
repositories[repoName] = None
#Now, run commands for each repo
for name, repository in repositories.iteritems():
if repository != None:
repository.setRepoData(args.string, category, args.extension, args.math, args.game, args.searchFiles)
if args.action == "update":
repository.updateRepoIndexes(args.verbose)
elif (args.action == "list" or args.action == "search"):
repository.searchIndex()
elif (args.action == "get" or args.action == "download"):
repository.searchIndex()
repository.downloadFiles(args.prompt, args.extract)
elif args.action == "info":
repository.getFileInfos()
elif args.action == "count":
repository.countIndex() | ['def', 'main', '(', ')', ':', 'commands', '=', '[', "'update'", ',', "'list'", ',', "'get'", ',', "'info'", ',', "'count'", ',', "'search'", ',', "'download'", ']', 'parser', '=', 'argparse', '.', 'ArgumentParser', '(', 'description', '=', '"Command line access to software repositories for TI calculators, primarily ticalc.org and Cemetech"', ')', 'parser', '.', 'add_argument', '(', '"action"', ',', 'metavar', '=', '"ACTION"', ',', 'type', '=', 'str', ',', 'help', '=', '"The calcpkg command to execute (count, get, info, list, update)"', ')', 'parser', '.', 'add_argument', '(', '"string"', ',', 'metavar', '=', '"STRING"', ',', 'type', '=', 'str', ',', 'help', '=', '"The string to search for when using count, get, info, or list commands"', ',', 'nargs', '=', '"?"', ',', 'default', '=', '""', ')', 'parser', '.', 'add_argument', '(', '"-c"', ',', '"--category"', ',', 'dest', '=', '"category"', ',', 'help', '=', '"Limit searching to a specified category"', ',', 'default', '=', '""', ')', 'parser', '.', 'add_argument', '(', '"-e"', ',', '"--extension"', ',', 'dest', '=', '"extension"', ',', 'help', '=', '"Limit searching to a specified file extension"', ',', 'default', '=', '""', ')', 'parser', '.', 'add_argument', '(', '"-f"', ',', '"--filename"', ',', 'dest', '=', '"searchFiles"', ',', 'action', '=', '"store_true"', ',', 'help', '=', '"Search by archive filenames rather than descriptive package name"', ')', 'parser', '.', 'add_argument', '(', '"-g"', ',', '"--game"', ',', 'dest', '=', '"game"', ',', 'action', '=', '"store_true"', ',', 'help', '=', '"Limit searching to games only"', ')', 'parser', '.', 'add_argument', '(', '"-m"', ',', '"--math"', ',', 'dest', '=', '"math"', ',', 'action', '=', '"store_true"', ',', 'help', '=', '"Limit searching to math and science programs only"', ')', 'parser', '.', 'add_argument', '(', '"-r"', ',', '"--repository"', ',', 'dest', '=', '"repo"', ',', 'help', '=', '"Limit searching by one repository- default is to use all"', ',', 'default', '=', '""', ')', 'parser', '.', 'add_argument', '(', '"-v"', ',', '"--verbose"', ',', 'dest', '=', '"verbose"', ',', 'action', '=', '"store_true"', ',', 'help', '=', '"Always provide verbose output"', ')', 'parser', '.', 'add_argument', '(', '"-x"', ',', '"--extract"', ',', 'dest', '=', '"extract"', ',', 'action', '=', '"store_true"', ',', 'help', '=', '"After downloading, autoextract archive files when possible"', ')', 'parser', '.', 'add_argument', '(', '"-y"', ',', '"--assume-yes"', ',', 'dest', '=', '"prompt"', ',', 'action', '=', '"store_false"', ',', 'help', '=', '"Never prompt for verification of command"', ')', 'args', '=', 'parser', '.', 'parse_args', '(', ')', '#Verify that a valid command was specified', 'if', 'not', 'args', '.', 'action', 'in', 'commands', ':', 'print', '"Error: Invalid action specified, action must be one of "', '+', 'str', '(', 'commands', ')', 'return', '#args.category is special', 'if', 'args', '.', 'category', '!=', '""', ':', 'category', '=', '"/"', '+', 'args', '.', 'category', '+', '"/"', 'else', ':', 'category', '=', '""', '#Initialize repositories; all behind-the-scene processing is done by plugins in calcrepo.repos', 'repositories', '=', 'createRepoObjects', '(', ')', 'if', 'args', '.', 'repo', '!=', '""', ':', 'for', 'repoName', ',', 'repository', 'in', 'repositories', '.', 'iteritems', '(', ')', ':', 'if', 'repoName', '!=', 'args', '.', 'repo', ':', 'repositories', '[', 'repoName', ']', '=', 'None', '#Now, run commands for each repo\t\t', 'for', 'name', ',', 
'repository', 'in', 'repositories', '.', 'iteritems', '(', ')', ':', 'if', 'repository', '!=', 'None', ':', 'repository', '.', 'setRepoData', '(', 'args', '.', 'string', ',', 'category', ',', 'args', '.', 'extension', ',', 'args', '.', 'math', ',', 'args', '.', 'game', ',', 'args', '.', 'searchFiles', ')', 'if', 'args', '.', 'action', '==', '"update"', ':', 'repository', '.', 'updateRepoIndexes', '(', 'args', '.', 'verbose', ')', 'elif', '(', 'args', '.', 'action', '==', '"list"', 'or', 'args', '.', 'action', '==', '"search"', ')', ':', 'repository', '.', 'searchIndex', '(', ')', 'elif', '(', 'args', '.', 'action', '==', '"get"', 'or', 'args', '.', 'action', '==', '"download"', ')', ':', 'repository', '.', 'searchIndex', '(', ')', 'repository', '.', 'downloadFiles', '(', 'args', '.', 'prompt', ',', 'args', '.', 'extract', ')', 'elif', 'args', '.', 'action', '==', '"info"', ':', 'repository', '.', 'getFileInfos', '(', ')', 'elif', 'args', '.', 'action', '==', '"count"', ':', 'repository', '.', 'countIndex', '(', ')'] | Core function for the script | ['Core', 'function', 'for', 'the', 'script'] | train | https://github.com/TC01/calcpkg/blob/5168f606264620a090b42a64354331d208b00d5f/calcrepo/calcpkg.py#L12-L62 |
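A hedged invocation sketch for the calcpkg entry point above: it drives main() as if from the shell by setting sys.argv before the argparse call. The calcrepo import path follows the file location listed in the record, and note the function is Python 2 code (print statements, dict.iteritems), so it will not run unmodified under Python 3.

import sys
from calcrepo import calcpkg  # assumes calcrepo is an importable package

# equivalent to: calcpkg count tetris --game
sys.argv = ["calcpkg", "count", "tetris", "--game"]
calcpkg.main()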
3,856 | secdev/scapy | scapy/layers/tls/tools.py | _tls_aead_auth_decrypt | def _tls_aead_auth_decrypt(alg, c, read_seq_num):
"""
Provided with a TLSCiphertext instance c, the function applies AEAD
cipher alg auth_decrypt function to c.data (and additional data)
in order to authenticate the data and decrypt c.data. When those
steps succeed, the result is a newly created TLSCompressed instance.
On error, None is returned. Note that it is the caller's responsibility to
increment read_seq_num afterwards.
"""
# 'Deduce' TLSCompressed length from TLSCiphertext length
# There is actually no guaranty of this equality, but this is defined as
# such in TLS 1.2 specifications, and it works for GCM and CCM at least.
#
plen = c.len - getattr(alg, "nonce_explicit_len", 0) - alg.tag_len
read_seq_num = struct.pack("!Q", read_seq_num)
A = read_seq_num + struct.pack('!BHH', c.type, c.version, plen)
p = TLSCompressed()
p.type = c.type
p.version = c.version
p.len = plen
p.data = alg.auth_decrypt(A, c.data, read_seq_num)
if p.data is None: # Verification failed.
return None
return p | python | def _tls_aead_auth_decrypt(alg, c, read_seq_num):
"""
Provided with a TLSCiphertext instance c, the function applies AEAD
cipher alg auth_decrypt function to c.data (and additional data)
in order to authenticate the data and decrypt c.data. When those
steps succeed, the result is a newly created TLSCompressed instance.
On error, None is returned. Note that it is the caller's responsibility to
increment read_seq_num afterwards.
"""
# 'Deduce' TLSCompressed length from TLSCiphertext length
# There is actually no guaranty of this equality, but this is defined as
# such in TLS 1.2 specifications, and it works for GCM and CCM at least.
#
plen = c.len - getattr(alg, "nonce_explicit_len", 0) - alg.tag_len
read_seq_num = struct.pack("!Q", read_seq_num)
A = read_seq_num + struct.pack('!BHH', c.type, c.version, plen)
p = TLSCompressed()
p.type = c.type
p.version = c.version
p.len = plen
p.data = alg.auth_decrypt(A, c.data, read_seq_num)
if p.data is None: # Verification failed.
return None
return p | ['def', '_tls_aead_auth_decrypt', '(', 'alg', ',', 'c', ',', 'read_seq_num', ')', ':', "# 'Deduce' TLSCompressed length from TLSCiphertext length", '# There is actually no guaranty of this equality, but this is defined as', '# such in TLS 1.2 specifications, and it works for GCM and CCM at least.', '#', 'plen', '=', 'c', '.', 'len', '-', 'getattr', '(', 'alg', ',', '"nonce_explicit_len"', ',', '0', ')', '-', 'alg', '.', 'tag_len', 'read_seq_num', '=', 'struct', '.', 'pack', '(', '"!Q"', ',', 'read_seq_num', ')', 'A', '=', 'read_seq_num', '+', 'struct', '.', 'pack', '(', "'!BHH'", ',', 'c', '.', 'type', ',', 'c', '.', 'version', ',', 'plen', ')', 'p', '=', 'TLSCompressed', '(', ')', 'p', '.', 'type', '=', 'c', '.', 'type', 'p', '.', 'version', '=', 'c', '.', 'version', 'p', '.', 'len', '=', 'plen', 'p', '.', 'data', '=', 'alg', '.', 'auth_decrypt', '(', 'A', ',', 'c', '.', 'data', ',', 'read_seq_num', ')', 'if', 'p', '.', 'data', 'is', 'None', ':', '# Verification failed.', 'return', 'None', 'return', 'p'] | Provided with a TLSCiphertext instance c, the function applies AEAD
cipher alg auth_decrypt function to c.data (and additional data)
in order to authenticate the data and decrypt c.data. When those
steps succeed, the result is a newly created TLSCompressed instance.
On error, None is returned. Note that it is the caller's responsibility to
increment read_seq_num afterwards. | ['Provided', 'with', 'a', 'TLSCiphertext', 'instance', 'c', 'the', 'function', 'applies', 'AEAD', 'cipher', 'alg', 'auth_decrypt', 'function', 'to', 'c', '.', 'data', '(', 'and', 'additional', 'data', ')', 'in', 'order', 'to', 'authenticate', 'the', 'data', 'and', 'decrypt', 'c', '.', 'data', '.', 'When', 'those', 'steps', 'succeed', 'the', 'result', 'is', 'a', 'newly', 'created', 'TLSCompressed', 'instance', '.', 'On', 'error', 'None', 'is', 'returned', '.', 'Note', 'that', 'it', 'is', 'the', 'caller', 's', 'responsibility', 'to', 'increment', 'read_seq_num', 'afterwards', '.'] | train | https://github.com/secdev/scapy/blob/3ffe757c184017dd46464593a8f80f85abc1e79a/scapy/layers/tls/tools.py#L200-L225 |
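A minimal sketch of the helper's call contract using a stub AEAD object; in real use alg is one of scapy's TLS AEAD cipher instances and c a parsed TLSCiphertext. The stub's field values are illustrative, and scapy (with its crypto dependencies) must be installed for the import to succeed.

from types import SimpleNamespace
from scapy.layers.tls.tools import _tls_aead_auth_decrypt

class StubAEAD:
    nonce_explicit_len = 8
    tag_len = 16
    def auth_decrypt(self, aad, data, seq):
        return data[8:-16]        # pretend the tag verified; strip nonce and tag

ct = SimpleNamespace(type=23, version=0x0303,
                     len=8 + 5 + 16, data=b"\x00" * 8 + b"hello" + b"\x00" * 16)
plain = _tls_aead_auth_decrypt(StubAEAD(), ct, read_seq_num=0)
print(None if plain is None else plain.data)   # b'hello' on success, None if the tag check fails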
3,857 | mikedh/trimesh | trimesh/graph.py | smoothed | def smoothed(mesh, angle):
"""
Return a non-watertight version of the mesh which will
render nicely with smooth shading by disconnecting faces
at sharp angles to each other.
Parameters
---------
mesh : trimesh.Trimesh
Source geometry
angle : float
Angle in radians, adjacent faces which have normals
below this angle will be smoothed
Returns
---------
smooth : trimesh.Trimesh
Geometry with disconnected face patches
"""
# if the mesh has no adjacent faces return a copy
if len(mesh.face_adjacency) == 0:
return mesh.copy()
# face pairs below angle threshold
angle_ok = mesh.face_adjacency_angles <= angle
# subset of face adjacency
adjacency = mesh.face_adjacency[angle_ok]
# list of connected groups of faces
components = connected_components(adjacency,
min_len=1,
nodes=np.arange(len(mesh.faces)))
# get a submesh as a single appended Trimesh
smooth = mesh.submesh(components,
only_watertight=False,
append=True)
return smooth | python | def smoothed(mesh, angle):
"""
Return a non-watertight version of the mesh which will
render nicely with smooth shading by disconnecting faces
at sharp angles to each other.
Parameters
---------
mesh : trimesh.Trimesh
Source geometry
angle : float
Angle in radians, adjacent faces which have normals
below this angle will be smoothed
Returns
---------
smooth : trimesh.Trimesh
Geometry with disconnected face patches
"""
# if the mesh has no adjacent faces return a copy
if len(mesh.face_adjacency) == 0:
return mesh.copy()
# face pairs below angle threshold
angle_ok = mesh.face_adjacency_angles <= angle
# subset of face adjacency
adjacency = mesh.face_adjacency[angle_ok]
# list of connected groups of faces
components = connected_components(adjacency,
min_len=1,
nodes=np.arange(len(mesh.faces)))
# get a submesh as a single appended Trimesh
smooth = mesh.submesh(components,
only_watertight=False,
append=True)
return smooth | ['def', 'smoothed', '(', 'mesh', ',', 'angle', ')', ':', '# if the mesh has no adjacent faces return a copy', 'if', 'len', '(', 'mesh', '.', 'face_adjacency', ')', '==', '0', ':', 'return', 'mesh', '.', 'copy', '(', ')', '# face pairs below angle threshold', 'angle_ok', '=', 'mesh', '.', 'face_adjacency_angles', '<=', 'angle', '# subset of face adjacency', 'adjacency', '=', 'mesh', '.', 'face_adjacency', '[', 'angle_ok', ']', '# list of connected groups of faces', 'components', '=', 'connected_components', '(', 'adjacency', ',', 'min_len', '=', '1', ',', 'nodes', '=', 'np', '.', 'arange', '(', 'len', '(', 'mesh', '.', 'faces', ')', ')', ')', '# get a submesh as a single appended Trimesh', 'smooth', '=', 'mesh', '.', 'submesh', '(', 'components', ',', 'only_watertight', '=', 'False', ',', 'append', '=', 'True', ')', 'return', 'smooth'] | Return a non- watertight version of the mesh which will
render nicely with smooth shading by disconnecting faces
at sharp angles to each other.
Parameters
---------
mesh : trimesh.Trimesh
Source geometry
angle : float
Angle in radians, adjacent faces which have normals
below this angle will be smoothed
Returns
---------
smooth : trimesh.Trimesh
Geometry with disconnected face patches | ['Return', 'a', 'non', '-', 'watertight', 'version', 'of', 'the', 'mesh', 'which', 'will', 'render', 'nicely', 'with', 'smooth', 'shading', 'by', 'disconnecting', 'faces', 'at', 'sharp', 'angles', 'to', 'each', 'other', '.'] | train | https://github.com/mikedh/trimesh/blob/25e059bf6d4caa74f62ffd58ce4f61a90ee4e518/trimesh/graph.py#L728-L763 |
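A minimal usage sketch for the smoothed() helper above, assuming the trimesh package is installed and some mesh file is at hand; the file name and the 30-degree threshold are illustrative.

import numpy as np
import trimesh
from trimesh.graph import smoothed

mesh = trimesh.load("model.stl")                 # hypothetical input file
smooth = smoothed(mesh, angle=np.radians(30))    # faces meeting at > 30 deg stay creased
print(len(mesh.faces), len(smooth.faces))        # same faces, but vertices split along sharp edges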
3,858 | PlaidWeb/Publ | publ/queries.py | where_entry_visible | def where_entry_visible(query, date=None):
""" Generate a where clause for currently-visible entries
Arguments:
date -- The date to generate it relative to (defaults to right now)
"""
return orm.select(
e for e in query
if e.status == model.PublishStatus.PUBLISHED.value or
(e.status == model.PublishStatus.SCHEDULED.value and
(e.utc_date <= (date or arrow.utcnow().datetime))
)
) | python | def where_entry_visible(query, date=None):
""" Generate a where clause for currently-visible entries
Arguments:
date -- The date to generate it relative to (defaults to right now)
"""
return orm.select(
e for e in query
if e.status == model.PublishStatus.PUBLISHED.value or
(e.status == model.PublishStatus.SCHEDULED.value and
(e.utc_date <= (date or arrow.utcnow().datetime))
)
) | ['def', 'where_entry_visible', '(', 'query', ',', 'date', '=', 'None', ')', ':', 'return', 'orm', '.', 'select', '(', 'e', 'for', 'e', 'in', 'query', 'if', 'e', '.', 'status', '==', 'model', '.', 'PublishStatus', '.', 'PUBLISHED', '.', 'value', 'or', '(', 'e', '.', 'status', '==', 'model', '.', 'PublishStatus', '.', 'SCHEDULED', '.', 'value', 'and', '(', 'e', '.', 'utc_date', '<=', '(', 'date', 'or', 'arrow', '.', 'utcnow', '(', ')', '.', 'datetime', ')', ')', ')', ')'] | Generate a where clause for currently-visible entries
Arguments:
date -- The date to generate it relative to (defaults to right now) | ['Generate', 'a', 'where', 'clause', 'for', 'currently', '-', 'visible', 'entries'] | train | https://github.com/PlaidWeb/Publ/blob/ce7893632ddc3cb70b4978a41ffd7dd06fa13565/publ/queries.py#L10-L24 |
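A hypothetical call from inside Publ's own code base: restrict an entry query to what is visible as of a given date. model.Entry and the db_session setup are assumptions about Publ's Pony ORM schema, not shown in this record.

import arrow
from pony import orm
from publ import model, queries

with orm.db_session:
    visible = queries.where_entry_visible(model.Entry.select(),
                                          date=arrow.utcnow().datetime)
    print(visible.count())   # number of published or already-due scheduled entries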
3,859 | roboogle/gtkmvc3 | gtkmvco/gtkmvc3/support/metaclasses.py | PropertyMeta.__create_log_props | def __create_log_props(cls, log_props, _getdict, _setdict): # @NoSelf
"""Creates all the logical property.
The list of names of properties to be created is passed
with frozenset log_props. The getter/setter information is
taken from _{get,set}dict.
This method resolves also wildcards in names, and performs
all checks to ensure correctness.
Returns the frozen set of the actually created properties
(as not log_props may be really created, e.g. when no
getter is provided, and a warning is issued).
"""
real_log_props = set()
resolved_getdict = {}
resolved_setdict = {}
for _dict_name, _dict, _resolved_dict in (
("getter", _getdict, resolved_getdict),
("setter", _setdict, resolved_setdict)):
# first resolve all wildcards
for pat, ai in ((pat, ai)
for pat, ai in _dict.items()
if frozenset(pat) & WILDCARDS):
matches = fnmatch.filter(log_props, pat)
for match in matches:
if match in _resolved_dict:
raise NameError("In class %s.%s %s property '%s' "
"is matched multiple times"
" by patterns" % \
(cls.__module__, cls.__name__, _dict_name, match))
_resolved_dict[match] = ai
if not matches:
logger.warning("In class %s.%s %s pattern '%s' "
"did not match any existing "
"logical property",
cls.__module__, cls.__name__, _dict_name, pat)
# now adds the exact matches (no wilcards) which override
# the pattern-matches
_resolved_dict.update((name, ai)
for name, ai in _dict.items()
if name in log_props)
# checks that all getter/setter have a corresponding logical
# property
not_found = [name for name in _resolved_dict
if name not in log_props]
if not_found:
logger.warning("In class %s.%s logical %s were declared for "
"non-existent observables: %s",
cls.__module__, cls.__name__, _dict_name,
str(not_found))
# creates the properties
for name in log_props:
# finds the getter
ai_get = resolved_getdict.get(name, None)
if ai_get:
# decorator-based
_getter = type(cls).get_getter(cls, name, ai_get.func,
ai_get.has_args)
_deps = ai_get.deps
else:
# old style
_getter = type(cls).get_getter(cls, name)
if _getter is None:
raise RuntimeError("In class %s.%s "
"logical observable '%s' "
"has no getter method" % \
(cls.__module__, cls.__name__, name))
_deps = type(cls)._get_old_style_getter_deps(cls, name,
_getter)
# finds the setter
ai_set = resolved_setdict.get(name, None)
if ai_set:
# decorator-based
if ai_get:
_setter = type(cls).get_setter(cls, name,
ai_set.func, ai_set.has_args,
ai_get.func, ai_get.has_args)
else:
# the getter is old style. _getter is already
# resolved wrt the name it may take, so
# getter_takes_name is False
_setter = type(cls).get_setter(cls, name,
ai_set.func, ai_set.has_args,
_getter, False)
else:
# old style setter
if ai_get:
_setter = type(cls).get_setter(cls, name,
None, None,
ai_get.func,
ai_get.has_args)
else:
_setter = type(cls).get_setter(cls, name)
# creates the logical property, here _setter can be None
prop = PropertyMeta.LogicalOP(_getter, _setter, frozenset(_deps))
setattr(cls, name, prop)
real_log_props.add(name)
# checks that all setters have a getter
setters_no_getters = (set(resolved_setdict) - real_log_props) & \
log_props
if setters_no_getters:
logger.warning("In class %s.%s logical setters have no "
"getters: %s",
cls.__module__, cls.__name__,
", ".join(setters_no_getters))
return frozenset(real_log_props) | python | def __create_log_props(cls, log_props, _getdict, _setdict): # @NoSelf
"""Creates all the logical property.
The list of names of properties to be created is passed
with frozenset log_props. The getter/setter information is
taken from _{get,set}dict.
This method resolves also wildcards in names, and performs
all checks to ensure correctness.
Returns the frozen set of the actually created properties
(as not log_props may be really created, e.g. when no
getter is provided, and a warning is issued).
"""
real_log_props = set()
resolved_getdict = {}
resolved_setdict = {}
for _dict_name, _dict, _resolved_dict in (
("getter", _getdict, resolved_getdict),
("setter", _setdict, resolved_setdict)):
# first resolve all wildcards
for pat, ai in ((pat, ai)
for pat, ai in _dict.items()
if frozenset(pat) & WILDCARDS):
matches = fnmatch.filter(log_props, pat)
for match in matches:
if match in _resolved_dict:
raise NameError("In class %s.%s %s property '%s' "
"is matched multiple times"
" by patterns" % \
(cls.__module__, cls.__name__, _dict_name, match))
_resolved_dict[match] = ai
if not matches:
logger.warning("In class %s.%s %s pattern '%s' "
"did not match any existing "
"logical property",
cls.__module__, cls.__name__, _dict_name, pat)
# now adds the exact matches (no wilcards) which override
# the pattern-matches
_resolved_dict.update((name, ai)
for name, ai in _dict.items()
if name in log_props)
# checks that all getter/setter have a corresponding logical
# property
not_found = [name for name in _resolved_dict
if name not in log_props]
if not_found:
logger.warning("In class %s.%s logical %s were declared for "
"non-existent observables: %s",
cls.__module__, cls.__name__, _dict_name,
str(not_found))
# creates the properties
for name in log_props:
# finds the getter
ai_get = resolved_getdict.get(name, None)
if ai_get:
# decorator-based
_getter = type(cls).get_getter(cls, name, ai_get.func,
ai_get.has_args)
_deps = ai_get.deps
else:
# old style
_getter = type(cls).get_getter(cls, name)
if _getter is None:
raise RuntimeError("In class %s.%s "
"logical observable '%s' "
"has no getter method" % \
(cls.__module__, cls.__name__, name))
_deps = type(cls)._get_old_style_getter_deps(cls, name,
_getter)
# finds the setter
ai_set = resolved_setdict.get(name, None)
if ai_set:
# decorator-based
if ai_get:
_setter = type(cls).get_setter(cls, name,
ai_set.func, ai_set.has_args,
ai_get.func, ai_get.has_args)
else:
# the getter is old style. _getter is already
# resolved wrt the name it may take, so
# getter_takes_name is False
_setter = type(cls).get_setter(cls, name,
ai_set.func, ai_set.has_args,
_getter, False)
else:
# old style setter
if ai_get:
_setter = type(cls).get_setter(cls, name,
None, None,
ai_get.func,
ai_get.has_args)
else:
_setter = type(cls).get_setter(cls, name)
# creates the logical property, here _setter can be None
prop = PropertyMeta.LogicalOP(_getter, _setter, frozenset(_deps))
setattr(cls, name, prop)
real_log_props.add(name)
# checks that all setters have a getter
setters_no_getters = (set(resolved_setdict) - real_log_props) & \
log_props
if setters_no_getters:
logger.warning("In class %s.%s logical setters have no "
"getters: %s",
cls.__module__, cls.__name__,
", ".join(setters_no_getters))
return frozenset(real_log_props) | ['def', '__create_log_props', '(', 'cls', ',', 'log_props', ',', '_getdict', ',', '_setdict', ')', ':', '# @NoSelf', 'real_log_props', '=', 'set', '(', ')', 'resolved_getdict', '=', '{', '}', 'resolved_setdict', '=', '{', '}', 'for', '_dict_name', ',', '_dict', ',', '_resolved_dict', 'in', '(', '(', '"getter"', ',', '_getdict', ',', 'resolved_getdict', ')', ',', '(', '"setter"', ',', '_setdict', ',', 'resolved_setdict', ')', ')', ':', '# first resolve all wildcards', 'for', 'pat', ',', 'ai', 'in', '(', '(', 'pat', ',', 'ai', ')', 'for', 'pat', ',', 'ai', 'in', '_dict', '.', 'items', '(', ')', 'if', 'frozenset', '(', 'pat', ')', '&', 'WILDCARDS', ')', ':', 'matches', '=', 'fnmatch', '.', 'filter', '(', 'log_props', ',', 'pat', ')', 'for', 'match', 'in', 'matches', ':', 'if', 'match', 'in', '_resolved_dict', ':', 'raise', 'NameError', '(', '"In class %s.%s %s property \'%s\' "', '"is matched multiple times"', '" by patterns"', '%', '(', 'cls', '.', '__module__', ',', 'cls', '.', '__name__', ',', '_dict_name', ',', 'match', ')', ')', '_resolved_dict', '[', 'match', ']', '=', 'ai', 'if', 'not', 'matches', ':', 'logger', '.', 'warning', '(', '"In class %s.%s %s pattern \'%s\' "', '"did not match any existing "', '"logical property"', ',', 'cls', '.', '__module__', ',', 'cls', '.', '__name__', ',', '_dict_name', ',', 'pat', ')', '# now adds the exact matches (no wilcards) which override', '# the pattern-matches', '_resolved_dict', '.', 'update', '(', '(', 'name', ',', 'ai', ')', 'for', 'name', ',', 'ai', 'in', '_dict', '.', 'items', '(', ')', 'if', 'name', 'in', 'log_props', ')', '# checks that all getter/setter have a corresponding logical', '# property', 'not_found', '=', '[', 'name', 'for', 'name', 'in', '_resolved_dict', 'if', 'name', 'not', 'in', 'log_props', ']', 'if', 'not_found', ':', 'logger', '.', 'warning', '(', '"In class %s.%s logical %s were declared for "', '"non-existent observables: %s"', ',', 'cls', '.', '__module__', ',', 'cls', '.', '__name__', ',', '_dict_name', ',', 'str', '(', 'not_found', ')', ')', '# creates the properties', 'for', 'name', 'in', 'log_props', ':', '# finds the getter', 'ai_get', '=', 'resolved_getdict', '.', 'get', '(', 'name', ',', 'None', ')', 'if', 'ai_get', ':', '# decorator-based', '_getter', '=', 'type', '(', 'cls', ')', '.', 'get_getter', '(', 'cls', ',', 'name', ',', 'ai_get', '.', 'func', ',', 'ai_get', '.', 'has_args', ')', '_deps', '=', 'ai_get', '.', 'deps', 'else', ':', '# old style', '_getter', '=', 'type', '(', 'cls', ')', '.', 'get_getter', '(', 'cls', ',', 'name', ')', 'if', '_getter', 'is', 'None', ':', 'raise', 'RuntimeError', '(', '"In class %s.%s "', '"logical observable \'%s\' "', '"has no getter method"', '%', '(', 'cls', '.', '__module__', ',', 'cls', '.', '__name__', ',', 'name', ')', ')', '_deps', '=', 'type', '(', 'cls', ')', '.', '_get_old_style_getter_deps', '(', 'cls', ',', 'name', ',', '_getter', ')', '# finds the setter', 'ai_set', '=', 'resolved_setdict', '.', 'get', '(', 'name', ',', 'None', ')', 'if', 'ai_set', ':', '# decorator-based', 'if', 'ai_get', ':', '_setter', '=', 'type', '(', 'cls', ')', '.', 'get_setter', '(', 'cls', ',', 'name', ',', 'ai_set', '.', 'func', ',', 'ai_set', '.', 'has_args', ',', 'ai_get', '.', 'func', ',', 'ai_get', '.', 'has_args', ')', 'else', ':', '# the getter is old style. 
_getter is already', '# resolved wrt the name it may take, so', '# getter_takes_name is False', '_setter', '=', 'type', '(', 'cls', ')', '.', 'get_setter', '(', 'cls', ',', 'name', ',', 'ai_set', '.', 'func', ',', 'ai_set', '.', 'has_args', ',', '_getter', ',', 'False', ')', 'else', ':', '# old style setter', 'if', 'ai_get', ':', '_setter', '=', 'type', '(', 'cls', ')', '.', 'get_setter', '(', 'cls', ',', 'name', ',', 'None', ',', 'None', ',', 'ai_get', '.', 'func', ',', 'ai_get', '.', 'has_args', ')', 'else', ':', '_setter', '=', 'type', '(', 'cls', ')', '.', 'get_setter', '(', 'cls', ',', 'name', ')', '# creates the logical property, here _setter can be None', 'prop', '=', 'PropertyMeta', '.', 'LogicalOP', '(', '_getter', ',', '_setter', ',', 'frozenset', '(', '_deps', ')', ')', 'setattr', '(', 'cls', ',', 'name', ',', 'prop', ')', 'real_log_props', '.', 'add', '(', 'name', ')', '# checks that all setters have a getter', 'setters_no_getters', '=', '(', 'set', '(', 'resolved_setdict', ')', '-', 'real_log_props', ')', '&', 'log_props', 'if', 'setters_no_getters', ':', 'logger', '.', 'warning', '(', '"In class %s.%s logical setters have no "', '"getters: %s"', ',', 'cls', '.', '__module__', ',', 'cls', '.', '__name__', ',', '", "', '.', 'join', '(', 'setters_no_getters', ')', ')', 'return', 'frozenset', '(', 'real_log_props', ')'] | Creates all the logical property.
The list of names of properties to be created is passed
with frozenset log_props. The getter/setter information is
taken from _{get,set}dict.
This method resolves also wildcards in names, and performs
all checks to ensure correctness.
Returns the frozen set of the actually created properties
(as not log_props may be really created, e.g. when no
getter is provided, and a warning is issued). | ['Creates', 'all', 'the', 'logical', 'property', '.'] | train | https://github.com/roboogle/gtkmvc3/blob/63405fd8d2056be26af49103b13a8d5e57fe4dff/gtkmvco/gtkmvc3/support/metaclasses.py#L225-L342 |
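A sketch of what this machinery enables at the class level: a gtkmvc3 Model declaring a logical (computed) property. The Model.getter decorator and the __observables__ convention are taken from gtkmvc3's documented usage, not from this excerpt, so treat the exact spelling as an assumption.

from gtkmvc3 import Model

class Person(Model):
    __observables__ = ("full_name",)     # logical property: no stored attribute

    first = "Ada"
    last = "Lovelace"

    @Model.getter
    def full_name(self):                 # resolved into a LogicalOP by the metaclass code above
        return "%s %s" % (self.first, self.last)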
3,860 | jaredLunde/vital-tools | vital/debug/__init__.py | ProgressBar.finish | def finish(self):
""" Resets the progress bar and clears it from the terminal """
pct = floor(round(self.progress/self.size, 2)*100)
pr = floor(pct*.33)
bar = "".join([" " for x in range(pr-1)] + ["↦"])
subprogress = self.format_parent_bar() if self.parent_bar else ""
fin = "Loading{} ={}{} ({}%)".format(subprogress, bar, "ӿ", pct)
print(fin.ljust(len(fin)+5), end="\r")
time.sleep(0.10)
print("\033[K\033[1A")
self.progress = 0 | python | def finish(self):
""" Resets the progress bar and clears it from the terminal """
pct = floor(round(self.progress/self.size, 2)*100)
pr = floor(pct*.33)
bar = "".join([" " for x in range(pr-1)] + ["↦"])
subprogress = self.format_parent_bar() if self.parent_bar else ""
fin = "Loading{} ={}{} ({}%)".format(subprogress, bar, "ӿ", pct)
print(fin.ljust(len(fin)+5), end="\r")
time.sleep(0.10)
print("\033[K\033[1A")
self.progress = 0 | ['def', 'finish', '(', 'self', ')', ':', 'pct', '=', 'floor', '(', 'round', '(', 'self', '.', 'progress', '/', 'self', '.', 'size', ',', '2', ')', '*', '100', ')', 'pr', '=', 'floor', '(', 'pct', '*', '.33', ')', 'bar', '=', '""', '.', 'join', '(', '[', '" "', 'for', 'x', 'in', 'range', '(', 'pr', '-', '1', ')', ']', '+', '[', '"↦"])', '', '', 'subprogress', '=', 'self', '.', 'format_parent_bar', '(', ')', 'if', 'self', '.', 'parent_bar', 'else', '""', 'fin', '=', '"Loading{} ={}{} ({}%)"', '.', 'format', '(', 'subprogress', ',', 'bar', ',', '"ӿ",', ' ', 'ct)', '', 'print', '(', 'fin', '.', 'ljust', '(', 'len', '(', 'fin', ')', '+', '5', ')', ',', 'end', '=', '"\\r"', ')', 'time', '.', 'sleep', '(', '0.10', ')', 'print', '(', '"\\033[K\\033[1A"', ')', 'self', '.', 'progress', '=', '0'] | Resets the progress bar and clears it from the terminal | ['Resets', 'the', 'progress', 'bar', 'and', 'clears', 'it', 'from', 'the', 'terminal'] | train | https://github.com/jaredLunde/vital-tools/blob/ea924c9bbb6ec22aa66f8095f018b1ee0099ac04/vital/debug/__init__.py#L1925-L1935 |
3,861 | globality-corp/microcosm-flask | microcosm_flask/swagger/definitions.py | build_path | def build_path(operation, ns):
"""
Build a path URI for an operation.
"""
try:
return ns.url_for(operation, _external=False)
except BuildError as error:
# we are missing some URI path parameters
uri_templates = {
argument: "{{{}}}".format(argument)
for argument in error.suggested.arguments
}
# flask will sometimes try to quote '{' and '}' characters
return unquote(ns.url_for(operation, _external=False, **uri_templates)) | python | def build_path(operation, ns):
"""
Build a path URI for an operation.
"""
try:
return ns.url_for(operation, _external=False)
except BuildError as error:
# we are missing some URI path parameters
uri_templates = {
argument: "{{{}}}".format(argument)
for argument in error.suggested.arguments
}
# flask will sometimes try to quote '{' and '}' characters
return unquote(ns.url_for(operation, _external=False, **uri_templates)) | ['def', 'build_path', '(', 'operation', ',', 'ns', ')', ':', 'try', ':', 'return', 'ns', '.', 'url_for', '(', 'operation', ',', '_external', '=', 'False', ')', 'except', 'BuildError', 'as', 'error', ':', '# we are missing some URI path parameters', 'uri_templates', '=', '{', 'argument', ':', '"{{{}}}"', '.', 'format', '(', 'argument', ')', 'for', 'argument', 'in', 'error', '.', 'suggested', '.', 'arguments', '}', "# flask will sometimes try to quote '{' and '}' characters", 'return', 'unquote', '(', 'ns', '.', 'url_for', '(', 'operation', ',', '_external', '=', 'False', ',', '*', '*', 'uri_templates', ')', ')'] | Build a path URI for an operation. | ['Build', 'a', 'path', 'URI', 'for', 'an', 'operation', '.'] | train | https://github.com/globality-corp/microcosm-flask/blob/c2eaf57f03e7d041eea343751a4a90fcc80df418/microcosm_flask/swagger/definitions.py#L123-L137 |
3,862 | NatLibFi/Skosify | skosify/check.py | hierarchy_cycles | def hierarchy_cycles(rdf, fix=False):
"""Check if the graph contains skos:broader cycles and optionally break these.
:param Graph rdf: An rdflib.graph.Graph object.
:param bool fix: Fix the problem by removing any skos:broader that overlaps
with skos:broaderTransitive.
"""
top_concepts = sorted(rdf.subject_objects(SKOS.hasTopConcept))
status = {}
for cs, root in top_concepts:
_hierarchy_cycles_visit(
rdf, root, None, fix, status=status)
# double check that all concepts were actually visited in the search,
# and visit remaining ones if necessary
recheck_top_concepts = False
for conc in sorted(rdf.subjects(RDF.type, SKOS.Concept)):
if conc not in status:
recheck_top_concepts = True
_hierarchy_cycles_visit(
rdf, conc, None, fix, status=status)
return recheck_top_concepts | python | def hierarchy_cycles(rdf, fix=False):
"""Check if the graph contains skos:broader cycles and optionally break these.
:param Graph rdf: An rdflib.graph.Graph object.
:param bool fix: Fix the problem by removing any skos:broader that overlaps
with skos:broaderTransitive.
"""
top_concepts = sorted(rdf.subject_objects(SKOS.hasTopConcept))
status = {}
for cs, root in top_concepts:
_hierarchy_cycles_visit(
rdf, root, None, fix, status=status)
# double check that all concepts were actually visited in the search,
# and visit remaining ones if necessary
recheck_top_concepts = False
for conc in sorted(rdf.subjects(RDF.type, SKOS.Concept)):
if conc not in status:
recheck_top_concepts = True
_hierarchy_cycles_visit(
rdf, conc, None, fix, status=status)
return recheck_top_concepts | ['def', 'hierarchy_cycles', '(', 'rdf', ',', 'fix', '=', 'False', ')', ':', 'top_concepts', '=', 'sorted', '(', 'rdf', '.', 'subject_objects', '(', 'SKOS', '.', 'hasTopConcept', ')', ')', 'status', '=', '{', '}', 'for', 'cs', ',', 'root', 'in', 'top_concepts', ':', '_hierarchy_cycles_visit', '(', 'rdf', ',', 'root', ',', 'None', ',', 'fix', ',', 'status', '=', 'status', ')', '# double check that all concepts were actually visited in the search,', '# and visit remaining ones if necessary', 'recheck_top_concepts', '=', 'False', 'for', 'conc', 'in', 'sorted', '(', 'rdf', '.', 'subjects', '(', 'RDF', '.', 'type', ',', 'SKOS', '.', 'Concept', ')', ')', ':', 'if', 'conc', 'not', 'in', 'status', ':', 'recheck_top_concepts', '=', 'True', '_hierarchy_cycles_visit', '(', 'rdf', ',', 'conc', ',', 'None', ',', 'fix', ',', 'status', '=', 'status', ')', 'return', 'recheck_top_concepts'] | Check if the graph contains skos:broader cycles and optionally break these.
:param Graph rdf: An rdflib.graph.Graph object.
:param bool fix: Fix the problem by removing any skos:broader that overlaps
with skos:broaderTransitive. | ['Check', 'if', 'the', 'graph', 'contains', 'skos', ':', 'broader', 'cycles', 'and', 'optionally', 'break', 'these', '.'] | train | https://github.com/NatLibFi/Skosify/blob/1d269987f10df08e706272dcf6a86aef4abebcde/skosify/check.py#L37-L58 |
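A minimal sketch, assuming an rdflib graph that already holds a SKOS vocabulary; the input file name is illustrative.

import rdflib
from skosify.check import hierarchy_cycles

rdf = rdflib.Graph()
rdf.parse("vocabulary.ttl", format="turtle")
recheck = hierarchy_cycles(rdf, fix=True)    # breaks skos:broader cycles in place
print(recheck)                               # True if top concepts should be re-checked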
3,863 | edublancas/sklearn-evaluation | sklearn_evaluation/plot/classification.py | confusion_matrix | def confusion_matrix(y_true, y_pred, target_names=None, normalize=False,
cmap=None, ax=None):
"""
Plot confusion matrix.
Parameters
----------
y_true : array-like, shape = [n_samples]
Correct target values (ground truth).
y_pred : array-like, shape = [n_samples]
Target predicted classes (estimator predictions).
target_names : list
List containing the names of the target classes. List must be in order
e.g. ``['Label for class 0', 'Label for class 1']``. If ``None``,
generic labels will be generated e.g. ``['Class 0', 'Class 1']``
ax: matplotlib Axes
Axes object to draw the plot onto, otherwise uses current Axes
normalize : bool
Normalize the confusion matrix
cmap : matplotlib Colormap
If ``None`` uses a modified version of matplotlib's OrRd colormap.
Returns
-------
ax: matplotlib Axes
Axes containing the plot
Examples
--------
.. plot:: ../../examples/confusion_matrix.py
"""
if any((val is None for val in (y_true, y_pred))):
raise ValueError("y_true and y_pred are needed to plot confusion "
"matrix")
# calculate how many names you expect
values = set(y_true).union(set(y_pred))
expected_len = len(values)
if target_names and (expected_len != len(target_names)):
raise ValueError(('Data cointains {} different values, but target'
' names contains {} values.'.format(expected_len,
len(target_names)
)))
# if the user didn't pass target_names, create generic ones
if not target_names:
values = list(values)
values.sort()
target_names = ['Class {}'.format(v) for v in values]
cm = sk_confusion_matrix(y_true, y_pred)
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
np.set_printoptions(precision=2)
if ax is None:
ax = plt.gca()
# this (y, x) may sound counterintuitive. The reason is that
# in a matrix cell (i, j) is in row=i and col=j, translating that
# to an x, y plane (which matplotlib uses to plot), we need to use
# i as the y coordinate (how many steps down) and j as the x coordinate
# how many steps to the right.
for (y, x), v in np.ndenumerate(cm):
try:
label = '{:.2}'.format(v)
except:
label = v
ax.text(x, y, label, horizontalalignment='center',
verticalalignment='center')
if cmap is None:
cmap = default_heatmap()
im = ax.imshow(cm, interpolation='nearest', cmap=cmap)
plt.colorbar(im, ax=ax)
tick_marks = np.arange(len(target_names))
ax.set_xticks(tick_marks)
ax.set_xticklabels(target_names)
ax.set_yticks(tick_marks)
ax.set_yticklabels(target_names)
title = 'Confusion matrix'
if normalize:
title += ' (normalized)'
ax.set_title(title)
ax.set_ylabel('True label')
ax.set_xlabel('Predicted label')
return ax | python | def confusion_matrix(y_true, y_pred, target_names=None, normalize=False,
cmap=None, ax=None):
"""
Plot confusion matrix.
Parameters
----------
y_true : array-like, shape = [n_samples]
Correct target values (ground truth).
y_pred : array-like, shape = [n_samples]
Target predicted classes (estimator predictions).
target_names : list
List containing the names of the target classes. List must be in order
e.g. ``['Label for class 0', 'Label for class 1']``. If ``None``,
generic labels will be generated e.g. ``['Class 0', 'Class 1']``
ax: matplotlib Axes
Axes object to draw the plot onto, otherwise uses current Axes
normalize : bool
Normalize the confusion matrix
cmap : matplotlib Colormap
If ``None`` uses a modified version of matplotlib's OrRd colormap.
Returns
-------
ax: matplotlib Axes
Axes containing the plot
Examples
--------
.. plot:: ../../examples/confusion_matrix.py
"""
if any((val is None for val in (y_true, y_pred))):
raise ValueError("y_true and y_pred are needed to plot confusion "
"matrix")
# calculate how many names you expect
values = set(y_true).union(set(y_pred))
expected_len = len(values)
if target_names and (expected_len != len(target_names)):
raise ValueError(('Data cointains {} different values, but target'
' names contains {} values.'.format(expected_len,
len(target_names)
)))
# if the user didn't pass target_names, create generic ones
if not target_names:
values = list(values)
values.sort()
target_names = ['Class {}'.format(v) for v in values]
cm = sk_confusion_matrix(y_true, y_pred)
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
np.set_printoptions(precision=2)
if ax is None:
ax = plt.gca()
# this (y, x) may sound counterintuitive. The reason is that
# in a matrix cell (i, j) is in row=i and col=j, translating that
# to an x, y plane (which matplotlib uses to plot), we need to use
# i as the y coordinate (how many steps down) and j as the x coordinate
# how many steps to the right.
for (y, x), v in np.ndenumerate(cm):
try:
label = '{:.2}'.format(v)
except:
label = v
ax.text(x, y, label, horizontalalignment='center',
verticalalignment='center')
if cmap is None:
cmap = default_heatmap()
im = ax.imshow(cm, interpolation='nearest', cmap=cmap)
plt.colorbar(im, ax=ax)
tick_marks = np.arange(len(target_names))
ax.set_xticks(tick_marks)
ax.set_xticklabels(target_names)
ax.set_yticks(tick_marks)
ax.set_yticklabels(target_names)
title = 'Confusion matrix'
if normalize:
title += ' (normalized)'
ax.set_title(title)
ax.set_ylabel('True label')
ax.set_xlabel('Predicted label')
return ax | ['def', 'confusion_matrix', '(', 'y_true', ',', 'y_pred', ',', 'target_names', '=', 'None', ',', 'normalize', '=', 'False', ',', 'cmap', '=', 'None', ',', 'ax', '=', 'None', ')', ':', 'if', 'any', '(', '(', 'val', 'is', 'None', 'for', 'val', 'in', '(', 'y_true', ',', 'y_pred', ')', ')', ')', ':', 'raise', 'ValueError', '(', '"y_true and y_pred are needed to plot confusion "', '"matrix"', ')', '# calculate how many names you expect', 'values', '=', 'set', '(', 'y_true', ')', '.', 'union', '(', 'set', '(', 'y_pred', ')', ')', 'expected_len', '=', 'len', '(', 'values', ')', 'if', 'target_names', 'and', '(', 'expected_len', '!=', 'len', '(', 'target_names', ')', ')', ':', 'raise', 'ValueError', '(', '(', "'Data cointains {} different values, but target'", "' names contains {} values.'", '.', 'format', '(', 'expected_len', ',', 'len', '(', 'target_names', ')', ')', ')', ')', "# if the user didn't pass target_names, create generic ones", 'if', 'not', 'target_names', ':', 'values', '=', 'list', '(', 'values', ')', 'values', '.', 'sort', '(', ')', 'target_names', '=', '[', "'Class {}'", '.', 'format', '(', 'v', ')', 'for', 'v', 'in', 'values', ']', 'cm', '=', 'sk_confusion_matrix', '(', 'y_true', ',', 'y_pred', ')', 'if', 'normalize', ':', 'cm', '=', 'cm', '.', 'astype', '(', "'float'", ')', '/', 'cm', '.', 'sum', '(', 'axis', '=', '1', ')', '[', ':', ',', 'np', '.', 'newaxis', ']', 'np', '.', 'set_printoptions', '(', 'precision', '=', '2', ')', 'if', 'ax', 'is', 'None', ':', 'ax', '=', 'plt', '.', 'gca', '(', ')', '# this (y, x) may sound counterintuitive. The reason is that', '# in a matrix cell (i, j) is in row=i and col=j, translating that', '# to an x, y plane (which matplotlib uses to plot), we need to use', '# i as the y coordinate (how many steps down) and j as the x coordinate', '# how many steps to the right.', 'for', '(', 'y', ',', 'x', ')', ',', 'v', 'in', 'np', '.', 'ndenumerate', '(', 'cm', ')', ':', 'try', ':', 'label', '=', "'{:.2}'", '.', 'format', '(', 'v', ')', 'except', ':', 'label', '=', 'v', 'ax', '.', 'text', '(', 'x', ',', 'y', ',', 'label', ',', 'horizontalalignment', '=', "'center'", ',', 'verticalalignment', '=', "'center'", ')', 'if', 'cmap', 'is', 'None', ':', 'cmap', '=', 'default_heatmap', '(', ')', 'im', '=', 'ax', '.', 'imshow', '(', 'cm', ',', 'interpolation', '=', "'nearest'", ',', 'cmap', '=', 'cmap', ')', 'plt', '.', 'colorbar', '(', 'im', ',', 'ax', '=', 'ax', ')', 'tick_marks', '=', 'np', '.', 'arange', '(', 'len', '(', 'target_names', ')', ')', 'ax', '.', 'set_xticks', '(', 'tick_marks', ')', 'ax', '.', 'set_xticklabels', '(', 'target_names', ')', 'ax', '.', 'set_yticks', '(', 'tick_marks', ')', 'ax', '.', 'set_yticklabels', '(', 'target_names', ')', 'title', '=', "'Confusion matrix'", 'if', 'normalize', ':', 'title', '+=', "' (normalized)'", 'ax', '.', 'set_title', '(', 'title', ')', 'ax', '.', 'set_ylabel', '(', "'True label'", ')', 'ax', '.', 'set_xlabel', '(', "'Predicted label'", ')', 'return', 'ax'] | Plot confustion matrix.
Parameters
----------
y_true : array-like, shape = [n_samples]
Correct target values (ground truth).
y_pred : array-like, shape = [n_samples]
Target predicted classes (estimator predictions).
target_names : list
List containing the names of the target classes. List must be in order
e.g. ``['Label for class 0', 'Label for class 1']``. If ``None``,
generic labels will be generated e.g. ``['Class 0', 'Class 1']``
ax: matplotlib Axes
Axes object to draw the plot onto, otherwise uses current Axes
normalize : bool
Normalize the confusion matrix
cmap : matplotlib Colormap
If ``None`` uses a modified version of matplotlib's OrRd colormap.
Returns
-------
ax: matplotlib Axes
Axes containing the plot
Examples
--------
.. plot:: ../../examples/confusion_matrix.py | ['Plot', 'confustion', 'matrix', '.'] | train | https://github.com/edublancas/sklearn-evaluation/blob/79ee6e4dfe911b5a5a9b78a5caaed7c73eef6f39/sklearn_evaluation/plot/classification.py#L13-L105 |
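A usage sketch following the docstring's parameters; the labels and predictions are toy values, and the import assumes the package re-exports its plot functions under sklearn_evaluation.plot.

import matplotlib.pyplot as plt
from sklearn_evaluation.plot import confusion_matrix

y_true = [0, 0, 1, 1, 1, 0]
y_pred = [0, 1, 1, 1, 0, 0]
confusion_matrix(y_true, y_pred, target_names=["ham", "spam"], normalize=True)
plt.show()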
3,864 | indico/indico-plugins | piwik/indico_piwik/queries/utils.py | get_json_from_remote_server | def get_json_from_remote_server(func, **kwargs):
"""
Safely manage calls to the remote server by encapsulating JSON creation
from Piwik data.
"""
rawjson = func(**kwargs)
if rawjson is None:
# If the request failed we already logged in in PiwikRequest;
# no need to get into the exception handler below.
return {}
try:
data = json.loads(rawjson)
if isinstance(data, dict) and data.get('result') == 'error':
current_plugin.logger.error('The Piwik server responded with an error: %s', data['message'])
return {}
return data
except Exception:
current_plugin.logger.exception('Unable to load JSON from source %s', rawjson)
return {} | python | def get_json_from_remote_server(func, **kwargs):
"""
Safely manage calls to the remote server by encapsulating JSON creation
from Piwik data.
"""
rawjson = func(**kwargs)
if rawjson is None:
# If the request failed we already logged in in PiwikRequest;
# no need to get into the exception handler below.
return {}
try:
data = json.loads(rawjson)
if isinstance(data, dict) and data.get('result') == 'error':
current_plugin.logger.error('The Piwik server responded with an error: %s', data['message'])
return {}
return data
except Exception:
current_plugin.logger.exception('Unable to load JSON from source %s', rawjson)
return {} | ['def', 'get_json_from_remote_server', '(', 'func', ',', '*', '*', 'kwargs', ')', ':', 'rawjson', '=', 'func', '(', '*', '*', 'kwargs', ')', 'if', 'rawjson', 'is', 'None', ':', '# If the request failed we already logged in in PiwikRequest;', '# no need to get into the exception handler below.', 'return', '{', '}', 'try', ':', 'data', '=', 'json', '.', 'loads', '(', 'rawjson', ')', 'if', 'isinstance', '(', 'data', ',', 'dict', ')', 'and', 'data', '.', 'get', '(', "'result'", ')', '==', "'error'", ':', 'current_plugin', '.', 'logger', '.', 'error', '(', "'The Piwik server responded with an error: %s'", ',', 'data', '[', "'message'", ']', ')', 'return', '{', '}', 'return', 'data', 'except', 'Exception', ':', 'current_plugin', '.', 'logger', '.', 'exception', '(', "'Unable to load JSON from source %s'", ',', 'rawjson', ')', 'return', '{', '}'] | Safely manage calls to the remote server by encapsulating JSON creation
from Piwik data. | ['Safely', 'manage', 'calls', 'to', 'the', 'remote', 'server', 'by', 'encapsulating', 'JSON', 'creation', 'from', 'Piwik', 'data', '.'] | train | https://github.com/indico/indico-plugins/blob/fe50085cc63be9b8161b09539e662e7b04e4b38e/piwik/indico_piwik/queries/utils.py#L22-L40 |
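A call-shape sketch with a stand-in query function; inside the plugin the callable is a PiwikRequest method, and importing the real module assumes an Indico plugin environment is available.

from indico_piwik.queries.utils import get_json_from_remote_server

def fake_piwik_call(**kwargs):
    return '{"nb_visits": 42}'              # raw JSON text, as Piwik would return it

data = get_json_from_remote_server(fake_piwik_call)
print(data.get("nb_visits", 0))             # 42; a failed or malformed call yields {} instead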
3,865 | gbowerman/azurerm | examples/jumpbox.py | main | def main():
'''Main routine.'''
# validate command line arguments
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument('--vmname', '-n', required=True, action='store', help='Name')
arg_parser.add_argument('--rgname', '-g', required=True, action='store',
help='Resource Group Name')
arg_parser.add_argument('--user', '-u', required=False, action='store', default='azure',
help='Optional username')
arg_parser.add_argument('--password', '-p', required=False, action='store',
help='Optional password')
arg_parser.add_argument('--sshkey', '-k', required=False, action='store',
help='SSH public key')
arg_parser.add_argument('--sshpath', '-s', required=False, action='store',
help='SSH public key file path')
arg_parser.add_argument('--location', '-l', required=False, action='store',
help='Location, e.g. eastus')
arg_parser.add_argument('--vmsize', required=False, action='store', default='Standard_D1_V2',
help='VM size, defaults to Standard_D1_V2')
arg_parser.add_argument('--dns', '-d', required=False, action='store',
help='DNS, e.g. myuniquename')
arg_parser.add_argument('--vnet', required=False, action='store',
help='Optional VNET Name (else first VNET in resource group used)')
arg_parser.add_argument('--nowait', action='store_true', default=False,
help='Do not wait for VM to finish provisioning')
arg_parser.add_argument('--nonsg', action='store_true', default=False,
help='Do not create a network security group on the NIC')
arg_parser.add_argument('--verbose', '-v', action='store_true', default=False,
help='Print operational details')
args = arg_parser.parse_args()
name = args.vmname
rgname = args.rgname
vnet = args.vnet
location = args.location
username = args.user
password = args.password
sshkey = args.sshkey
sshpath = args.sshpath
verbose = args.verbose
dns_label = args.dns
no_wait = args.nowait
no_nsg = args.nonsg
vmsize = args.vmsize
# make sure all authentication scenarios are handled
if sshkey is not None and sshpath is not None:
sys.exit('Error: You can provide an SSH public key, or a public key file path, not both.')
if password is not None and (sshkey is not None or sshpath is not None):
sys.exit('Error: provide a password or SSH key (or nothing), not both')
use_password = False
if password is not None:
use_password = True
else:
if sshkey is None and sshpath is None: # no auth parameters were provided
# look for ~/id_rsa.pub
home = os.path.expanduser('~')
sshpath = home + os.sep + '.ssh' + os.sep + 'id_rsa.pub'
if os.path.isfile(sshpath) is False:
print('Default public key file not found.')
use_password = True
password = Haikunator().haikunate(delimiter=',') # creates random password
print('Created new password = ' + password)
else:
print('Default public key file found')
if use_password is False:
print('Reading public key..')
if sshkey is None:
# at this point sshpath should have a valid Value
with open(sshpath, 'r') as pub_ssh_file_fd:
sshkey = pub_ssh_file_fd.read()
# Load Azure app defaults
try:
with open('azurermconfig.json') as config_file:
config_data = json.load(config_file)
except FileNotFoundError:
sys.exit("Error: Expecting azurermconfig.json in current folder")
tenant_id = config_data['tenantId']
app_id = config_data['appId']
app_secret = config_data['appSecret']
subscription_id = config_data['subscriptionId']
# authenticate
access_token = azurerm.get_access_token(tenant_id, app_id, app_secret)
# if no location parameter was specified now would be a good time to figure out the location
if location is None:
try:
rgroup = azurerm.get_resource_group(access_token, subscription_id, rgname)
location = rgroup['location']
except KeyError:
print('Cannot find resource group ' + rgname + '. Check connection/authorization.')
print(json.dumps(rgroup, sort_keys=False, indent=2, separators=(',', ': ')))
sys.exit()
print('location = ' + location)
# get VNET
print('Getting VNet')
vnet_not_found = False
if vnet is None:
print('VNet not set, checking resource group')
# get first VNET in resource group
try:
vnets = azurerm.list_vnets_rg(access_token, subscription_id, rgname)
# print(json.dumps(vnets, sort_keys=False, indent=2, separators=(',', ': ')))
vnetresource = vnets['value'][0]
except IndexError:
print('No VNET found in resource group.')
vnet_not_found = True
vnet = name + 'vnet'
else:
print('Getting VNet: ' + vnet)
vnetresource = azurerm.get_vnet(access_token, subscription_id, rgname, vnet)
if 'properties' not in vnetresource:
print('VNet ' + vnet + ' not found in resource group ' + rgname)
vnet_not_found = True
if vnet_not_found is True:
# create a vnet
print('Creating vnet: ' + vnet)
rmresource = azurerm.create_vnet(access_token, subscription_id, rgname, vnet, location, \
address_prefix='10.0.0.0/16', nsg_id=None)
if rmresource.status_code != 201:
print('Error ' + str(vnetresource.status_code) + ' creating VNET. ' + vnetresource.text)
sys.exit()
vnetresource = azurerm.get_vnet(access_token, subscription_id, rgname, vnet)
try:
subnet_id = vnetresource['properties']['subnets'][0]['id']
except KeyError:
print('Subnet not found for VNet ' + vnet)
sys.exit()
if verbose is True:
print('subnet_id = ' + subnet_id)
public_ip_name = name + 'ip'
if dns_label is None:
dns_label = name + 'dns'
print('Creating public ipaddr')
rmreturn = azurerm.create_public_ip(access_token, subscription_id, rgname, public_ip_name,
dns_label, location)
if rmreturn.status_code not in [200, 201]:
print(rmreturn.text)
sys.exit('Error: ' + str(rmreturn.status_code) + ' from azurerm.create_public_ip()')
ip_id = rmreturn.json()['id']
if verbose is True:
print('ip_id = ' + ip_id)
print('Waiting for IP provisioning..')
waiting = True
while waiting:
pip = azurerm.get_public_ip(access_token, subscription_id, rgname, public_ip_name)
if pip['properties']['provisioningState'] == 'Succeeded':
waiting = False
time.sleep(1)
if no_nsg is True:
nsg_id = None
else:
# create NSG
nsg_name = name + 'nsg'
print('Creating NSG: ' + nsg_name)
rmreturn = azurerm.create_nsg(access_token, subscription_id, rgname, nsg_name, location)
if rmreturn.status_code not in [200, 201]:
print('Error ' + str(rmreturn.status_code) + ' creating NSG. ' + rmreturn.text)
sys.exit()
nsg_id = rmreturn.json()['id']
# create NSG rule for ssh, scp
nsg_rule = 'ssh'
print('Creating NSG rule: ' + nsg_rule)
rmreturn = azurerm.create_nsg_rule(access_token, subscription_id, rgname, nsg_name,
nsg_rule, description='ssh rule',
destination_range='22')
if rmreturn.status_code not in [200, 201]:
print('Error ' + str(rmreturn.status_code) + ' creating NSG rule. ' + rmreturn.text)
sys.exit()
# create NIC
nic_name = name + 'nic'
print('Creating NIC: ' + nic_name)
rmreturn = azurerm.create_nic(access_token, subscription_id, rgname, nic_name, ip_id,
subnet_id, location, nsg_id=nsg_id)
if rmreturn.status_code not in [200, 201]:
print('Error ' + rmreturn.status_code + ' creating NSG rule. ' + rmreturn.text)
sys.exit()
nic_id = rmreturn.json()['id']
print('Waiting for NIC provisioning..')
waiting = True
while waiting:
nic = azurerm.get_nic(access_token, subscription_id, rgname, nic_name)
if nic['properties']['provisioningState'] == 'Succeeded':
waiting = False
time.sleep(1)
# create VM
vm_name = name
#publisher = 'CoreOS'
#offer = 'CoreOS'
#sku = 'Stable'
publisher = 'Canonical'
offer = 'UbuntuServer'
sku = '16.04-LTS'
version = 'latest'
print('Creating VM: ' + vm_name)
if use_password is True:
rmreturn = azurerm.create_vm(access_token, subscription_id, rgname, vm_name, vmsize,
publisher, offer, sku, version, nic_id, location,
username=username, password=password)
else:
rmreturn = azurerm.create_vm(access_token, subscription_id, rgname, vm_name, vmsize,
publisher, offer, sku, version, nic_id, location,
username=username, public_key=sshkey)
if rmreturn.status_code != 201:
sys.exit('Error ' + rmreturn.status_code + ' creating VM. ' + rmreturn.text)
if no_wait is False:
print('Waiting for VM provisioning..')
waiting = True
while waiting:
vm_model = azurerm.get_vm(access_token, subscription_id, rgname, vm_name)
if vm_model['properties']['provisioningState'] == 'Succeeded':
waiting = False
time.sleep(5)
print('VM provisioning complete.')
print('Connect with:')
print('ssh ' + dns_label + '.' + location + '.cloudapp.azure.com -l ' + username) | python | def main():
'''Main routine.'''
# validate command line arguments
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument('--vmname', '-n', required=True, action='store', help='Name')
arg_parser.add_argument('--rgname', '-g', required=True, action='store',
help='Resource Group Name')
arg_parser.add_argument('--user', '-u', required=False, action='store', default='azure',
help='Optional username')
arg_parser.add_argument('--password', '-p', required=False, action='store',
help='Optional password')
arg_parser.add_argument('--sshkey', '-k', required=False, action='store',
help='SSH public key')
arg_parser.add_argument('--sshpath', '-s', required=False, action='store',
help='SSH public key file path')
arg_parser.add_argument('--location', '-l', required=False, action='store',
help='Location, e.g. eastus')
arg_parser.add_argument('--vmsize', required=False, action='store', default='Standard_D1_V2',
help='VM size, defaults to Standard_D1_V2')
arg_parser.add_argument('--dns', '-d', required=False, action='store',
help='DNS, e.g. myuniquename')
arg_parser.add_argument('--vnet', required=False, action='store',
help='Optional VNET Name (else first VNET in resource group used)')
arg_parser.add_argument('--nowait', action='store_true', default=False,
help='Do not wait for VM to finish provisioning')
arg_parser.add_argument('--nonsg', action='store_true', default=False,
help='Do not create a network security group on the NIC')
arg_parser.add_argument('--verbose', '-v', action='store_true', default=False,
help='Print operational details')
args = arg_parser.parse_args()
name = args.vmname
rgname = args.rgname
vnet = args.vnet
location = args.location
username = args.user
password = args.password
sshkey = args.sshkey
sshpath = args.sshpath
verbose = args.verbose
dns_label = args.dns
no_wait = args.nowait
no_nsg = args.nonsg
vmsize = args.vmsize
# make sure all authentication scenarios are handled
if sshkey is not None and sshpath is not None:
sys.exit('Error: You can provide an SSH public key, or a public key file path, not both.')
if password is not None and (sshkey is not None or sshpath is not None):
sys.exit('Error: provide a password or SSH key (or nothing), not both')
use_password = False
if password is not None:
use_password = True
else:
if sshkey is None and sshpath is None: # no auth parameters were provided
# look for ~/.ssh/id_rsa.pub
home = os.path.expanduser('~')
sshpath = home + os.sep + '.ssh' + os.sep + 'id_rsa.pub'
if os.path.isfile(sshpath) is False:
print('Default public key file not found.')
use_password = True
password = Haikunator().haikunate(delimiter=',') # creates random password
print('Created new password = ' + password)
else:
print('Default public key file found')
if use_password is False:
print('Reading public key..')
if sshkey is None:
# at this point sshpath should have a valid Value
with open(sshpath, 'r') as pub_ssh_file_fd:
sshkey = pub_ssh_file_fd.read()
# Load Azure app defaults
try:
with open('azurermconfig.json') as config_file:
config_data = json.load(config_file)
except FileNotFoundError:
sys.exit("Error: Expecting azurermconfig.json in current folder")
tenant_id = config_data['tenantId']
app_id = config_data['appId']
app_secret = config_data['appSecret']
subscription_id = config_data['subscriptionId']
# authenticate
access_token = azurerm.get_access_token(tenant_id, app_id, app_secret)
# if no location parameter was specified now would be a good time to figure out the location
if location is None:
try:
rgroup = azurerm.get_resource_group(access_token, subscription_id, rgname)
location = rgroup['location']
except KeyError:
print('Cannot find resource group ' + rgname + '. Check connection/authorization.')
print(json.dumps(rgroup, sort_keys=False, indent=2, separators=(',', ': ')))
sys.exit()
print('location = ' + location)
# get VNET
print('Getting VNet')
vnet_not_found = False
if vnet is None:
print('VNet not set, checking resource group')
# get first VNET in resource group
try:
vnets = azurerm.list_vnets_rg(access_token, subscription_id, rgname)
# print(json.dumps(vnets, sort_keys=False, indent=2, separators=(',', ': ')))
vnetresource = vnets['value'][0]
except IndexError:
print('No VNET found in resource group.')
vnet_not_found = True
vnet = name + 'vnet'
else:
print('Getting VNet: ' + vnet)
vnetresource = azurerm.get_vnet(access_token, subscription_id, rgname, vnet)
if 'properties' not in vnetresource:
print('VNet ' + vnet + ' not found in resource group ' + rgname)
vnet_not_found = True
if vnet_not_found is True:
# create a vnet
print('Creating vnet: ' + vnet)
rmresource = azurerm.create_vnet(access_token, subscription_id, rgname, vnet, location, \
address_prefix='10.0.0.0/16', nsg_id=None)
if rmresource.status_code != 201:
print('Error ' + str(rmresource.status_code) + ' creating VNET. ' + rmresource.text)
sys.exit()
vnetresource = azurerm.get_vnet(access_token, subscription_id, rgname, vnet)
try:
subnet_id = vnetresource['properties']['subnets'][0]['id']
except KeyError:
print('Subnet not found for VNet ' + vnet)
sys.exit()
if verbose is True:
print('subnet_id = ' + subnet_id)
public_ip_name = name + 'ip'
if dns_label is None:
dns_label = name + 'dns'
print('Creating public ipaddr')
rmreturn = azurerm.create_public_ip(access_token, subscription_id, rgname, public_ip_name,
dns_label, location)
if rmreturn.status_code not in [200, 201]:
print(rmreturn.text)
sys.exit('Error: ' + str(rmreturn.status_code) + ' from azurerm.create_public_ip()')
ip_id = rmreturn.json()['id']
if verbose is True:
print('ip_id = ' + ip_id)
print('Waiting for IP provisioning..')
waiting = True
while waiting:
pip = azurerm.get_public_ip(access_token, subscription_id, rgname, public_ip_name)
if pip['properties']['provisioningState'] == 'Succeeded':
waiting = False
time.sleep(1)
if no_nsg is True:
nsg_id = None
else:
# create NSG
nsg_name = name + 'nsg'
print('Creating NSG: ' + nsg_name)
rmreturn = azurerm.create_nsg(access_token, subscription_id, rgname, nsg_name, location)
if rmreturn.status_code not in [200, 201]:
print('Error ' + str(rmreturn.status_code) + ' creating NSG. ' + rmreturn.text)
sys.exit()
nsg_id = rmreturn.json()['id']
# create NSG rule for ssh, scp
nsg_rule = 'ssh'
print('Creating NSG rule: ' + nsg_rule)
rmreturn = azurerm.create_nsg_rule(access_token, subscription_id, rgname, nsg_name,
nsg_rule, description='ssh rule',
destination_range='22')
if rmreturn.status_code not in [200, 201]:
print('Error ' + str(rmreturn.status_code) + ' creating NSG rule. ' + rmreturn.text)
sys.exit()
# create NIC
nic_name = name + 'nic'
print('Creating NIC: ' + nic_name)
rmreturn = azurerm.create_nic(access_token, subscription_id, rgname, nic_name, ip_id,
subnet_id, location, nsg_id=nsg_id)
if rmreturn.status_code not in [200, 201]:
print('Error ' + str(rmreturn.status_code) + ' creating NIC. ' + rmreturn.text)
sys.exit()
nic_id = rmreturn.json()['id']
print('Waiting for NIC provisioning..')
waiting = True
while waiting:
nic = azurerm.get_nic(access_token, subscription_id, rgname, nic_name)
if nic['properties']['provisioningState'] == 'Succeeded':
waiting = False
time.sleep(1)
# create VM
vm_name = name
#publisher = 'CoreOS'
#offer = 'CoreOS'
#sku = 'Stable'
publisher = 'Canonical'
offer = 'UbuntuServer'
sku = '16.04-LTS'
version = 'latest'
print('Creating VM: ' + vm_name)
if use_password is True:
rmreturn = azurerm.create_vm(access_token, subscription_id, rgname, vm_name, vmsize,
publisher, offer, sku, version, nic_id, location,
username=username, password=password)
else:
rmreturn = azurerm.create_vm(access_token, subscription_id, rgname, vm_name, vmsize,
publisher, offer, sku, version, nic_id, location,
username=username, public_key=sshkey)
if rmreturn.status_code != 201:
sys.exit('Error ' + str(rmreturn.status_code) + ' creating VM. ' + rmreturn.text)
if no_wait is False:
print('Waiting for VM provisioning..')
waiting = True
while waiting:
vm_model = azurerm.get_vm(access_token, subscription_id, rgname, vm_name)
if vm_model['properties']['provisioningState'] == 'Succeeded':
waiting = False
time.sleep(5)
print('VM provisioning complete.')
print('Connect with:')
print('ssh ' + dns_label + '.' + location + '.cloudapp.azure.com -l ' + username) | ['def', 'main', '(', ')', ':', '# validate command line arguments', 'arg_parser', '=', 'argparse', '.', 'ArgumentParser', '(', ')', 'arg_parser', '.', 'add_argument', '(', "'--vmname'", ',', "'-n'", ',', 'required', '=', 'True', ',', 'action', '=', "'store'", ',', 'help', '=', "'Name'", ')', 'arg_parser', '.', 'add_argument', '(', "'--rgname'", ',', "'-g'", ',', 'required', '=', 'True', ',', 'action', '=', "'store'", ',', 'help', '=', "'Resource Group Name'", ')', 'arg_parser', '.', 'add_argument', '(', "'--user'", ',', "'-u'", ',', 'required', '=', 'False', ',', 'action', '=', "'store'", ',', 'default', '=', "'azure'", ',', 'help', '=', "'Optional username'", ')', 'arg_parser', '.', 'add_argument', '(', "'--password'", ',', "'-p'", ',', 'required', '=', 'False', ',', 'action', '=', "'store'", ',', 'help', '=', "'Optional password'", ')', 'arg_parser', '.', 'add_argument', '(', "'--sshkey'", ',', "'-k'", ',', 'required', '=', 'False', ',', 'action', '=', "'store'", ',', 'help', '=', "'SSH public key'", ')', 'arg_parser', '.', 'add_argument', '(', "'--sshpath'", ',', "'-s'", ',', 'required', '=', 'False', ',', 'action', '=', "'store'", ',', 'help', '=', "'SSH public key file path'", ')', 'arg_parser', '.', 'add_argument', '(', "'--location'", ',', "'-l'", ',', 'required', '=', 'False', ',', 'action', '=', "'store'", ',', 'help', '=', "'Location, e.g. eastus'", ')', 'arg_parser', '.', 'add_argument', '(', "'--vmsize'", ',', 'required', '=', 'False', ',', 'action', '=', "'store'", ',', 'default', '=', "'Standard_D1_V2'", ',', 'help', '=', "'VM size, defaults to Standard_D1_V2'", ')', 'arg_parser', '.', 'add_argument', '(', "'--dns'", ',', "'-d'", ',', 'required', '=', 'False', ',', 'action', '=', "'store'", ',', 'help', '=', "'DNS, e.g. 
myuniquename'", ')', 'arg_parser', '.', 'add_argument', '(', "'--vnet'", ',', 'required', '=', 'False', ',', 'action', '=', "'store'", ',', 'help', '=', "'Optional VNET Name (else first VNET in resource group used)'", ')', 'arg_parser', '.', 'add_argument', '(', "'--nowait'", ',', 'action', '=', "'store_true'", ',', 'default', '=', 'False', ',', 'help', '=', "'Do not wait for VM to finish provisioning'", ')', 'arg_parser', '.', 'add_argument', '(', "'--nonsg'", ',', 'action', '=', "'store_true'", ',', 'default', '=', 'False', ',', 'help', '=', "'Do not create a network security group on the NIC'", ')', 'arg_parser', '.', 'add_argument', '(', "'--verbose'", ',', "'-v'", ',', 'action', '=', "'store_true'", ',', 'default', '=', 'False', ',', 'help', '=', "'Print operational details'", ')', 'args', '=', 'arg_parser', '.', 'parse_args', '(', ')', 'name', '=', 'args', '.', 'vmname', 'rgname', '=', 'args', '.', 'rgname', 'vnet', '=', 'args', '.', 'vnet', 'location', '=', 'args', '.', 'location', 'username', '=', 'args', '.', 'user', 'password', '=', 'args', '.', 'password', 'sshkey', '=', 'args', '.', 'sshkey', 'sshpath', '=', 'args', '.', 'sshpath', 'verbose', '=', 'args', '.', 'verbose', 'dns_label', '=', 'args', '.', 'dns', 'no_wait', '=', 'args', '.', 'nowait', 'no_nsg', '=', 'args', '.', 'nonsg', 'vmsize', '=', 'args', '.', 'vmsize', '# make sure all authentication scenarios are handled', 'if', 'sshkey', 'is', 'not', 'None', 'and', 'sshpath', 'is', 'not', 'None', ':', 'sys', '.', 'exit', '(', "'Error: You can provide an SSH public key, or a public key file path, not both.'", ')', 'if', 'password', 'is', 'not', 'None', 'and', '(', 'sshkey', 'is', 'not', 'None', 'or', 'sshpath', 'is', 'not', 'None', ')', ':', 'sys', '.', 'exit', '(', "'Error: provide a password or SSH key (or nothing), not both'", ')', 'use_password', '=', 'False', 'if', 'password', 'is', 'not', 'None', ':', 'use_password', '=', 'True', 'else', ':', 'if', 'sshkey', 'is', 'None', 'and', 'sshpath', 'is', 'None', ':', '# no auth parameters were provided', '# look for ~/id_rsa.pub', 'home', '=', 'os', '.', 'path', '.', 'expanduser', '(', "'~'", ')', 'sshpath', '=', 'home', '+', 'os', '.', 'sep', '+', "'.ssh'", '+', 'os', '.', 'sep', '+', "'id_rsa.pub'", 'if', 'os', '.', 'path', '.', 'isfile', '(', 'sshpath', ')', 'is', 'False', ':', 'print', '(', "'Default public key file not found.'", ')', 'use_password', '=', 'True', 'password', '=', 'Haikunator', '(', ')', '.', 'haikunate', '(', 'delimiter', '=', "','", ')', '# creates random password', 'print', '(', "'Created new password = '", '+', 'password', ')', 'else', ':', 'print', '(', "'Default public key file found'", ')', 'if', 'use_password', 'is', 'False', ':', 'print', '(', "'Reading public key..'", ')', 'if', 'sshkey', 'is', 'None', ':', '# at this point sshpath should have a valid Value', 'with', 'open', '(', 'sshpath', ',', "'r'", ')', 'as', 'pub_ssh_file_fd', ':', 'sshkey', '=', 'pub_ssh_file_fd', '.', 'read', '(', ')', '# Load Azure app defaults', 'try', ':', 'with', 'open', '(', "'azurermconfig.json'", ')', 'as', 'config_file', ':', 'config_data', '=', 'json', '.', 'load', '(', 'config_file', ')', 'except', 'FileNotFoundError', ':', 'sys', '.', 'exit', '(', '"Error: Expecting azurermconfig.json in current folder"', ')', 'tenant_id', '=', 'config_data', '[', "'tenantId'", ']', 'app_id', '=', 'config_data', '[', "'appId'", ']', 'app_secret', '=', 'config_data', '[', "'appSecret'", ']', 'subscription_id', '=', 'config_data', '[', "'subscriptionId'", ']', '# authenticate', 
'access_token', '=', 'azurerm', '.', 'get_access_token', '(', 'tenant_id', ',', 'app_id', ',', 'app_secret', ')', '# if no location parameter was specified now would be a good time to figure out the location', 'if', 'location', 'is', 'None', ':', 'try', ':', 'rgroup', '=', 'azurerm', '.', 'get_resource_group', '(', 'access_token', ',', 'subscription_id', ',', 'rgname', ')', 'location', '=', 'rgroup', '[', "'location'", ']', 'except', 'KeyError', ':', 'print', '(', "'Cannot find resource group '", '+', 'rgname', '+', "'. Check connection/authorization.'", ')', 'print', '(', 'json', '.', 'dumps', '(', 'rgroup', ',', 'sort_keys', '=', 'False', ',', 'indent', '=', '2', ',', 'separators', '=', '(', "','", ',', "': '", ')', ')', ')', 'sys', '.', 'exit', '(', ')', 'print', '(', "'location = '", '+', 'location', ')', '# get VNET', 'print', '(', "'Getting VNet'", ')', 'vnet_not_found', '=', 'False', 'if', 'vnet', 'is', 'None', ':', 'print', '(', "'VNet not set, checking resource group'", ')', '# get first VNET in resource group', 'try', ':', 'vnets', '=', 'azurerm', '.', 'list_vnets_rg', '(', 'access_token', ',', 'subscription_id', ',', 'rgname', ')', "# print(json.dumps(vnets, sort_keys=False, indent=2, separators=(',', ': ')))", 'vnetresource', '=', 'vnets', '[', "'value'", ']', '[', '0', ']', 'except', 'IndexError', ':', 'print', '(', "'No VNET found in resource group.'", ')', 'vnet_not_found', '=', 'True', 'vnet', '=', 'name', '+', "'vnet'", 'else', ':', 'print', '(', "'Getting VNet: '", '+', 'vnet', ')', 'vnetresource', '=', 'azurerm', '.', 'get_vnet', '(', 'access_token', ',', 'subscription_id', ',', 'rgname', ',', 'vnet', ')', 'if', "'properties'", 'not', 'in', 'vnetresource', ':', 'print', '(', "'VNet '", '+', 'vnet', '+', "' not found in resource group '", '+', 'rgname', ')', 'vnet_not_found', '=', 'True', 'if', 'vnet_not_found', 'is', 'True', ':', '# create a vnet', 'print', '(', "'Creating vnet: '", '+', 'vnet', ')', 'rmresource', '=', 'azurerm', '.', 'create_vnet', '(', 'access_token', ',', 'subscription_id', ',', 'rgname', ',', 'vnet', ',', 'location', ',', 'address_prefix', '=', "'10.0.0.0/16'", ',', 'nsg_id', '=', 'None', ')', 'if', 'rmresource', '.', 'status_code', '!=', '201', ':', 'print', '(', "'Error '", '+', 'str', '(', 'vnetresource', '.', 'status_code', ')', '+', "' creating VNET. 
'", '+', 'vnetresource', '.', 'text', ')', 'sys', '.', 'exit', '(', ')', 'vnetresource', '=', 'azurerm', '.', 'get_vnet', '(', 'access_token', ',', 'subscription_id', ',', 'rgname', ',', 'vnet', ')', 'try', ':', 'subnet_id', '=', 'vnetresource', '[', "'properties'", ']', '[', "'subnets'", ']', '[', '0', ']', '[', "'id'", ']', 'except', 'KeyError', ':', 'print', '(', "'Subnet not found for VNet '", '+', 'vnet', ')', 'sys', '.', 'exit', '(', ')', 'if', 'verbose', 'is', 'True', ':', 'print', '(', "'subnet_id = '", '+', 'subnet_id', ')', 'public_ip_name', '=', 'name', '+', "'ip'", 'if', 'dns_label', 'is', 'None', ':', 'dns_label', '=', 'name', '+', "'dns'", 'print', '(', "'Creating public ipaddr'", ')', 'rmreturn', '=', 'azurerm', '.', 'create_public_ip', '(', 'access_token', ',', 'subscription_id', ',', 'rgname', ',', 'public_ip_name', ',', 'dns_label', ',', 'location', ')', 'if', 'rmreturn', '.', 'status_code', 'not', 'in', '[', '200', ',', '201', ']', ':', 'print', '(', 'rmreturn', '.', 'text', ')', 'sys', '.', 'exit', '(', "'Error: '", '+', 'str', '(', 'rmreturn', '.', 'status_code', ')', '+', "' from azurerm.create_public_ip()'", ')', 'ip_id', '=', 'rmreturn', '.', 'json', '(', ')', '[', "'id'", ']', 'if', 'verbose', 'is', 'True', ':', 'print', '(', "'ip_id = '", '+', 'ip_id', ')', 'print', '(', "'Waiting for IP provisioning..'", ')', 'waiting', '=', 'True', 'while', 'waiting', ':', 'pip', '=', 'azurerm', '.', 'get_public_ip', '(', 'access_token', ',', 'subscription_id', ',', 'rgname', ',', 'public_ip_name', ')', 'if', 'pip', '[', "'properties'", ']', '[', "'provisioningState'", ']', '==', "'Succeeded'", ':', 'waiting', '=', 'False', 'time', '.', 'sleep', '(', '1', ')', 'if', 'no_nsg', 'is', 'True', ':', 'nsg_id', '=', 'None', 'else', ':', '# create NSG', 'nsg_name', '=', 'name', '+', "'nsg'", 'print', '(', "'Creating NSG: '", '+', 'nsg_name', ')', 'rmreturn', '=', 'azurerm', '.', 'create_nsg', '(', 'access_token', ',', 'subscription_id', ',', 'rgname', ',', 'nsg_name', ',', 'location', ')', 'if', 'rmreturn', '.', 'status_code', 'not', 'in', '[', '200', ',', '201', ']', ':', 'print', '(', "'Error '", '+', 'str', '(', 'rmreturn', '.', 'status_code', ')', '+', "' creating NSG. '", '+', 'rmreturn', '.', 'text', ')', 'sys', '.', 'exit', '(', ')', 'nsg_id', '=', 'rmreturn', '.', 'json', '(', ')', '[', "'id'", ']', '# create NSG rule for ssh, scp', 'nsg_rule', '=', "'ssh'", 'print', '(', "'Creating NSG rule: '", '+', 'nsg_rule', ')', 'rmreturn', '=', 'azurerm', '.', 'create_nsg_rule', '(', 'access_token', ',', 'subscription_id', ',', 'rgname', ',', 'nsg_name', ',', 'nsg_rule', ',', 'description', '=', "'ssh rule'", ',', 'destination_range', '=', "'22'", ')', 'if', 'rmreturn', '.', 'status_code', 'not', 'in', '[', '200', ',', '201', ']', ':', 'print', '(', "'Error '", '+', 'str', '(', 'rmreturn', '.', 'status_code', ')', '+', "' creating NSG rule. '", '+', 'rmreturn', '.', 'text', ')', 'sys', '.', 'exit', '(', ')', '# create NIC', 'nic_name', '=', 'name', '+', "'nic'", 'print', '(', "'Creating NIC: '", '+', 'nic_name', ')', 'rmreturn', '=', 'azurerm', '.', 'create_nic', '(', 'access_token', ',', 'subscription_id', ',', 'rgname', ',', 'nic_name', ',', 'ip_id', ',', 'subnet_id', ',', 'location', ',', 'nsg_id', '=', 'nsg_id', ')', 'if', 'rmreturn', '.', 'status_code', 'not', 'in', '[', '200', ',', '201', ']', ':', 'print', '(', "'Error '", '+', 'rmreturn', '.', 'status_code', '+', "' creating NSG rule. 
'", '+', 'rmreturn', '.', 'text', ')', 'sys', '.', 'exit', '(', ')', 'nic_id', '=', 'rmreturn', '.', 'json', '(', ')', '[', "'id'", ']', 'print', '(', "'Waiting for NIC provisioning..'", ')', 'waiting', '=', 'True', 'while', 'waiting', ':', 'nic', '=', 'azurerm', '.', 'get_nic', '(', 'access_token', ',', 'subscription_id', ',', 'rgname', ',', 'nic_name', ')', 'if', 'nic', '[', "'properties'", ']', '[', "'provisioningState'", ']', '==', "'Succeeded'", ':', 'waiting', '=', 'False', 'time', '.', 'sleep', '(', '1', ')', '# create VM', 'vm_name', '=', 'name', "#publisher = 'CoreOS'", "#offer = 'CoreOS'", "#sku = 'Stable'", 'publisher', '=', "'Canonical'", 'offer', '=', "'UbuntuServer'", 'sku', '=', "'16.04-LTS'", 'version', '=', "'latest'", 'print', '(', "'Creating VM: '", '+', 'vm_name', ')', 'if', 'use_password', 'is', 'True', ':', 'rmreturn', '=', 'azurerm', '.', 'create_vm', '(', 'access_token', ',', 'subscription_id', ',', 'rgname', ',', 'vm_name', ',', 'vmsize', ',', 'publisher', ',', 'offer', ',', 'sku', ',', 'version', ',', 'nic_id', ',', 'location', ',', 'username', '=', 'username', ',', 'password', '=', 'password', ')', 'else', ':', 'rmreturn', '=', 'azurerm', '.', 'create_vm', '(', 'access_token', ',', 'subscription_id', ',', 'rgname', ',', 'vm_name', ',', 'vmsize', ',', 'publisher', ',', 'offer', ',', 'sku', ',', 'version', ',', 'nic_id', ',', 'location', ',', 'username', '=', 'username', ',', 'public_key', '=', 'sshkey', ')', 'if', 'rmreturn', '.', 'status_code', '!=', '201', ':', 'sys', '.', 'exit', '(', "'Error '", '+', 'rmreturn', '.', 'status_code', '+', "' creating VM. '", '+', 'rmreturn', '.', 'text', ')', 'if', 'no_wait', 'is', 'False', ':', 'print', '(', "'Waiting for VM provisioning..'", ')', 'waiting', '=', 'True', 'while', 'waiting', ':', 'vm_model', '=', 'azurerm', '.', 'get_vm', '(', 'access_token', ',', 'subscription_id', ',', 'rgname', ',', 'vm_name', ')', 'if', 'vm_model', '[', "'properties'", ']', '[', "'provisioningState'", ']', '==', "'Succeeded'", ':', 'waiting', '=', 'False', 'time', '.', 'sleep', '(', '5', ')', 'print', '(', "'VM provisioning complete.'", ')', 'print', '(', "'Connect with:'", ')', 'print', '(', "'ssh '", '+', 'dns_label', '+', "'.'", '+', 'location', '+', "'.cloudapp.azure.com -l '", '+', 'username', ')'] | Main routine. | ['Main', 'routine', '.'] | train | https://github.com/gbowerman/azurerm/blob/79d40431d3b13f8a36aadbff5029888383d72674/examples/jumpbox.py#L12-L245 |
3,866 | lionheart/django-pyodbc | django_pyodbc/introspection.py | DatabaseIntrospection.get_table_description | def get_table_description(self, cursor, table_name, identity_check=True):
"""Returns a description of the table, with DB-API cursor.description interface.
The 'identity_check' parameter has been added to the function argspec.
If set to True, the function will check each of the table's fields for the
IDENTITY property (the IDENTITY property is the MSSQL equivalent to an AutoField).
When a field is found with an IDENTITY property, it is given a custom field number
of SQL_AUTOFIELD, which maps to the 'AutoField' value in the DATA_TYPES_REVERSE dict.
"""
# map pyodbc's cursor.columns to db-api cursor description
columns = [[c[3], c[4], None, c[6], c[6], c[8], c[10]] for c in cursor.columns(table=table_name)]
items = []
for column in columns:
if identity_check and self._is_auto_field(cursor, table_name, column[0]):
column[1] = SQL_AUTOFIELD
# The conversion from TextField to CharField below is unwise.
# A SQLServer db field of type "Text" is not interchangeable with a CharField, no matter how short its max_length.
# For example, model.objects.values(<text_field_name>).count() will fail on a sqlserver 'text' field
if column[1] == Database.SQL_WVARCHAR and column[3] < 4000:
column[1] = Database.SQL_WCHAR
items.append(column)
return items | python | def get_table_description(self, cursor, table_name, identity_check=True):
"""Returns a description of the table, with DB-API cursor.description interface.
The 'identity_check' parameter has been added to the function argspec.
If set to True, the function will check each of the table's fields for the
IDENTITY property (the IDENTITY property is the MSSQL equivalent to an AutoField).
When a field is found with an IDENTITY property, it is given a custom field number
of SQL_AUTOFIELD, which maps to the 'AutoField' value in the DATA_TYPES_REVERSE dict.
"""
# map pyodbc's cursor.columns to db-api cursor description
columns = [[c[3], c[4], None, c[6], c[6], c[8], c[10]] for c in cursor.columns(table=table_name)]
items = []
for column in columns:
if identity_check and self._is_auto_field(cursor, table_name, column[0]):
column[1] = SQL_AUTOFIELD
# The conversion from TextField to CharField below is unwise.
# A SQLServer db field of type "Text" is not interchangeable with a CharField, no matter how short its max_length.
# For example, model.objects.values(<text_field_name>).count() will fail on a sqlserver 'text' field
if column[1] == Database.SQL_WVARCHAR and column[3] < 4000:
column[1] = Database.SQL_WCHAR
items.append(column)
return items | ['def', 'get_table_description', '(', 'self', ',', 'cursor', ',', 'table_name', ',', 'identity_check', '=', 'True', ')', ':', "# map pyodbc's cursor.columns to db-api cursor description", 'columns', '=', '[', '[', 'c', '[', '3', ']', ',', 'c', '[', '4', ']', ',', 'None', ',', 'c', '[', '6', ']', ',', 'c', '[', '6', ']', ',', 'c', '[', '8', ']', ',', 'c', '[', '10', ']', ']', 'for', 'c', 'in', 'cursor', '.', 'columns', '(', 'table', '=', 'table_name', ')', ']', 'items', '=', '[', ']', 'for', 'column', 'in', 'columns', ':', 'if', 'identity_check', 'and', 'self', '.', '_is_auto_field', '(', 'cursor', ',', 'table_name', ',', 'column', '[', '0', ']', ')', ':', 'column', '[', '1', ']', '=', 'SQL_AUTOFIELD', '# The conversion from TextField to CharField below is unwise.', '# A SQLServer db field of type "Text" is not interchangeable with a CharField, no matter how short its max_length.', "# For example, model.objects.values(<text_field_name>).count() will fail on a sqlserver 'text' field", 'if', 'column', '[', '1', ']', '==', 'Database', '.', 'SQL_WVARCHAR', 'and', 'column', '[', '3', ']', '<', '4000', ':', 'column', '[', '1', ']', '=', 'Database', '.', 'SQL_WCHAR', 'items', '.', 'append', '(', 'column', ')', 'return', 'items'] | Returns a description of the table, with DB-API cursor.description interface.
The 'identity_check' parameter has been added to the function argspec.
If set to True, the function will check each of the table's fields for the
IDENTITY property (the IDENTITY property is the MSSQL equivalent to an AutoField).
When a field is found with an IDENTITY property, it is given a custom field number
of SQL_AUTOFIELD, which maps to the 'AutoField' value in the DATA_TYPES_REVERSE dict. | ['Returns', 'a', 'description', 'of', 'the', 'table', 'with', 'DB', '-', 'API', 'cursor', '.', 'description', 'interface', '.'] | train | https://github.com/lionheart/django-pyodbc/blob/46adda7b0bfabfa2640f72592c6f6f407f78b363/django_pyodbc/introspection.py#L114-L137 |
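For orientation, the list comprehension above rearranges pyodbc's cursor.columns() rows (per the ODBC SQLColumns layout, where index 3 is the column name, 4 the data type, 6 the column size, 8 the decimal digits and 10 the nullable flag) into the seven-slot DB-API description layout. A self-contained sketch with an invented sample row:

# Invented sample row in ODBC SQLColumns order; only the indexes used above matter.
c = ('db', 'dbo', 'mytable', 'id', 4, 'int', 10, 4, 0, 10, 0, None)
description_entry = [c[3], c[4], None, c[6], c[6], c[8], c[10]]
# -> ['id', 4, None, 10, 10, 0, 0]: name, type_code, display_size,
#    internal_size, precision, scale, null_ok
print(description_entry)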
3,867 | googlemaps/google-maps-services-python | googlemaps/places.py | _autocomplete | def _autocomplete(client, url_part, input_text, session_token=None,
offset=None, location=None, radius=None, language=None,
types=None, components=None, strict_bounds=False):
"""
Internal handler for ``autocomplete`` and ``autocomplete_query``.
See each method's docs for arg details.
"""
params = {"input": input_text}
if session_token:
params["sessiontoken"] = session_token
if offset:
params["offset"] = offset
if location:
params["location"] = convert.latlng(location)
if radius:
params["radius"] = radius
if language:
params["language"] = language
if types:
params["types"] = types
if components:
if len(components) != 1 or list(components.keys())[0] != "country":
raise ValueError("Only country components are supported")
params["components"] = convert.components(components)
if strict_bounds:
params["strictbounds"] = "true"
url = "/maps/api/place/%sautocomplete/json" % url_part
return client._request(url, params).get("predictions", []) | python | def _autocomplete(client, url_part, input_text, session_token=None,
offset=None, location=None, radius=None, language=None,
types=None, components=None, strict_bounds=False):
"""
Internal handler for ``autocomplete`` and ``autocomplete_query``.
See each method's docs for arg details.
"""
params = {"input": input_text}
if session_token:
params["sessiontoken"] = session_token
if offset:
params["offset"] = offset
if location:
params["location"] = convert.latlng(location)
if radius:
params["radius"] = radius
if language:
params["language"] = language
if types:
params["types"] = types
if components:
if len(components) != 1 or list(components.keys())[0] != "country":
raise ValueError("Only country components are supported")
params["components"] = convert.components(components)
if strict_bounds:
params["strictbounds"] = "true"
url = "/maps/api/place/%sautocomplete/json" % url_part
return client._request(url, params).get("predictions", []) | ['def', '_autocomplete', '(', 'client', ',', 'url_part', ',', 'input_text', ',', 'session_token', '=', 'None', ',', 'offset', '=', 'None', ',', 'location', '=', 'None', ',', 'radius', '=', 'None', ',', 'language', '=', 'None', ',', 'types', '=', 'None', ',', 'components', '=', 'None', ',', 'strict_bounds', '=', 'False', ')', ':', 'params', '=', '{', '"input"', ':', 'input_text', '}', 'if', 'session_token', ':', 'params', '[', '"sessiontoken"', ']', '=', 'session_token', 'if', 'offset', ':', 'params', '[', '"offset"', ']', '=', 'offset', 'if', 'location', ':', 'params', '[', '"location"', ']', '=', 'convert', '.', 'latlng', '(', 'location', ')', 'if', 'radius', ':', 'params', '[', '"radius"', ']', '=', 'radius', 'if', 'language', ':', 'params', '[', '"language"', ']', '=', 'language', 'if', 'types', ':', 'params', '[', '"types"', ']', '=', 'types', 'if', 'components', ':', 'if', 'len', '(', 'components', ')', '!=', '1', 'or', 'list', '(', 'components', '.', 'keys', '(', ')', ')', '[', '0', ']', '!=', '"country"', ':', 'raise', 'ValueError', '(', '"Only country components are supported"', ')', 'params', '[', '"components"', ']', '=', 'convert', '.', 'components', '(', 'components', ')', 'if', 'strict_bounds', ':', 'params', '[', '"strictbounds"', ']', '=', '"true"', 'url', '=', '"/maps/api/place/%sautocomplete/json"', '%', 'url_part', 'return', 'client', '.', '_request', '(', 'url', ',', 'params', ')', '.', 'get', '(', '"predictions"', ',', '[', ']', ')'] | Internal handler for ``autocomplete`` and ``autocomplete_query``.
See each method's docs for arg details. | ['Internal', 'handler', 'for', 'autocomplete', 'and', 'autocomplete_query', '.', 'See', 'each', 'method', 's', 'docs', 'for', 'arg', 'details', '.'] | train | https://github.com/googlemaps/google-maps-services-python/blob/7ed40b4d8df63479794c46ce29d03ed6083071d7/googlemaps/places.py#L517-L547 |
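A minimal usage sketch of the handler above, relying only on the signature shown in this row; the API key is a placeholder, and url_part='' selects the plain Place Autocomplete endpoint (passing 'query' would select Query Autocomplete):

import googlemaps
from googlemaps import places

gmaps = googlemaps.Client(key='YOUR-API-KEY')  # placeholder key
predictions = places._autocomplete(gmaps, '', 'Sydney Town Hall',
                                   language='en', components={'country': 'au'})
for prediction in predictions:
    print(prediction.get('description'))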
3,868 | harmsm/PyCmdMessenger | PyCmdMessenger/PyCmdMessenger.py | CmdMessenger._send_byte | def _send_byte(self,value):
"""
Convert a numerical value into an integer, then to a byte object. Check
bounds for byte.
"""
# Coerce to int. This will throw a ValueError if the value can't
# actually be converted.
if type(value) != int:
new_value = int(value)
if self.give_warnings:
w = "Coercing {} into int ({})".format(value,new_value)
warnings.warn(w,Warning)
value = new_value
# Range check
if value > 255 or value < 0:
err = "Value {} exceeds the size of the board's byte.".format(value)
raise OverflowError(err)
return struct.pack("B",value) | python | def _send_byte(self,value):
"""
Convert a numerical value into an integer, then to a byte object. Check
bounds for byte.
"""
# Coerce to int. This will throw a ValueError if the value can't
# actually be converted.
if type(value) != int:
new_value = int(value)
if self.give_warnings:
w = "Coercing {} into int ({})".format(value,new_value)
warnings.warn(w,Warning)
value = new_value
# Range check
if value > 255 or value < 0:
err = "Value {} exceeds the size of the board's byte.".format(value)
raise OverflowError(err)
return struct.pack("B",value) | ['def', '_send_byte', '(', 'self', ',', 'value', ')', ':', "# Coerce to int. This will throw a ValueError if the value can't", '# actually be converted.', 'if', 'type', '(', 'value', ')', '!=', 'int', ':', 'new_value', '=', 'int', '(', 'value', ')', 'if', 'self', '.', 'give_warnings', ':', 'w', '=', '"Coercing {} into int ({})"', '.', 'format', '(', 'value', ',', 'new_value', ')', 'warnings', '.', 'warn', '(', 'w', ',', 'Warning', ')', 'value', '=', 'new_value', '# Range check', 'if', 'value', '>', '255', 'or', 'value', '<', '0', ':', 'err', '=', '"Value {} exceeds the size of the board\'s byte."', '.', 'format', '(', 'value', ')', 'raise', 'OverflowError', '(', 'err', ')', 'return', 'struct', '.', 'pack', '(', '"B"', ',', 'value', ')'] | Convert a numerical value into an integer, then to a byte object. Check
bounds for byte. | ['Convert', 'a', 'numerical', 'value', 'into', 'an', 'integer', 'then', 'to', 'a', 'byte', 'object', '.', 'Check', 'bounds', 'for', 'byte', '.'] | train | https://github.com/harmsm/PyCmdMessenger/blob/215d6f9402262662a14a2996f532934339639a5b/PyCmdMessenger/PyCmdMessenger.py#L341-L362 |
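The coercion, bounds check and struct packing above can be exercised without a board or serial port; a standalone sketch of the same logic, pulled out of the class for illustration:

import struct
import warnings

def send_byte(value, give_warnings=True):
    # Same coercion / bounds logic as _send_byte above, outside the class.
    if type(value) != int:
        new_value = int(value)
        if give_warnings:
            warnings.warn("Coercing {} into int ({})".format(value, new_value), Warning)
        value = new_value
    if value > 255 or value < 0:
        raise OverflowError("Value {} exceeds the size of the board's byte.".format(value))
    return struct.pack("B", value)

print(send_byte(42))   # b'*'
print(send_byte(7.9))  # warns, then packs 7 as b'\x07'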
3,869 | manahl/arctic | arctic/chunkstore/chunkstore.py | ChunkStore.__update | def __update(self, sym, item, metadata=None, combine_method=None, chunk_range=None, audit=None):
'''
helper method used by update and append since they very closely
resemble each other. Really they differ only by the combine method.
append will combine existing data with new data (within a chunk),
whereas update will replace existing data with new data (within a
chunk).
'''
if not isinstance(item, (DataFrame, Series)):
raise Exception("Can only chunk DataFrames and Series")
self._arctic_lib.check_quota()
symbol = sym[SYMBOL]
if chunk_range is not None:
self.delete(symbol, chunk_range)
sym = self._get_symbol_info(symbol)
ops = []
meta_ops = []
chunker = CHUNKER_MAP[sym[CHUNKER]]
appended = 0
new_chunks = 0
for start, end, _, record in chunker.to_chunks(item, chunk_size=sym[CHUNK_SIZE]):
# read out matching chunks
df = self.read(symbol, chunk_range=chunker.to_range(start, end), filter_data=False)
# assuming they exist, update them and store the original chunk
# range for later use
if len(df) > 0:
record = combine_method(df, record)
if record is None or record.equals(df):
continue
sym[APPEND_COUNT] += len(record) - len(df)
appended += len(record) - len(df)
sym[LEN] += len(record) - len(df)
else:
sym[CHUNK_COUNT] += 1
new_chunks += 1
sym[LEN] += len(record)
data = SER_MAP[sym[SERIALIZER]].serialize(record)
meta = data[METADATA]
chunk_count = int(len(data[DATA]) / MAX_CHUNK_SIZE + 1)
seg_count = mongo_count(self._collection, filter={SYMBOL: symbol, START: start, END: end})
# remove old segments for this chunk in case we now have less
# segments than we did before
if seg_count > chunk_count:
self._collection.delete_many({SYMBOL: symbol,
START: start,
END: end,
SEGMENT: {'$gte': chunk_count}})
for i in xrange(chunk_count):
chunk = {DATA: Binary(data[DATA][i * MAX_CHUNK_SIZE: (i + 1) * MAX_CHUNK_SIZE])}
chunk[SEGMENT] = i
chunk[START] = start
chunk[END] = end
chunk[SYMBOL] = symbol
dates = [chunker.chunk_to_str(start), chunker.chunk_to_str(end), str(chunk[SEGMENT]).encode('ascii')]
sha = self._checksum(dates, data[DATA])
chunk[SHA] = sha
ops.append(pymongo.UpdateOne({SYMBOL: symbol,
START: start,
END: end,
SEGMENT: chunk[SEGMENT]},
{'$set': chunk}, upsert=True))
meta_ops.append(pymongo.UpdateOne({SYMBOL: symbol,
START: start,
END: end},
{'$set': meta}, upsert=True))
if ops:
self._collection.bulk_write(ops, ordered=False)
self._mdata.bulk_write(meta_ops, ordered=False)
sym[USERMETA] = metadata
self._symbols.replace_one({SYMBOL: symbol}, sym)
if audit is not None:
if new_chunks > 0:
audit['new_chunks'] = new_chunks
if appended > 0:
audit['appended_rows'] = appended
self._audit.insert_one(audit) | python | def __update(self, sym, item, metadata=None, combine_method=None, chunk_range=None, audit=None):
'''
helper method used by update and append since they very closely
resemble eachother. Really differ only by the combine method.
append will combine existing date with new data (within a chunk),
whereas update will replace existing data with new data (within a
chunk).
'''
if not isinstance(item, (DataFrame, Series)):
raise Exception("Can only chunk DataFrames and Series")
self._arctic_lib.check_quota()
symbol = sym[SYMBOL]
if chunk_range is not None:
self.delete(symbol, chunk_range)
sym = self._get_symbol_info(symbol)
ops = []
meta_ops = []
chunker = CHUNKER_MAP[sym[CHUNKER]]
appended = 0
new_chunks = 0
for start, end, _, record in chunker.to_chunks(item, chunk_size=sym[CHUNK_SIZE]):
# read out matching chunks
df = self.read(symbol, chunk_range=chunker.to_range(start, end), filter_data=False)
# assuming they exist, update them and store the original chunk
# range for later use
if len(df) > 0:
record = combine_method(df, record)
if record is None or record.equals(df):
continue
sym[APPEND_COUNT] += len(record) - len(df)
appended += len(record) - len(df)
sym[LEN] += len(record) - len(df)
else:
sym[CHUNK_COUNT] += 1
new_chunks += 1
sym[LEN] += len(record)
data = SER_MAP[sym[SERIALIZER]].serialize(record)
meta = data[METADATA]
chunk_count = int(len(data[DATA]) / MAX_CHUNK_SIZE + 1)
seg_count = mongo_count(self._collection, filter={SYMBOL: symbol, START: start, END: end})
# remove old segments for this chunk in case we now have less
# segments than we did before
if seg_count > chunk_count:
self._collection.delete_many({SYMBOL: symbol,
START: start,
END: end,
SEGMENT: {'$gte': chunk_count}})
for i in xrange(chunk_count):
chunk = {DATA: Binary(data[DATA][i * MAX_CHUNK_SIZE: (i + 1) * MAX_CHUNK_SIZE])}
chunk[SEGMENT] = i
chunk[START] = start
chunk[END] = end
chunk[SYMBOL] = symbol
dates = [chunker.chunk_to_str(start), chunker.chunk_to_str(end), str(chunk[SEGMENT]).encode('ascii')]
sha = self._checksum(dates, data[DATA])
chunk[SHA] = sha
ops.append(pymongo.UpdateOne({SYMBOL: symbol,
START: start,
END: end,
SEGMENT: chunk[SEGMENT]},
{'$set': chunk}, upsert=True))
meta_ops.append(pymongo.UpdateOne({SYMBOL: symbol,
START: start,
END: end},
{'$set': meta}, upsert=True))
if ops:
self._collection.bulk_write(ops, ordered=False)
self._mdata.bulk_write(meta_ops, ordered=False)
sym[USERMETA] = metadata
self._symbols.replace_one({SYMBOL: symbol}, sym)
if audit is not None:
if new_chunks > 0:
audit['new_chunks'] = new_chunks
if appended > 0:
audit['appended_rows'] = appended
self._audit.insert_one(audit) | ['def', '__update', '(', 'self', ',', 'sym', ',', 'item', ',', 'metadata', '=', 'None', ',', 'combine_method', '=', 'None', ',', 'chunk_range', '=', 'None', ',', 'audit', '=', 'None', ')', ':', 'if', 'not', 'isinstance', '(', 'item', ',', '(', 'DataFrame', ',', 'Series', ')', ')', ':', 'raise', 'Exception', '(', '"Can only chunk DataFrames and Series"', ')', 'self', '.', '_arctic_lib', '.', 'check_quota', '(', ')', 'symbol', '=', 'sym', '[', 'SYMBOL', ']', 'if', 'chunk_range', 'is', 'not', 'None', ':', 'self', '.', 'delete', '(', 'symbol', ',', 'chunk_range', ')', 'sym', '=', 'self', '.', '_get_symbol_info', '(', 'symbol', ')', 'ops', '=', '[', ']', 'meta_ops', '=', '[', ']', 'chunker', '=', 'CHUNKER_MAP', '[', 'sym', '[', 'CHUNKER', ']', ']', 'appended', '=', '0', 'new_chunks', '=', '0', 'for', 'start', ',', 'end', ',', '_', ',', 'record', 'in', 'chunker', '.', 'to_chunks', '(', 'item', ',', 'chunk_size', '=', 'sym', '[', 'CHUNK_SIZE', ']', ')', ':', '# read out matching chunks', 'df', '=', 'self', '.', 'read', '(', 'symbol', ',', 'chunk_range', '=', 'chunker', '.', 'to_range', '(', 'start', ',', 'end', ')', ',', 'filter_data', '=', 'False', ')', '# assuming they exist, update them and store the original chunk', '# range for later use', 'if', 'len', '(', 'df', ')', '>', '0', ':', 'record', '=', 'combine_method', '(', 'df', ',', 'record', ')', 'if', 'record', 'is', 'None', 'or', 'record', '.', 'equals', '(', 'df', ')', ':', 'continue', 'sym', '[', 'APPEND_COUNT', ']', '+=', 'len', '(', 'record', ')', '-', 'len', '(', 'df', ')', 'appended', '+=', 'len', '(', 'record', ')', '-', 'len', '(', 'df', ')', 'sym', '[', 'LEN', ']', '+=', 'len', '(', 'record', ')', '-', 'len', '(', 'df', ')', 'else', ':', 'sym', '[', 'CHUNK_COUNT', ']', '+=', '1', 'new_chunks', '+=', '1', 'sym', '[', 'LEN', ']', '+=', 'len', '(', 'record', ')', 'data', '=', 'SER_MAP', '[', 'sym', '[', 'SERIALIZER', ']', ']', '.', 'serialize', '(', 'record', ')', 'meta', '=', 'data', '[', 'METADATA', ']', 'chunk_count', '=', 'int', '(', 'len', '(', 'data', '[', 'DATA', ']', ')', '/', 'MAX_CHUNK_SIZE', '+', '1', ')', 'seg_count', '=', 'mongo_count', '(', 'self', '.', '_collection', ',', 'filter', '=', '{', 'SYMBOL', ':', 'symbol', ',', 'START', ':', 'start', ',', 'END', ':', 'end', '}', ')', '# remove old segments for this chunk in case we now have less', '# segments than we did before', 'if', 'seg_count', '>', 'chunk_count', ':', 'self', '.', '_collection', '.', 'delete_many', '(', '{', 'SYMBOL', ':', 'symbol', ',', 'START', ':', 'start', ',', 'END', ':', 'end', ',', 'SEGMENT', ':', '{', "'$gte'", ':', 'chunk_count', '}', '}', ')', 'for', 'i', 'in', 'xrange', '(', 'chunk_count', ')', ':', 'chunk', '=', '{', 'DATA', ':', 'Binary', '(', 'data', '[', 'DATA', ']', '[', 'i', '*', 'MAX_CHUNK_SIZE', ':', '(', 'i', '+', '1', ')', '*', 'MAX_CHUNK_SIZE', ']', ')', '}', 'chunk', '[', 'SEGMENT', ']', '=', 'i', 'chunk', '[', 'START', ']', '=', 'start', 'chunk', '[', 'END', ']', '=', 'end', 'chunk', '[', 'SYMBOL', ']', '=', 'symbol', 'dates', '=', '[', 'chunker', '.', 'chunk_to_str', '(', 'start', ')', ',', 'chunker', '.', 'chunk_to_str', '(', 'end', ')', ',', 'str', '(', 'chunk', '[', 'SEGMENT', ']', ')', '.', 'encode', '(', "'ascii'", ')', ']', 'sha', '=', 'self', '.', '_checksum', '(', 'dates', ',', 'data', '[', 'DATA', ']', ')', 'chunk', '[', 'SHA', ']', '=', 'sha', 'ops', '.', 'append', '(', 'pymongo', '.', 'UpdateOne', '(', '{', 'SYMBOL', ':', 'symbol', ',', 'START', ':', 'start', ',', 'END', ':', 'end', ',', 
'SEGMENT', ':', 'chunk', '[', 'SEGMENT', ']', '}', ',', '{', "'$set'", ':', 'chunk', '}', ',', 'upsert', '=', 'True', ')', ')', 'meta_ops', '.', 'append', '(', 'pymongo', '.', 'UpdateOne', '(', '{', 'SYMBOL', ':', 'symbol', ',', 'START', ':', 'start', ',', 'END', ':', 'end', '}', ',', '{', "'$set'", ':', 'meta', '}', ',', 'upsert', '=', 'True', ')', ')', 'if', 'ops', ':', 'self', '.', '_collection', '.', 'bulk_write', '(', 'ops', ',', 'ordered', '=', 'False', ')', 'self', '.', '_mdata', '.', 'bulk_write', '(', 'meta_ops', ',', 'ordered', '=', 'False', ')', 'sym', '[', 'USERMETA', ']', '=', 'metadata', 'self', '.', '_symbols', '.', 'replace_one', '(', '{', 'SYMBOL', ':', 'symbol', '}', ',', 'sym', ')', 'if', 'audit', 'is', 'not', 'None', ':', 'if', 'new_chunks', '>', '0', ':', 'audit', '[', "'new_chunks'", ']', '=', 'new_chunks', 'if', 'appended', '>', '0', ':', 'audit', '[', "'appended_rows'", ']', '=', 'appended', 'self', '.', '_audit', '.', 'insert_one', '(', 'audit', ')'] | helper method used by update and append since they very closely
resemble each other. Really they differ only by the combine method.
append will combine existing data with new data (within a chunk),
whereas update will replace existing data with new data (within a
chunk). | ['helper', 'method', 'used', 'by', 'update', 'and', 'append', 'since', 'they', 'very', 'closely', 'resemble', 'eachother', '.', 'Really', 'differ', 'only', 'by', 'the', 'combine', 'method', '.', 'append', 'will', 'combine', 'existing', 'date', 'with', 'new', 'data', '(', 'within', 'a', 'chunk', ')', 'whereas', 'update', 'will', 'replace', 'existing', 'data', 'with', 'new', 'data', '(', 'within', 'a', 'chunk', ')', '.'] | train | https://github.com/manahl/arctic/blob/57e110b6e182dbab00e7e214dc26f7d9ec47c120/arctic/chunkstore/chunkstore.py#L406-L491 |
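The segment-splitting arithmetic above (chunk_count and the MAX_CHUNK_SIZE slices) can be illustrated on its own; the constant and payload below are invented for the example:

MAX_CHUNK_SIZE = 4        # illustrative only; the real store uses a much larger constant
data = b'abcdefghij'      # pretend serialized chunk payload
chunk_count = int(len(data) / MAX_CHUNK_SIZE + 1)
segments = [data[i * MAX_CHUNK_SIZE:(i + 1) * MAX_CHUNK_SIZE] for i in range(chunk_count)]
print(chunk_count, segments)  # 3 [b'abcd', b'efgh', b'ij']

Note that when len(data) is an exact multiple of MAX_CHUNK_SIZE this formula still yields one extra, empty trailing segment, mirroring the loop above.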
3,870 | brocade/pynos | pynos/versions/ver_6/ver_6_0_1/yang/brocade_vlan.py | brocade_vlan.vlan_classifier_group_rule_name | def vlan_classifier_group_rule_name(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
vlan = ET.SubElement(config, "vlan", xmlns="urn:brocade.com:mgmt:brocade-vlan")
classifier = ET.SubElement(vlan, "classifier")
group = ET.SubElement(classifier, "group")
groupid_key = ET.SubElement(group, "groupid")
groupid_key.text = kwargs.pop('groupid')
oper_key = ET.SubElement(group, "oper")
oper_key.text = kwargs.pop('oper')
ruleid_key = ET.SubElement(group, "ruleid")
ruleid_key.text = kwargs.pop('ruleid')
rule_name = ET.SubElement(group, "rule-name")
rule_name.text = kwargs.pop('rule_name')
callback = kwargs.pop('callback', self._callback)
return callback(config) | python | def vlan_classifier_group_rule_name(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
vlan = ET.SubElement(config, "vlan", xmlns="urn:brocade.com:mgmt:brocade-vlan")
classifier = ET.SubElement(vlan, "classifier")
group = ET.SubElement(classifier, "group")
groupid_key = ET.SubElement(group, "groupid")
groupid_key.text = kwargs.pop('groupid')
oper_key = ET.SubElement(group, "oper")
oper_key.text = kwargs.pop('oper')
ruleid_key = ET.SubElement(group, "ruleid")
ruleid_key.text = kwargs.pop('ruleid')
rule_name = ET.SubElement(group, "rule-name")
rule_name.text = kwargs.pop('rule_name')
callback = kwargs.pop('callback', self._callback)
return callback(config) | ['def', 'vlan_classifier_group_rule_name', '(', 'self', ',', '*', '*', 'kwargs', ')', ':', 'config', '=', 'ET', '.', 'Element', '(', '"config"', ')', 'vlan', '=', 'ET', '.', 'SubElement', '(', 'config', ',', '"vlan"', ',', 'xmlns', '=', '"urn:brocade.com:mgmt:brocade-vlan"', ')', 'classifier', '=', 'ET', '.', 'SubElement', '(', 'vlan', ',', '"classifier"', ')', 'group', '=', 'ET', '.', 'SubElement', '(', 'classifier', ',', '"group"', ')', 'groupid_key', '=', 'ET', '.', 'SubElement', '(', 'group', ',', '"groupid"', ')', 'groupid_key', '.', 'text', '=', 'kwargs', '.', 'pop', '(', "'groupid'", ')', 'oper_key', '=', 'ET', '.', 'SubElement', '(', 'group', ',', '"oper"', ')', 'oper_key', '.', 'text', '=', 'kwargs', '.', 'pop', '(', "'oper'", ')', 'ruleid_key', '=', 'ET', '.', 'SubElement', '(', 'group', ',', '"ruleid"', ')', 'ruleid_key', '.', 'text', '=', 'kwargs', '.', 'pop', '(', "'ruleid'", ')', 'rule_name', '=', 'ET', '.', 'SubElement', '(', 'group', ',', '"rule-name"', ')', 'rule_name', '.', 'text', '=', 'kwargs', '.', 'pop', '(', "'rule_name'", ')', 'callback', '=', 'kwargs', '.', 'pop', '(', "'callback'", ',', 'self', '.', '_callback', ')', 'return', 'callback', '(', 'config', ')'] | Auto Generated Code | ['Auto', 'Generated', 'Code'] | train | https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_vlan.py#L117-L134 |
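The payload assembled above can be previewed without a device by rebuilding the same element tree with the standard library; the tag names and namespace come from the row itself, while the groupid/oper/ruleid/rule-name values are example inputs:

import xml.etree.ElementTree as ET

config = ET.Element("config")
vlan = ET.SubElement(config, "vlan", xmlns="urn:brocade.com:mgmt:brocade-vlan")
classifier = ET.SubElement(vlan, "classifier")
group = ET.SubElement(classifier, "group")
ET.SubElement(group, "groupid").text = "1"        # example value
ET.SubElement(group, "oper").text = "add"         # example value
ET.SubElement(group, "ruleid").text = "1"         # example value
ET.SubElement(group, "rule-name").text = "rule1"  # example value
print(ET.tostring(config).decode())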
3,871 | mdsol/rwslib | rwslib/extras/audit_event/parser.py | parse | def parse(data, eventer):
"""Parse the XML data, firing events from the eventer"""
parser = etree.XMLParser(target=ODMTargetParser(eventer))
return etree.XML(data, parser) | python | def parse(data, eventer):
"""Parse the XML data, firing events from the eventer"""
parser = etree.XMLParser(target=ODMTargetParser(eventer))
return etree.XML(data, parser) | ['def', 'parse', '(', 'data', ',', 'eventer', ')', ':', 'parser', '=', 'etree', '.', 'XMLParser', '(', 'target', '=', 'ODMTargetParser', '(', 'eventer', ')', ')', 'return', 'etree', '.', 'XML', '(', 'data', ',', 'parser', ')'] | Parse the XML data, firing events from the eventer | ['Parse', 'the', 'XML', 'data', 'firing', 'events', 'from', 'the', 'eventer'] | train | https://github.com/mdsol/rwslib/blob/1a86bc072d408c009ed1de8bf6e98a1769f54d18/rwslib/extras/audit_event/parser.py#L284-L287 |
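parse() above uses lxml's target-parser interface: the ODMTargetParser instance receives start/end/data/close callbacks while the ODM document streams, and etree.XML returns whatever close() returns. A minimal sketch of that pattern with a trivial stand-in target (independent of rwslib's own classes):

from lxml import etree

class CountingTarget(object):
    # Trivial stand-in for ODMTargetParser: counts the elements it sees.
    def __init__(self):
        self.count = 0
    def start(self, tag, attrib):
        self.count += 1
    def end(self, tag):
        pass
    def data(self, text):
        pass
    def close(self):
        return self.count

parser = etree.XMLParser(target=CountingTarget())
print(etree.XML(b"<ODM><ClinicalData/><ClinicalData/></ODM>", parser))  # prints 3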
3,872 | titusjan/argos | argos/repo/detailplugins/attr.py | AttributesPane._drawContents | def _drawContents(self, currentRti=None):
""" Draws the attributes of the currentRTI
"""
#logger.debug("_drawContents: {}".format(currentRti))
table = self.table
table.setUpdatesEnabled(False)
try:
table.clearContents()
verticalHeader = table.verticalHeader()
verticalHeader.setSectionResizeMode(QtWidgets.QHeaderView.Fixed)
attributes = currentRti.attributes if currentRti is not None else {}
table.setRowCount(len(attributes))
for row, (attrName, attrValue) in enumerate(sorted(attributes.items())):
attrStr = to_string(attrValue, decode_bytes='utf-8')
try:
type_str = type_name(attrValue)
except Exception as ex:
logger.exception(ex)
type_str = "<???>"
nameItem = QtWidgets.QTableWidgetItem(attrName)
nameItem.setToolTip(attrName)
table.setItem(row, self.COL_ATTR_NAME, nameItem)
valItem = QtWidgets.QTableWidgetItem(attrStr)
valItem.setToolTip(attrStr)
table.setItem(row, self.COL_VALUE, valItem)
table.setItem(row, self.COL_ELEM_TYPE, QtWidgets.QTableWidgetItem(type_str))
table.resizeRowToContents(row)
verticalHeader.setSectionResizeMode(QtWidgets.QHeaderView.ResizeToContents)
finally:
table.setUpdatesEnabled(True) | python | def _drawContents(self, currentRti=None):
""" Draws the attributes of the currentRTI
"""
#logger.debug("_drawContents: {}".format(currentRti))
table = self.table
table.setUpdatesEnabled(False)
try:
table.clearContents()
verticalHeader = table.verticalHeader()
verticalHeader.setSectionResizeMode(QtWidgets.QHeaderView.Fixed)
attributes = currentRti.attributes if currentRti is not None else {}
table.setRowCount(len(attributes))
for row, (attrName, attrValue) in enumerate(sorted(attributes.items())):
attrStr = to_string(attrValue, decode_bytes='utf-8')
try:
type_str = type_name(attrValue)
except Exception as ex:
logger.exception(ex)
type_str = "<???>"
nameItem = QtWidgets.QTableWidgetItem(attrName)
nameItem.setToolTip(attrName)
table.setItem(row, self.COL_ATTR_NAME, nameItem)
valItem = QtWidgets.QTableWidgetItem(attrStr)
valItem.setToolTip(attrStr)
table.setItem(row, self.COL_VALUE, valItem)
table.setItem(row, self.COL_ELEM_TYPE, QtWidgets.QTableWidgetItem(type_str))
table.resizeRowToContents(row)
verticalHeader.setSectionResizeMode(QtWidgets.QHeaderView.ResizeToContents)
finally:
table.setUpdatesEnabled(True) | ['def', '_drawContents', '(', 'self', ',', 'currentRti', '=', 'None', ')', ':', '#logger.debug("_drawContents: {}".format(currentRti))', 'table', '=', 'self', '.', 'table', 'table', '.', 'setUpdatesEnabled', '(', 'False', ')', 'try', ':', 'table', '.', 'clearContents', '(', ')', 'verticalHeader', '=', 'table', '.', 'verticalHeader', '(', ')', 'verticalHeader', '.', 'setSectionResizeMode', '(', 'QtWidgets', '.', 'QHeaderView', '.', 'Fixed', ')', 'attributes', '=', 'currentRti', '.', 'attributes', 'if', 'currentRti', 'is', 'not', 'None', 'else', '{', '}', 'table', '.', 'setRowCount', '(', 'len', '(', 'attributes', ')', ')', 'for', 'row', ',', '(', 'attrName', ',', 'attrValue', ')', 'in', 'enumerate', '(', 'sorted', '(', 'attributes', '.', 'items', '(', ')', ')', ')', ':', 'attrStr', '=', 'to_string', '(', 'attrValue', ',', 'decode_bytes', '=', "'utf-8'", ')', 'try', ':', 'type_str', '=', 'type_name', '(', 'attrValue', ')', 'except', 'Exception', 'as', 'ex', ':', 'logger', '.', 'exception', '(', 'ex', ')', 'type_str', '=', '"<???>"', 'nameItem', '=', 'QtWidgets', '.', 'QTableWidgetItem', '(', 'attrName', ')', 'nameItem', '.', 'setToolTip', '(', 'attrName', ')', 'table', '.', 'setItem', '(', 'row', ',', 'self', '.', 'COL_ATTR_NAME', ',', 'nameItem', ')', 'valItem', '=', 'QtWidgets', '.', 'QTableWidgetItem', '(', 'attrStr', ')', 'valItem', '.', 'setToolTip', '(', 'attrStr', ')', 'table', '.', 'setItem', '(', 'row', ',', 'self', '.', 'COL_VALUE', ',', 'valItem', ')', 'table', '.', 'setItem', '(', 'row', ',', 'self', '.', 'COL_ELEM_TYPE', ',', 'QtWidgets', '.', 'QTableWidgetItem', '(', 'type_str', ')', ')', 'table', '.', 'resizeRowToContents', '(', 'row', ')', 'verticalHeader', '.', 'setSectionResizeMode', '(', 'QtWidgets', '.', 'QHeaderView', '.', 'ResizeToContents', ')', 'finally', ':', 'table', '.', 'setUpdatesEnabled', '(', 'True', ')'] | Draws the attributes of the currentRTI | ['Draws', 'the', 'attributes', 'of', 'the', 'currentRTI'] | train | https://github.com/titusjan/argos/blob/20d0a3cae26c36ea789a5d219c02ca7df21279dd/argos/repo/detailplugins/attr.py#L51-L86 |
3,873 | ungarj/tilematrix | tilematrix/tmx/main.py | tile | def tile(ctx, point, zoom):
"""Print Tile containing POINT.."""
tile = TilePyramid(
ctx.obj['grid'],
tile_size=ctx.obj['tile_size'],
metatiling=ctx.obj['metatiling']
).tile_from_xy(*point, zoom=zoom)
if ctx.obj['output_format'] == 'Tile':
click.echo('%s %s %s' % tile.id)
elif ctx.obj['output_format'] == 'WKT':
click.echo(tile.bbox(pixelbuffer=ctx.obj['pixelbuffer']))
elif ctx.obj['output_format'] == 'GeoJSON':
click.echo(
geojson.dumps(
geojson.FeatureCollection([
geojson.Feature(
geometry=tile.bbox(pixelbuffer=ctx.obj['pixelbuffer']),
properties=dict(
zoom=tile.zoom,
row=tile.row,
col=tile.col
)
)
])
)
) | python | def tile(ctx, point, zoom):
"""Print Tile containing POINT.."""
tile = TilePyramid(
ctx.obj['grid'],
tile_size=ctx.obj['tile_size'],
metatiling=ctx.obj['metatiling']
).tile_from_xy(*point, zoom=zoom)
if ctx.obj['output_format'] == 'Tile':
click.echo('%s %s %s' % tile.id)
elif ctx.obj['output_format'] == 'WKT':
click.echo(tile.bbox(pixelbuffer=ctx.obj['pixelbuffer']))
elif ctx.obj['output_format'] == 'GeoJSON':
click.echo(
geojson.dumps(
geojson.FeatureCollection([
geojson.Feature(
geometry=tile.bbox(pixelbuffer=ctx.obj['pixelbuffer']),
properties=dict(
zoom=tile.zoom,
row=tile.row,
col=tile.col
)
)
])
)
) | ['def', 'tile', '(', 'ctx', ',', 'point', ',', 'zoom', ')', ':', 'tile', '=', 'TilePyramid', '(', 'ctx', '.', 'obj', '[', "'grid'", ']', ',', 'tile_size', '=', 'ctx', '.', 'obj', '[', "'tile_size'", ']', ',', 'metatiling', '=', 'ctx', '.', 'obj', '[', "'metatiling'", ']', ')', '.', 'tile_from_xy', '(', '*', 'point', ',', 'zoom', '=', 'zoom', ')', 'if', 'ctx', '.', 'obj', '[', "'output_format'", ']', '==', "'Tile'", ':', 'click', '.', 'echo', '(', "'%s %s %s'", '%', 'tile', '.', 'id', ')', 'elif', 'ctx', '.', 'obj', '[', "'output_format'", ']', '==', "'WKT'", ':', 'click', '.', 'echo', '(', 'tile', '.', 'bbox', '(', 'pixelbuffer', '=', 'ctx', '.', 'obj', '[', "'pixelbuffer'", ']', ')', ')', 'elif', 'ctx', '.', 'obj', '[', "'output_format'", ']', '==', "'GeoJSON'", ':', 'click', '.', 'echo', '(', 'geojson', '.', 'dumps', '(', 'geojson', '.', 'FeatureCollection', '(', '[', 'geojson', '.', 'Feature', '(', 'geometry', '=', 'tile', '.', 'bbox', '(', 'pixelbuffer', '=', 'ctx', '.', 'obj', '[', "'pixelbuffer'", ']', ')', ',', 'properties', '=', 'dict', '(', 'zoom', '=', 'tile', '.', 'zoom', ',', 'row', '=', 'tile', '.', 'row', ',', 'col', '=', 'tile', '.', 'col', ')', ')', ']', ')', ')', ')'] | Print Tile containing POINT.. | ['Print', 'Tile', 'containing', 'POINT', '..'] | train | https://github.com/ungarj/tilematrix/blob/6f8cd3b85f61434a7ce5d7b635c3ad8f18ccb268/tilematrix/tmx/main.py#L71-L96 |
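Outside the CLI wrapper above, the same lookup can be done directly with the TilePyramid API used in this row; the grid name, coordinates and zoom below are example values:

from tilematrix import TilePyramid

tp = TilePyramid("geodetic", tile_size=256, metatiling=1)
tile = tp.tile_from_xy(13.4, 52.5, zoom=5)    # lon/lat example point
print(tile.id)                                # (zoom, row, col)
print(tile.bbox(pixelbuffer=0))               # tile bounds geometry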
3,874 | gem/oq-engine | openquake/hazardlib/near_fault.py | directp | def directp(node0, node1, node2, node3, hypocenter, reference, pp):
"""
Get the Direct Point and the corresponding E-path as described in
Spudich et al. (2013). This method also provides a logical variable
stating if the DPP calculation must consider the neighbouring patch.
To define the intersection point (Pd) of the PpPh line segment and the fault plane,
we obtain the intersection points (Pd) with each side of the fault plane, and
check which intersection point (Pd) is the one fitting the definition in
the Chiou and Spudich (2014) directivity model.
There are two possible locations for Pd. In the first case, Pd lies on the side of
the fault patch when Pp is not inside the fault patch. In the second case,
Pp is inside the fault patch, and then Pd=Pp.
For the first case, it follows three conditions:
1. the PpPh and PdPh line vector are the same,
2. PpPh >= PdPh,
3. Pd is not inside the fault patch.
If we cannot find a solution among the four possible intersection points
for the first case, we check whether the intersection point fits the second case
by checking if Pp is inside the fault patch.
Because of the coordinate system mapping (from the geographic system to
the Cartesian system), we allow an error when we check the location. The allowed
error keeps increasing after each loop in which no solution for the two
cases is found, until a solution is obtained.
:param node0:
:class:`~openquake.hazardlib.geo.point.Point` object
representing the location of one vertices on the target fault
segment.
:param node1:
:class:`~openquake.hazardlib.geo.point.Point` object
representing the location of one vertices on the target fault
segment. Note, the order should be clockwise.
:param node2:
:class:`~openquake.hazardlib.geo.point.Point` object
representing the location of one vertices on the target fault
segment. Note, the order should be clockwise.
:param node3:
:class:`~openquake.hazardlib.geo.point.Point` object
representing the location of one vertices on the target fault
segment. Note, the order should be clockwise.
:param hypocenter:
:class:`~openquake.hazardlib.geo.point.Point` object
representing the location of floating hypocenter on each segment
calculation. In the method, we take the direction point of the
previous fault patch as hypocentre for the current fault patch.
:param reference:
:class:`~openquake.hazardlib.geo.point.Point` object
representing the location of reference point for projection
:param pp:
the projection of the site onto the plane containing the fault
slipped area. A numpy array.
:returns:
Pd, a numpy array, representing the location of direction point
E, the distance from direction point to hypocentre.
go_next_patch, flag indicates if the calculation goes on the next
fault patch. 1: yes, 0: no.
"""
# Find the intersection point Pd, by checking if the PdPh share the
# same vector with PpPh, and PpPh >= PdPh
# Transform to xyz coordinate
node0_xyz = get_xyz_from_ll(node0, reference)
node1_xyz = get_xyz_from_ll(node1, reference)
node2_xyz = get_xyz_from_ll(node2, reference)
node3_xyz = get_xyz_from_ll(node3, reference)
hypocenter_xyz = get_xyz_from_ll(hypocenter, reference)
hypocenter_xyz = np.array(hypocenter_xyz).flatten()
pp_xyz = pp
e = []
# Loop each segments on the patch to find Pd
segment_s = [node0_xyz, node1_xyz, node2_xyz, node3_xyz]
segment_e = [node1_xyz, node2_xyz, node3_xyz, node0_xyz]
# set buffering bu
buf = 0.0001
atol = 0.0001
loop = True
exit_flag = False
looptime = 0.
while loop:
x_min = np.min(np.array([node0_xyz[0], node1_xyz[0], node2_xyz[0],
node3_xyz[0]])) - buf
x_max = np.max(np.array([node0_xyz[0], node1_xyz[0], node2_xyz[0],
node3_xyz[0]])) + buf
y_min = np.min(np.array([node0_xyz[1], node1_xyz[1], node2_xyz[1],
node3_xyz[1]])) - buf
y_max = np.max(np.array([node0_xyz[1], node1_xyz[1], node2_xyz[1],
node3_xyz[1]])) + buf
n_seg = 0
exit_flag = False
for (seg_s, seg_e) in zip(segment_s, segment_e):
seg_s = np.array(seg_s).flatten()
seg_e = np.array(seg_e).flatten()
p_intersect, vector1, vector2, vector3, vector4 = _intersection(
seg_s, seg_e, pp_xyz, hypocenter_xyz)
ppph = dst.pdist([pp, hypocenter_xyz])
pdph = dst.pdist([p_intersect.flatten(), hypocenter_xyz])
n_seg = n_seg + 1
# Check that the directions of the hyp-pp and hyp-pd vectors
# are the same.
if (np.allclose(vector1.flatten(), vector2,
atol=atol, rtol=0.)):
if ((np.allclose(vector3.flatten(), vector4, atol=atol,
rtol=0.))):
# Check if ppph >= pdph.
if (ppph >= pdph):
if (p_intersect[0] >= x_min) & (p_intersect[0] <=
x_max):
if (p_intersect[1] >= y_min) & (p_intersect[1]
<= y_max):
e = pdph
pd = p_intersect
exit_flag = True
break
# when the pp located within the fault rupture plane, e = ppph
if not e:
if (pp_xyz[0] >= x_min) & (pp_xyz[0] <= x_max):
if (pp_xyz[1] >= y_min) & (pp_xyz[1] <= y_max):
pd = pp_xyz
e = ppph
exit_flag = True
if exit_flag:
break
if not e:
looptime += 1
atol = 0.0001 * looptime
buf = 0.0001 * looptime
# if pd is located at 2nd fault segment, then the DPP calculation will
# keep going on the next fault patch
if n_seg == 2:
go_next_patch = True
else:
go_next_patch = False
return pd, e, go_next_patch | python | def directp(node0, node1, node2, node3, hypocenter, reference, pp):
"""
Get the Direct Point and the corresponding E-path as described in
Spudich et al. (2013). This method also provides a logical variable
stating if the DPP calculation must consider the neighbouring patch.
To define the intersection point(Pd) of PpPh line segment and fault plane,
we obtain the intersection points (Pd) with each side of the fault plane, and
check which intersection point (Pd) is the one fitting the definition in
the Chiou and Spudich (2014) directivity model.
There are two possible locations for Pd. In the first case, Pd lies on a side
of the fault patch, because Pp is not inside the fault patch. In the second
case, Pp is inside the fault patch, and then Pd = Pp.
For the first case, three conditions must hold:
1. the PpPh and PdPh line vectors are the same,
2. PpPh >= PdPh,
3. Pd is not inside the fault patch.
If no solution is found among the four possible intersection points for the
first case, we check whether the second case applies by checking if Pp is
inside the fault patch.
Because of the coordinate system mapping (from the geographic system to the
Cartesian system), we allow a tolerance when we check the location. The
allowed tolerance keeps increasing after each loop in which no solution is
found for either case, until a solution is obtained.
:param node0:
:class:`~openquake.hazardlib.geo.point.Point` object
representing the location of one vertex of the target fault
segment.
:param node1:
:class:`~openquake.hazardlib.geo.point.Point` object
representing the location of one vertex of the target fault
segment. Note: the order should be clockwise.
:param node2:
:class:`~openquake.hazardlib.geo.point.Point` object
representing the location of one vertex of the target fault
segment. Note: the order should be clockwise.
:param node3:
:class:`~openquake.hazardlib.geo.point.Point` object
representing the location of one vertex of the target fault
segment. Note: the order should be clockwise.
:param hypocenter:
:class:`~openquake.hazardlib.geo.point.Point` object
representing the location of floating hypocenter on each segment
calculation. In the method, we take the direction point of the
previous fault patch as hypocentre for the current fault patch.
:param reference:
:class:`~openquake.hazardlib.geo.point.Point` object
representing the location of reference point for projection
:param pp:
the projection of the site onto the plane containing the fault
slipped area. A numpy array.
:returns:
Pd, a numpy array, representing the location of direction point
E, the distance from direction point to hypocentre.
go_next_patch, flag indicates if the calculation goes on the next
fault patch. 1: yes, 0: no.
"""
# Find the intersection point Pd, by checking if the PdPh share the
# same vector with PpPh, and PpPh >= PdPh
# Transform to xyz coordinate
node0_xyz = get_xyz_from_ll(node0, reference)
node1_xyz = get_xyz_from_ll(node1, reference)
node2_xyz = get_xyz_from_ll(node2, reference)
node3_xyz = get_xyz_from_ll(node3, reference)
hypocenter_xyz = get_xyz_from_ll(hypocenter, reference)
hypocenter_xyz = np.array(hypocenter_xyz).flatten()
pp_xyz = pp
e = []
# Loop each segments on the patch to find Pd
segment_s = [node0_xyz, node1_xyz, node2_xyz, node3_xyz]
segment_e = [node1_xyz, node2_xyz, node3_xyz, node0_xyz]
# set buffering bu
buf = 0.0001
atol = 0.0001
loop = True
exit_flag = False
looptime = 0.
while loop:
x_min = np.min(np.array([node0_xyz[0], node1_xyz[0], node2_xyz[0],
node3_xyz[0]])) - buf
x_max = np.max(np.array([node0_xyz[0], node1_xyz[0], node2_xyz[0],
node3_xyz[0]])) + buf
y_min = np.min(np.array([node0_xyz[1], node1_xyz[1], node2_xyz[1],
node3_xyz[1]])) - buf
y_max = np.max(np.array([node0_xyz[1], node1_xyz[1], node2_xyz[1],
node3_xyz[1]])) + buf
n_seg = 0
exit_flag = False
for (seg_s, seg_e) in zip(segment_s, segment_e):
seg_s = np.array(seg_s).flatten()
seg_e = np.array(seg_e).flatten()
p_intersect, vector1, vector2, vector3, vector4 = _intersection(
seg_s, seg_e, pp_xyz, hypocenter_xyz)
ppph = dst.pdist([pp, hypocenter_xyz])
pdph = dst.pdist([p_intersect.flatten(), hypocenter_xyz])
n_seg = n_seg + 1
# Check that the directions of the hyp-pp and hyp-pd vectors
# are the same.
if (np.allclose(vector1.flatten(), vector2,
atol=atol, rtol=0.)):
if ((np.allclose(vector3.flatten(), vector4, atol=atol,
rtol=0.))):
# Check if ppph >= pdph.
if (ppph >= pdph):
if (p_intersect[0] >= x_min) & (p_intersect[0] <=
x_max):
if (p_intersect[1] >= y_min) & (p_intersect[1]
<= y_max):
e = pdph
pd = p_intersect
exit_flag = True
break
# when the pp located within the fault rupture plane, e = ppph
if not e:
if (pp_xyz[0] >= x_min) & (pp_xyz[0] <= x_max):
if (pp_xyz[1] >= y_min) & (pp_xyz[1] <= y_max):
pd = pp_xyz
e = ppph
exit_flag = True
if exit_flag:
break
if not e:
looptime += 1
atol = 0.0001 * looptime
buf = 0.0001 * looptime
# if pd is located at 2nd fault segment, then the DPP calculation will
# keep going on the next fault patch
if n_seg == 2:
go_next_patch = True
else:
go_next_patch = False
return pd, e, go_next_patch | ['def', 'directp', '(', 'node0', ',', 'node1', ',', 'node2', ',', 'node3', ',', 'hypocenter', ',', 'reference', ',', 'pp', ')', ':', '# Find the intersection point Pd, by checking if the PdPh share the', '# same vector with PpPh, and PpPh >= PdPh', '# Transform to xyz coordinate', 'node0_xyz', '=', 'get_xyz_from_ll', '(', 'node0', ',', 'reference', ')', 'node1_xyz', '=', 'get_xyz_from_ll', '(', 'node1', ',', 'reference', ')', 'node2_xyz', '=', 'get_xyz_from_ll', '(', 'node2', ',', 'reference', ')', 'node3_xyz', '=', 'get_xyz_from_ll', '(', 'node3', ',', 'reference', ')', 'hypocenter_xyz', '=', 'get_xyz_from_ll', '(', 'hypocenter', ',', 'reference', ')', 'hypocenter_xyz', '=', 'np', '.', 'array', '(', 'hypocenter_xyz', ')', '.', 'flatten', '(', ')', 'pp_xyz', '=', 'pp', 'e', '=', '[', ']', '# Loop each segments on the patch to find Pd', 'segment_s', '=', '[', 'node0_xyz', ',', 'node1_xyz', ',', 'node2_xyz', ',', 'node3_xyz', ']', 'segment_e', '=', '[', 'node1_xyz', ',', 'node2_xyz', ',', 'node3_xyz', ',', 'node0_xyz', ']', '# set buffering bu', 'buf', '=', '0.0001', 'atol', '=', '0.0001', 'loop', '=', 'True', 'exit_flag', '=', 'False', 'looptime', '=', '0.', 'while', 'loop', ':', 'x_min', '=', 'np', '.', 'min', '(', 'np', '.', 'array', '(', '[', 'node0_xyz', '[', '0', ']', ',', 'node1_xyz', '[', '0', ']', ',', 'node2_xyz', '[', '0', ']', ',', 'node3_xyz', '[', '0', ']', ']', ')', ')', '-', 'buf', 'x_max', '=', 'np', '.', 'max', '(', 'np', '.', 'array', '(', '[', 'node0_xyz', '[', '0', ']', ',', 'node1_xyz', '[', '0', ']', ',', 'node2_xyz', '[', '0', ']', ',', 'node3_xyz', '[', '0', ']', ']', ')', ')', '+', 'buf', 'y_min', '=', 'np', '.', 'min', '(', 'np', '.', 'array', '(', '[', 'node0_xyz', '[', '1', ']', ',', 'node1_xyz', '[', '1', ']', ',', 'node2_xyz', '[', '1', ']', ',', 'node3_xyz', '[', '1', ']', ']', ')', ')', '-', 'buf', 'y_max', '=', 'np', '.', 'max', '(', 'np', '.', 'array', '(', '[', 'node0_xyz', '[', '1', ']', ',', 'node1_xyz', '[', '1', ']', ',', 'node2_xyz', '[', '1', ']', ',', 'node3_xyz', '[', '1', ']', ']', ')', ')', '+', 'buf', 'n_seg', '=', '0', 'exit_flag', '=', 'False', 'for', '(', 'seg_s', ',', 'seg_e', ')', 'in', 'zip', '(', 'segment_s', ',', 'segment_e', ')', ':', 'seg_s', '=', 'np', '.', 'array', '(', 'seg_s', ')', '.', 'flatten', '(', ')', 'seg_e', '=', 'np', '.', 'array', '(', 'seg_e', ')', '.', 'flatten', '(', ')', 'p_intersect', ',', 'vector1', ',', 'vector2', ',', 'vector3', ',', 'vector4', '=', '_intersection', '(', 'seg_s', ',', 'seg_e', ',', 'pp_xyz', ',', 'hypocenter_xyz', ')', 'ppph', '=', 'dst', '.', 'pdist', '(', '[', 'pp', ',', 'hypocenter_xyz', ']', ')', 'pdph', '=', 'dst', '.', 'pdist', '(', '[', 'p_intersect', '.', 'flatten', '(', ')', ',', 'hypocenter_xyz', ']', ')', 'n_seg', '=', 'n_seg', '+', '1', '# Check that the direction of the hyp-pp and hyp-pd vectors', '# have are the same.', 'if', '(', 'np', '.', 'allclose', '(', 'vector1', '.', 'flatten', '(', ')', ',', 'vector2', ',', 'atol', '=', 'atol', ',', 'rtol', '=', '0.', ')', ')', ':', 'if', '(', '(', 'np', '.', 'allclose', '(', 'vector3', '.', 'flatten', '(', ')', ',', 'vector4', ',', 'atol', '=', 'atol', ',', 'rtol', '=', '0.', ')', ')', ')', ':', '# Check if ppph >= pdph.', 'if', '(', 'ppph', '>=', 'pdph', ')', ':', 'if', '(', 'p_intersect', '[', '0', ']', '>=', 'x_min', ')', '&', '(', 'p_intersect', '[', '0', ']', '<=', 'x_max', ')', ':', 'if', '(', 'p_intersect', '[', '1', ']', '>=', 'y_min', ')', '&', '(', 'p_intersect', '[', '1', ']', '<=', 'y_max', ')', ':', 
'e', '=', 'pdph', 'pd', '=', 'p_intersect', 'exit_flag', '=', 'True', 'break', '# when the pp located within the fault rupture plane, e = ppph', 'if', 'not', 'e', ':', 'if', '(', 'pp_xyz', '[', '0', ']', '>=', 'x_min', ')', '&', '(', 'pp_xyz', '[', '0', ']', '<=', 'x_max', ')', ':', 'if', '(', 'pp_xyz', '[', '1', ']', '>=', 'y_min', ')', '&', '(', 'pp_xyz', '[', '1', ']', '<=', 'y_max', ')', ':', 'pd', '=', 'pp_xyz', 'e', '=', 'ppph', 'exit_flag', '=', 'True', 'if', 'exit_flag', ':', 'break', 'if', 'not', 'e', ':', 'looptime', '+=', '1', 'atol', '=', '0.0001', '*', 'looptime', 'buf', '=', '0.0001', '*', 'looptime', '# if pd is located at 2nd fault segment, then the DPP calculation will', '# keep going on the next fault patch', 'if', 'n_seg', '==', '2', ':', 'go_next_patch', '=', 'True', 'else', ':', 'go_next_patch', '=', 'False', 'return', 'pd', ',', 'e', ',', 'go_next_patch'] | Get the Direct Point and the corresponding E-path as described in
Spudich et al. (2013). This method also provides a logical variable
stating if the DPP calculation must consider the neighbouring patch.
To define the intersection point(Pd) of PpPh line segment and fault plane,
we obtain the intersection points (Pd) with each side of the fault plane, and
check which intersection point (Pd) is the one fitting the definition in
the Chiou and Spudich (2014) directivity model.
There are two possible locations for Pd. In the first case, Pd lies on a side
of the fault patch, because Pp is not inside the fault patch. In the second
case, Pp is inside the fault patch, and then Pd = Pp.
For the first case, three conditions must hold:
1. the PpPh and PdPh line vectors are the same,
2. PpPh >= PdPh,
3. Pd is not inside the fault patch.
If no solution is found among the four possible intersection points for the
first case, we check whether the second case applies by checking if Pp is
inside the fault patch.
Because of the coordinate system mapping (from the geographic system to the
Cartesian system), we allow a tolerance when we check the location. The
allowed tolerance keeps increasing after each loop in which no solution is
found for either case, until a solution is obtained.
:param node0:
:class:`~openquake.hazardlib.geo.point.Point` object
representing the location of one vertex of the target fault
segment.
:param node1:
:class:`~openquake.hazardlib.geo.point.Point` object
representing the location of one vertex of the target fault
segment. Note: the order should be clockwise.
:param node2:
:class:`~openquake.hazardlib.geo.point.Point` object
representing the location of one vertex of the target fault
segment. Note: the order should be clockwise.
:param node3:
:class:`~openquake.hazardlib.geo.point.Point` object
representing the location of one vertex of the target fault
segment. Note: the order should be clockwise.
:param hypocenter:
:class:`~openquake.hazardlib.geo.point.Point` object
representing the location of floating hypocenter on each segment
calculation. In the method, we take the direction point of the
previous fault patch as hypocentre for the current fault patch.
:param reference:
:class:`~openquake.hazardlib.geo.point.Point` object
representing the location of reference point for projection
:param pp:
the projection of the site onto the plane containing the fault
slipped area. A numpy array.
:returns:
Pd, a numpy array, representing the location of direction point
E, the distance from direction point to hypocentre.
go_next_patch, flag indicates if the calculation goes on the next
fault patch. 1: yes, 0: no. | ['Get', 'the', 'Direct', 'Point', 'and', 'the', 'corresponding', 'E', '-', 'path', 'as', 'described', 'in', 'Spudich', 'et', 'al', '.', '(', '2013', ')', '.', 'This', 'method', 'also', 'provides', 'a', 'logical', 'variable', 'stating', 'if', 'the', 'DPP', 'calculation', 'must', 'consider', 'the', 'neighbouring', 'patch', '.', 'To', 'define', 'the', 'intersection', 'point', '(', 'Pd', ')', 'of', 'PpPh', 'line', 'segment', 'and', 'fault', 'plane', 'we', 'obtain', 'the', 'intersection', 'points', '(', 'Pd', ')', 'with', 'each', 'side', 'of', 'fault', 'plan', 'and', 'check', 'which', 'intersection', 'point', '(', 'Pd', ')', 'is', 'the', 'one', 'fitting', 'the', 'definition', 'in', 'the', 'Chiou', 'and', 'Spudich', '(', '2014', ')', 'directivity', 'model', '.', 'Two', 'possible', 'locations', 'for', 'Pd', 'the', 'first', 'case', 'Pd', 'locates', 'on', 'the', 'side', 'of', 'the', 'fault', 'patch', 'when', 'Pp', 'is', 'not', 'inside', 'the', 'fault', 'patch', '.', 'The', 'second', 'case', 'is', 'when', 'Pp', 'is', 'inside', 'the', 'fault', 'patch', 'then', 'Pd', '=', 'Pp', '.'] | train | https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hazardlib/near_fault.py#L338-L484 |
3,875 | danilobellini/audiolazy | audiolazy/lazy_itertools.py | accumulate | def accumulate(iterable):
" Return series of accumulated sums. "
iterator = iter(iterable)
sum_data = next(iterator)
yield sum_data
for el in iterator:
sum_data += el
yield sum_data | python | def accumulate(iterable):
" Return series of accumulated sums. "
iterator = iter(iterable)
sum_data = next(iterator)
yield sum_data
for el in iterator:
sum_data += el
yield sum_data | ['def', 'accumulate', '(', 'iterable', ')', ':', 'iterator', '=', 'iter', '(', 'iterable', ')', 'sum_data', '=', 'next', '(', 'iterator', ')', 'yield', 'sum_data', 'for', 'el', 'in', 'iterator', ':', 'sum_data', '+=', 'el', 'yield', 'sum_data'] | Return series of accumulated sums. | ['Return', 'series', 'of', 'accumulated', 'sums', '.'] | train | https://github.com/danilobellini/audiolazy/blob/dba0a278937909980ed40b976d866b8e97c35dee/audiolazy/lazy_itertools.py#L69-L76 |
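A quick usage sketch of the generator defined above; the input values are illustrative, and in the installed audiolazy package the function may additionally be wrapped to return a Stream:
    list(accumulate([1, 2, 3, 4]))  # -> [1, 3, 6, 10]
    list(accumulate([5]))           # -> [5]; a single element is yielded unchanged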
3,876 | JustinLovinger/optimal | optimal/algorithms/baseline.py | RandomReal._generate_solution | def _generate_solution(self):
"""Return a single random solution."""
return common.random_real_solution(
self._solution_size, self._lower_bounds, self._upper_bounds) | python | def _generate_solution(self):
"""Return a single random solution."""
return common.random_real_solution(
self._solution_size, self._lower_bounds, self._upper_bounds) | ['def', '_generate_solution', '(', 'self', ')', ':', 'return', 'common', '.', 'random_real_solution', '(', 'self', '.', '_solution_size', ',', 'self', '.', '_lower_bounds', ',', 'self', '.', '_upper_bounds', ')'] | Return a single random solution. | ['Return', 'a', 'single', 'random', 'solution', '.'] | train | https://github.com/JustinLovinger/optimal/blob/ab48a4961697338cc32d50e3a6b06ac989e39c3f/optimal/algorithms/baseline.py#L108-L111 |
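common.random_real_solution is not shown in this row; a plausible sketch of what it is assumed to do (one uniform draw per dimension, between that dimension's bounds) is:
    import random
    lower_bounds, upper_bounds = [0.0, -1.0], [1.0, 1.0]   # illustrative bounds
    solution = [random.uniform(lo, hi) for lo, hi in zip(lower_bounds, upper_bounds)]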
3,877 | resonai/ybt | yabt/buildcontext.py | BuildContext.register_target | def register_target(self, target: Target):
"""Register a `target` instance in this build context.
A registered target is saved in the `targets` map and in the
`targets_by_module` map, but is not added to the target graph until
target extraction is completed (thread safety considerations).
"""
if target.name in self.targets:
first = self.targets[target.name]
raise NameError(
'Target with name "{0.name}" ({0.builder_name} from module '
'"{1}") already exists - defined first as '
'{2.builder_name} in module "{3}"'.format(
target, split_build_module(target.name),
first, split_build_module(first.name)))
self.targets[target.name] = target
self.targets_by_module[split_build_module(target.name)].add(
target.name) | python | def register_target(self, target: Target):
"""Register a `target` instance in this build context.
A registered target is saved in the `targets` map and in the
`targets_by_module` map, but is not added to the target graph until
target extraction is completed (thread safety considerations).
"""
if target.name in self.targets:
first = self.targets[target.name]
raise NameError(
'Target with name "{0.name}" ({0.builder_name} from module '
'"{1}") already exists - defined first as '
'{2.builder_name} in module "{3}"'.format(
target, split_build_module(target.name),
first, split_build_module(first.name)))
self.targets[target.name] = target
self.targets_by_module[split_build_module(target.name)].add(
target.name) | ['def', 'register_target', '(', 'self', ',', 'target', ':', 'Target', ')', ':', 'if', 'target', '.', 'name', 'in', 'self', '.', 'targets', ':', 'first', '=', 'self', '.', 'targets', '[', 'target', '.', 'name', ']', 'raise', 'NameError', '(', '\'Target with name "{0.name}" ({0.builder_name} from module \'', '\'"{1}") already exists - defined first as \'', '\'{2.builder_name} in module "{3}"\'', '.', 'format', '(', 'target', ',', 'split_build_module', '(', 'target', '.', 'name', ')', ',', 'first', ',', 'split_build_module', '(', 'first', '.', 'name', ')', ')', ')', 'self', '.', 'targets', '[', 'target', '.', 'name', ']', '=', 'target', 'self', '.', 'targets_by_module', '[', 'split_build_module', '(', 'target', '.', 'name', ')', ']', '.', 'add', '(', 'target', '.', 'name', ')'] | Register a `target` instance in this build context.
A registered target is saved in the `targets` map and in the
`targets_by_module` map, but is not added to the target graph until
target extraction is completed (thread safety considerations). | ['Register', 'a', 'target', 'instance', 'in', 'this', 'build', 'context', '.'] | train | https://github.com/resonai/ybt/blob/5b40df0922ef3383eb85f2b04a26a2db4b81b3fd/yabt/buildcontext.py#L148-L165 |
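The duplicate-name guard above boils down to a dictionary membership check before insertion; a standalone sketch with illustrative names (not the yabt API):
    targets = {}
    def register(name, target):
        if name in targets:
            raise NameError('Target with name "%s" already exists' % name)
        targets[name] = target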
3,878 | mozilla/django-tidings | tidings/models.py | multi_raw | def multi_raw(query, params, models, model_to_fields):
"""Scoop multiple model instances out of the DB at once, given a query that
returns all fields of each.
Return an iterable of sequences of model instances parallel to the
``models`` sequence of classes. For example::
[(<User such-and-such>, <Watch such-and-such>), ...]
"""
cursor = connections[router.db_for_read(models[0])].cursor()
cursor.execute(query, params)
rows = cursor.fetchall()
for row in rows:
row_iter = iter(row)
yield [model_class(**dict((a, next(row_iter))
for a in model_to_fields[model_class]))
for model_class in models] | python | def multi_raw(query, params, models, model_to_fields):
"""Scoop multiple model instances out of the DB at once, given a query that
returns all fields of each.
Return an iterable of sequences of model instances parallel to the
``models`` sequence of classes. For example::
[(<User such-and-such>, <Watch such-and-such>), ...]
"""
cursor = connections[router.db_for_read(models[0])].cursor()
cursor.execute(query, params)
rows = cursor.fetchall()
for row in rows:
row_iter = iter(row)
yield [model_class(**dict((a, next(row_iter))
for a in model_to_fields[model_class]))
for model_class in models] | ['def', 'multi_raw', '(', 'query', ',', 'params', ',', 'models', ',', 'model_to_fields', ')', ':', 'cursor', '=', 'connections', '[', 'router', '.', 'db_for_read', '(', 'models', '[', '0', ']', ')', ']', '.', 'cursor', '(', ')', 'cursor', '.', 'execute', '(', 'query', ',', 'params', ')', 'rows', '=', 'cursor', '.', 'fetchall', '(', ')', 'for', 'row', 'in', 'rows', ':', 'row_iter', '=', 'iter', '(', 'row', ')', 'yield', '[', 'model_class', '(', '*', '*', 'dict', '(', '(', 'a', ',', 'next', '(', 'row_iter', ')', ')', 'for', 'a', 'in', 'model_to_fields', '[', 'model_class', ']', ')', ')', 'for', 'model_class', 'in', 'models', ']'] | Scoop multiple model instances out of the DB at once, given a query that
returns all fields of each.
Return an iterable of sequences of model instances parallel to the
``models`` sequence of classes. For example::
[(<User such-and-such>, <Watch such-and-such>), ...] | ['Scoop', 'multiple', 'model', 'instances', 'out', 'of', 'the', 'DB', 'at', 'once', 'given', 'a', 'query', 'that', 'returns', 'all', 'fields', 'of', 'each', '.'] | train | https://github.com/mozilla/django-tidings/blob/b2895b3cdec6aae18315afcceb92bb16317f0f96/tidings/models.py#L16-L34 |
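A hedged usage sketch for multi_raw; the query, table names, and field lists are illustrative and must match the real schema, and the SELECT column order must match the order of the fields listed per model:
    from django.contrib.auth.models import User
    from tidings.models import Watch
    query = ('SELECT u.id, u.username, w.id, w.event_type '
             'FROM tidings_watch w JOIN auth_user u ON u.id = w.user_id '
             'WHERE w.event_type = %s')
    model_to_fields = {User: ['id', 'username'], Watch: ['id', 'event_type']}
    for user, watch in multi_raw(query, ['new_reply'], [User, Watch], model_to_fields):
        print(user.username, watch.event_type)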
3,879 | swistakm/graceful | src/graceful/authentication.py | BaseAuthenticationMiddleware.process_resource | def process_resource(self, req, resp, resource, uri_kwargs=None):
"""Process resource after routing to it.
This is a basic falcon middleware handler.
Args:
req (falcon.Request): request object
resp (falcon.Response): response object
resource (object): resource object matched by falcon router
uri_kwargs (dict): additional keyword argument from uri template.
For ``falcon<1.0.0`` this is always ``None``
"""
if 'user' in req.context:
return
identifier = self.identify(req, resp, resource, uri_kwargs)
user = self.try_storage(identifier, req, resp, resource, uri_kwargs)
if user is not None:
req.context['user'] = user
# if did not succeed then we need to add this to list of available
# challenges.
elif self.challenge is not None:
req.context.setdefault(
'challenges', list()
).append(self.challenge) | python | def process_resource(self, req, resp, resource, uri_kwargs=None):
"""Process resource after routing to it.
This is a basic falcon middleware handler.
Args:
req (falcon.Request): request object
resp (falcon.Response): response object
resource (object): resource object matched by falcon router
uri_kwargs (dict): additional keyword argument from uri template.
For ``falcon<1.0.0`` this is always ``None``
"""
if 'user' in req.context:
return
identifier = self.identify(req, resp, resource, uri_kwargs)
user = self.try_storage(identifier, req, resp, resource, uri_kwargs)
if user is not None:
req.context['user'] = user
# if did not succeed then we need to add this to list of available
# challenges.
elif self.challenge is not None:
req.context.setdefault(
'challenges', list()
).append(self.challenge) | ['def', 'process_resource', '(', 'self', ',', 'req', ',', 'resp', ',', 'resource', ',', 'uri_kwargs', '=', 'None', ')', ':', 'if', "'user'", 'in', 'req', '.', 'context', ':', 'return', 'identifier', '=', 'self', '.', 'identify', '(', 'req', ',', 'resp', ',', 'resource', ',', 'uri_kwargs', ')', 'user', '=', 'self', '.', 'try_storage', '(', 'identifier', ',', 'req', ',', 'resp', ',', 'resource', ',', 'uri_kwargs', ')', 'if', 'user', 'is', 'not', 'None', ':', 'req', '.', 'context', '[', "'user'", ']', '=', 'user', '# if did not succeed then we need to add this to list of available', '# challenges.', 'elif', 'self', '.', 'challenge', 'is', 'not', 'None', ':', 'req', '.', 'context', '.', 'setdefault', '(', "'challenges'", ',', 'list', '(', ')', ')', '.', 'append', '(', 'self', '.', 'challenge', ')'] | Process resource after routing to it.
This is a basic falcon middleware handler.
Args:
req (falcon.Request): request object
resp (falcon.Response): response object
resource (object): resource object matched by falcon router
uri_kwargs (dict): additional keyword argument from uri template.
For ``falcon<1.0.0`` this is always ``None`` | ['Process', 'resource', 'after', 'routing', 'to', 'it', '.'] | train | https://github.com/swistakm/graceful/blob/d4678cb6349a5c843a5e58002fc80140821609e4/src/graceful/authentication.py#L288-L314 |
3,880 | edx/event-tracking | eventtracking/django/__init__.py | DjangoTracker.create_backends_from_settings | def create_backends_from_settings(self):
"""
Expects the Django setting "EVENT_TRACKING_BACKENDS" to be defined and point
to a dictionary of backend engine configurations.
Example::
EVENT_TRACKING_BACKENDS = {
'default': {
'ENGINE': 'some.arbitrary.Backend',
'OPTIONS': {
'endpoint': 'http://something/event'
}
},
'another_engine': {
'ENGINE': 'some.arbitrary.OtherBackend',
'OPTIONS': {
'user': 'foo'
}
},
}
"""
config = getattr(settings, DJANGO_BACKEND_SETTING_NAME, {})
backends = self.instantiate_objects(config)
return backends | python | def create_backends_from_settings(self):
"""
Expects the Django setting "EVENT_TRACKING_BACKENDS" to be defined and point
to a dictionary of backend engine configurations.
Example::
EVENT_TRACKING_BACKENDS = {
'default': {
'ENGINE': 'some.arbitrary.Backend',
'OPTIONS': {
'endpoint': 'http://something/event'
}
},
'another_engine': {
'ENGINE': 'some.arbitrary.OtherBackend',
'OPTIONS': {
'user': 'foo'
}
},
}
"""
config = getattr(settings, DJANGO_BACKEND_SETTING_NAME, {})
backends = self.instantiate_objects(config)
return backends | ['def', 'create_backends_from_settings', '(', 'self', ')', ':', 'config', '=', 'getattr', '(', 'settings', ',', 'DJANGO_BACKEND_SETTING_NAME', ',', '{', '}', ')', 'backends', '=', 'self', '.', 'instantiate_objects', '(', 'config', ')', 'return', 'backends'] | Expects the Django setting "EVENT_TRACKING_BACKENDS" to be defined and point
to a dictionary of backend engine configurations.
Example::
EVENT_TRACKING_BACKENDS = {
'default': {
'ENGINE': 'some.arbitrary.Backend',
'OPTIONS': {
'endpoint': 'http://something/event'
}
},
'another_engine': {
'ENGINE': 'some.arbitrary.OtherBackend',
'OPTIONS': {
'user': 'foo'
}
},
} | ['Expects', 'the', 'Django', 'setting', 'EVENT_TRACKING_BACKENDS', 'to', 'be', 'defined', 'and', 'point', 'to', 'a', 'dictionary', 'of', 'backend', 'engine', 'configurations', '.'] | train | https://github.com/edx/event-tracking/blob/8f993560545061d77f11615f5e3865b3916d5ea9/eventtracking/django/__init__.py#L31-L57 |
3,881 | cmap/cmapPy | cmapPy/set_io/gmt.py | write | def write(gmt, out_path):
""" Write a GMT to a text file.
Args:
gmt (GMT object): list of dicts
out_path (string): output path
Returns:
None
"""
with open(out_path, 'w') as f:
for _, each_dict in enumerate(gmt):
f.write(each_dict[SET_IDENTIFIER_FIELD] + '\t')
f.write(each_dict[SET_DESC_FIELD] + '\t')
f.write('\t'.join([str(entry) for entry in each_dict[SET_MEMBERS_FIELD]]))
f.write('\n') | python | def write(gmt, out_path):
""" Write a GMT to a text file.
Args:
gmt (GMT object): list of dicts
out_path (string): output path
Returns:
None
"""
with open(out_path, 'w') as f:
for _, each_dict in enumerate(gmt):
f.write(each_dict[SET_IDENTIFIER_FIELD] + '\t')
f.write(each_dict[SET_DESC_FIELD] + '\t')
f.write('\t'.join([str(entry) for entry in each_dict[SET_MEMBERS_FIELD]]))
f.write('\n') | ['def', 'write', '(', 'gmt', ',', 'out_path', ')', ':', 'with', 'open', '(', 'out_path', ',', "'w'", ')', 'as', 'f', ':', 'for', '_', ',', 'each_dict', 'in', 'enumerate', '(', 'gmt', ')', ':', 'f', '.', 'write', '(', 'each_dict', '[', 'SET_IDENTIFIER_FIELD', ']', '+', "'\\t'", ')', 'f', '.', 'write', '(', 'each_dict', '[', 'SET_DESC_FIELD', ']', '+', "'\\t'", ')', 'f', '.', 'write', '(', "'\\t'", '.', 'join', '(', '[', 'str', '(', 'entry', ')', 'for', 'entry', 'in', 'each_dict', '[', 'SET_MEMBERS_FIELD', ']', ']', ')', ')', 'f', '.', 'write', '(', "'\\n'", ')'] | Write a GMT to a text file.
Args:
gmt (GMT object): list of dicts
out_path (string): output path
Returns:
None | ['Write', 'a', 'GMT', 'to', 'a', 'text', 'file', '.'] | train | https://github.com/cmap/cmapPy/blob/59d833b64fd2c3a494cdf67fe1eb11fc8008bf76/cmapPy/set_io/gmt.py#L93-L109 |
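A usage sketch for write; referencing the module's own field constants avoids hard-coding their values, and the set contents are illustrative:
    from cmapPy.set_io import gmt as gmt_io
    my_sets = [{gmt_io.SET_IDENTIFIER_FIELD: 'SET_A',
                gmt_io.SET_DESC_FIELD: 'example gene set',
                gmt_io.SET_MEMBERS_FIELD: ['TP53', 'EGFR', 'MYC']}]
    gmt_io.write(my_sets, 'example.gmt')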
3,882 | gagneurlab/concise | concise/eval_metrics.py | tnr | def tnr(y_true, y_pred, round=True):
"""True negative rate `tn / (tn + fp)`
"""
y_true, y_pred = _mask_value_nan(y_true, y_pred)
if round:
y_true = np.round(y_true)
y_pred = np.round(y_pred)
c = skm.confusion_matrix(y_true, y_pred)
return c[0, 0] / c[0].sum() | python | def tnr(y_true, y_pred, round=True):
"""True negative rate `tn / (tn + fp)`
"""
y_true, y_pred = _mask_value_nan(y_true, y_pred)
if round:
y_true = np.round(y_true)
y_pred = np.round(y_pred)
c = skm.confusion_matrix(y_true, y_pred)
return c[0, 0] / c[0].sum() | ['def', 'tnr', '(', 'y_true', ',', 'y_pred', ',', 'round', '=', 'True', ')', ':', 'y_true', ',', 'y_pred', '=', '_mask_value_nan', '(', 'y_true', ',', 'y_pred', ')', 'if', 'round', ':', 'y_true', '=', 'np', '.', 'round', '(', 'y_true', ')', 'y_pred', '=', 'np', '.', 'round', '(', 'y_pred', ')', 'c', '=', 'skm', '.', 'confusion_matrix', '(', 'y_true', ',', 'y_pred', ')', 'return', 'c', '[', '0', ',', '0', ']', '/', 'c', '[', '0', ']', '.', 'sum', '(', ')'] | True negative rate `tn / (tn + fp)` | ['True', 'negative', 'rate', 'tn', '/', '(', 'tn', '+', 'fp', ')'] | train | https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/eval_metrics.py#L90-L98 |
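A small worked example for tnr with binary labels; here two of the three actual negatives are predicted negative, so the true negative rate is 2/3:
    import numpy as np
    y_true = np.array([0, 0, 0, 1])
    y_pred = np.array([0, 0, 1, 1])
    tnr(y_true, y_pred)   # tn / (tn + fp) = 2 / 3 ~= 0.667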
3,883 | T-002/pycast | pycast/methods/exponentialsmoothing.py | HoltWintersMethod.initialTrendSmoothingFactors | def initialTrendSmoothingFactors(self, timeSeries):
""" Calculate the initial Trend smoothing Factor b0.
Explanation:
http://en.wikipedia.org/wiki/Exponential_smoothing#Triple_exponential_smoothing
:return: Returns the initial trend smoothing factor b0
"""
result = 0.0
seasonLength = self.get_parameter("seasonLength")
k = min(len(timeSeries) - seasonLength, seasonLength) #In case of only one full season, use average trend of the months that we have twice
for i in xrange(0, k):
result += (timeSeries[seasonLength + i][1] - timeSeries[i][1]) / seasonLength
return result / k | python | def initialTrendSmoothingFactors(self, timeSeries):
""" Calculate the initial Trend smoothing Factor b0.
Explanation:
http://en.wikipedia.org/wiki/Exponential_smoothing#Triple_exponential_smoothing
:return: Returns the initial trend smoothing factor b0
"""
result = 0.0
seasonLength = self.get_parameter("seasonLength")
k = min(len(timeSeries) - seasonLength, seasonLength) #In case of only one full season, use average trend of the months that we have twice
for i in xrange(0, k):
result += (timeSeries[seasonLength + i][1] - timeSeries[i][1]) / seasonLength
return result / k | ['def', 'initialTrendSmoothingFactors', '(', 'self', ',', 'timeSeries', ')', ':', 'result', '=', '0.0', 'seasonLength', '=', 'self', '.', 'get_parameter', '(', '"seasonLength"', ')', 'k', '=', 'min', '(', 'len', '(', 'timeSeries', ')', '-', 'seasonLength', ',', 'seasonLength', ')', '#In case of only one full season, use average trend of the months that we have twice', 'for', 'i', 'in', 'xrange', '(', '0', ',', 'k', ')', ':', 'result', '+=', '(', 'timeSeries', '[', 'seasonLength', '+', 'i', ']', '[', '1', ']', '-', 'timeSeries', '[', 'i', ']', '[', '1', ']', ')', '/', 'seasonLength', 'return', 'result', '/', 'k'] | Calculate the initial Trend smoothing Factor b0.
Explanation:
http://en.wikipedia.org/wiki/Exponential_smoothing#Triple_exponential_smoothing
:return: Returns the initial trend smoothing factor b0 | ['Calculate', 'the', 'initial', 'Trend', 'smoothing', 'Factor', 'b0', '.'] | train | https://github.com/T-002/pycast/blob/8a53505c6d8367e0ea572e8af768e80b29e1cc41/pycast/methods/exponentialsmoothing.py#L453-L467 |
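In formula form, with L = seasonLength and k = min(len(timeSeries) - L, L), the loop above computes b0 = (1/k) * sum_{i=0..k-1} (y[L+i] - y[i]) / L, i.e. the average per-step difference between each point in the first season and the corresponding point one season later.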
3,884 | radjkarl/fancyWidgets | fancywidgets/pyQtBased/Dialogs.py | Dialogs.getSaveFileName | def getSaveFileName(self, *args, **kwargs):
"""
analogue to QtWidgets.QFileDialog.getSaveFileNameAndFilter
but returns the filename + chosen file ending even if not typed in gui
"""
if 'directory' not in kwargs:
if self.opts['save']:
if self.opts['save']:
kwargs['directory'] = self.opts['save']
fname = QtWidgets.QFileDialog.getSaveFileName(**kwargs)
if fname:
if type(fname) == tuple:
#only happened since qt5
#getSaveFileName returns (path, ftype)
if not fname[0]:
return
p = PathStr(fname[0])
if not p.filetype():
ftyp = self._extractFtype(fname[1])
p = p.setFiletype(ftyp)
else:
p = PathStr(fname)
self.opts['save'] = p.dirname()
if self.opts['open'] is None:
self.opts['open'] = self.opts['save']
return p | python | def getSaveFileName(self, *args, **kwargs):
"""
analogue to QtWidgets.QFileDialog.getSaveFileNameAndFilter
but returns the filename + chosen file ending even if not typed in gui
"""
if 'directory' not in kwargs:
if self.opts['save']:
if self.opts['save']:
kwargs['directory'] = self.opts['save']
fname = QtWidgets.QFileDialog.getSaveFileName(**kwargs)
if fname:
if type(fname) == tuple:
#only happened since qt5
#getSaveFileName returns (path, ftype)
if not fname[0]:
return
p = PathStr(fname[0])
if not p.filetype():
ftyp = self._extractFtype(fname[1])
p = p.setFiletype(ftyp)
else:
p = PathStr(fname)
self.opts['save'] = p.dirname()
if self.opts['open'] is None:
self.opts['open'] = self.opts['save']
return p | ['def', 'getSaveFileName', '(', 'self', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'if', "'directory'", 'not', 'in', 'kwargs', ':', 'if', 'self', '.', 'opts', '[', "'save'", ']', ':', 'if', 'self', '.', 'opts', '[', "'save'", ']', ':', 'kwargs', '[', "'directory'", ']', '=', 'self', '.', 'opts', '[', "'save'", ']', 'fname', '=', 'QtWidgets', '.', 'QFileDialog', '.', 'getSaveFileName', '(', '*', '*', 'kwargs', ')', 'if', 'fname', ':', 'if', 'type', '(', 'fname', ')', '==', 'tuple', ':', '#only happened since qt5', '#getSaveFileName returns (path, ftype)', 'if', 'not', 'fname', '[', '0', ']', ':', 'return', 'p', '=', 'PathStr', '(', 'fname', '[', '0', ']', ')', 'if', 'not', 'p', '.', 'filetype', '(', ')', ':', 'ftyp', '=', 'self', '.', '_extractFtype', '(', 'fname', '[', '1', ']', ')', 'p', '=', 'p', '.', 'setFiletype', '(', 'ftyp', ')', 'else', ':', 'p', '=', 'PathStr', '(', 'fname', ')', 'self', '.', 'opts', '[', "'save'", ']', '=', 'p', '.', 'dirname', '(', ')', 'if', 'self', '.', 'opts', '[', "'open'", ']', 'is', 'None', ':', 'self', '.', 'opts', '[', "'open'", ']', '=', 'self', '.', 'opts', '[', "'save'", ']', 'return', 'p'] | analogue to QtWidgets.QFileDialog.getSaveFileNameAndFilter
but returns the filename + chosen file ending even if not typed in gui | ['analogue', 'to', 'QtWidgets', '.', 'QFileDialog', '.', 'getSaveFileNameAndFilter', 'but', 'returns', 'the', 'filename', '+', 'chosen', 'file', 'ending', 'even', 'if', 'not', 'typed', 'in', 'gui'] | train | https://github.com/radjkarl/fancyWidgets/blob/ffe0d5747c5296c78575f0e0909af915a4a5698f/fancywidgets/pyQtBased/Dialogs.py#L34-L59 |
3,885 | AtteqCom/zsl | src/zsl/task/task_decorator.py | jsonp_wrap | def jsonp_wrap(callback_key='callback'):
"""
Format response to jsonp and add a callback to JSON data - a jsonp request
"""
def decorator_fn(f):
@wraps(f)
def jsonp_output_decorator(*args, **kwargs):
task_data = _get_data_from_args(args)
data = task_data.get_data()
if callback_key not in data:
raise KeyError(
'Missing required parameter "{0}" for task.'.format(
callback_key))
callback = data[callback_key]
jsonp = f(*args, **kwargs)
if isinstance(JobContext.get_current_context(), WebJobContext):
JobContext.get_current_context().add_responder(
MimeSetterWebTaskResponder('application/javascript'))
jsonp = "{callback}({data})".format(callback=callback, data=jsonp)
return jsonp
return jsonp_output_decorator
return decorator_fn | python | def jsonp_wrap(callback_key='callback'):
"""
Format response to jsonp and add a callback to JSON data - a jsonp request
"""
def decorator_fn(f):
@wraps(f)
def jsonp_output_decorator(*args, **kwargs):
task_data = _get_data_from_args(args)
data = task_data.get_data()
if callback_key not in data:
raise KeyError(
'Missing required parameter "{0}" for task.'.format(
callback_key))
callback = data[callback_key]
jsonp = f(*args, **kwargs)
if isinstance(JobContext.get_current_context(), WebJobContext):
JobContext.get_current_context().add_responder(
MimeSetterWebTaskResponder('application/javascript'))
jsonp = "{callback}({data})".format(callback=callback, data=jsonp)
return jsonp
return jsonp_output_decorator
return decorator_fn | ['def', 'jsonp_wrap', '(', 'callback_key', '=', "'callback'", ')', ':', 'def', 'decorator_fn', '(', 'f', ')', ':', '@', 'wraps', '(', 'f', ')', 'def', 'jsonp_output_decorator', '(', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'task_data', '=', '_get_data_from_args', '(', 'args', ')', 'data', '=', 'task_data', '.', 'get_data', '(', ')', 'if', 'callback_key', 'not', 'in', 'data', ':', 'raise', 'KeyError', '(', '\'Missing required parameter "{0}" for task.\'', '.', 'format', '(', 'callback_key', ')', ')', 'callback', '=', 'data', '[', 'callback_key', ']', 'jsonp', '=', 'f', '(', '*', 'args', ',', '*', '*', 'kwargs', ')', 'if', 'isinstance', '(', 'JobContext', '.', 'get_current_context', '(', ')', ',', 'WebJobContext', ')', ':', 'JobContext', '.', 'get_current_context', '(', ')', '.', 'add_responder', '(', 'MimeSetterWebTaskResponder', '(', "'application/javascript'", ')', ')', 'jsonp', '=', '"{callback}({data})"', '.', 'format', '(', 'callback', '=', 'callback', ',', 'data', '=', 'jsonp', ')', 'return', 'jsonp', 'return', 'jsonp_output_decorator', 'return', 'decorator_fn'] | Format response to jsonp and add a callback to JSON data - a jsonp request | ['Format', 'response', 'to', 'jsonp', 'and', 'add', 'a', 'callback', 'to', 'JSON', 'data', '-', 'a', 'jsonp', 'request'] | train | https://github.com/AtteqCom/zsl/blob/ab51a96da1780ff642912396d4b85bdcb72560c1/src/zsl/task/task_decorator.py#L134-L162 |
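Independently of the zsl task machinery, the string the decorator ultimately builds looks like this (callback name and payload are illustrative):
    callback = 'handleData'          # taken from the request data under the 'callback' key
    json_payload = '{"value": 1}'    # whatever the wrapped task returned as JSON
    jsonp = "{callback}({data})".format(callback=callback, data=json_payload)
    # jsonp == 'handleData({"value": 1})'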
3,886 | Spinmob/spinmob | egg/_gui.py | DataboxPlot.plot | def plot(self):
"""
Sets the internal databox to the supplied value and plots it.
If databox=None, this will plot the internal databox.
"""
# if we're disabled or have no data columns, clear everything!
if not self.button_enabled.is_checked() or len(self) == 0:
self._set_number_of_plots(0)
return self
# if there is no script, create a default
if not self.combo_autoscript.get_index()==0:
self.script.set_text(self._autoscript())
##### Try the script and make the curves / plots match
try:
# get globals for sin, cos etc
g = _n.__dict__
g.update(dict(d=self))
g.update(dict(xlabels='x', ylabels='y'))
# run the script.
exec(self.script.get_text(), g)
# x & y should now be data arrays, lists of data arrays or Nones
x = g['x']
y = g['y']
# make it the right shape
if x == None: x = [None]
if y == None: y = [None]
if not _spinmob.fun.is_iterable(x[0]) and not x[0] == None: x = [x]
if not _spinmob.fun.is_iterable(y[0]) and not y[0] == None: y = [y]
if len(x) == 1 and not len(y) == 1: x = x*len(y)
if len(y) == 1 and not len(x) == 1: y = y*len(x)
# xlabels and ylabels should be strings or lists of strings
xlabels = g['xlabels']
ylabels = g['ylabels']
# make sure we have exactly the right number of plots
self._set_number_of_plots(len(x))
self._update_linked_axes()
# return if there is nothing.
if len(x) == 0: return
# now plot everything
for n in range(max(len(x),len(y))-1,-1,-1):
# Create data for "None" cases.
if x[n] is None: x[n] = list(range(len(y[n])))
if y[n] is None: y[n] = list(range(len(x[n])))
self._curves[n].setData(x[n],y[n])
# get the labels for the curves
# if it's a string, use the same label for all axes
if type(xlabels) in [str,type(None)]: xlabel = xlabels
elif n < len(xlabels): xlabel = xlabels[n]
else: xlabel = ''
if type(ylabels) in [str,type(None)]: ylabel = ylabels
elif n < len(ylabels): ylabel = ylabels[n]
else: ylabel = ''
# set the labels
i = min(n, len(self.plot_widgets)-1)
self.plot_widgets[i].setLabel('left', ylabel)
self.plot_widgets[i].setLabel('bottom', xlabel)
# special case: hide if None
if xlabel == None: self.plot_widgets[i].getAxis('bottom').showLabel(False)
if ylabel == None: self.plot_widgets[i].getAxis('left') .showLabel(False)
# unpink the script, since it seems to have worked
self.script.set_colors('black','white')
# otherwise, look angry and don't autosave
except: self.script.set_colors('black','pink')
return self | python | def plot(self):
"""
Sets the internal databox to the supplied value and plots it.
If databox=None, this will plot the internal databox.
"""
# if we're disabled or have no data columns, clear everything!
if not self.button_enabled.is_checked() or len(self) == 0:
self._set_number_of_plots(0)
return self
# if there is no script, create a default
if not self.combo_autoscript.get_index()==0:
self.script.set_text(self._autoscript())
##### Try the script and make the curves / plots match
try:
# get globals for sin, cos etc
g = _n.__dict__
g.update(dict(d=self))
g.update(dict(xlabels='x', ylabels='y'))
# run the script.
exec(self.script.get_text(), g)
# x & y should now be data arrays, lists of data arrays or Nones
x = g['x']
y = g['y']
# make it the right shape
if x == None: x = [None]
if y == None: y = [None]
if not _spinmob.fun.is_iterable(x[0]) and not x[0] == None: x = [x]
if not _spinmob.fun.is_iterable(y[0]) and not y[0] == None: y = [y]
if len(x) == 1 and not len(y) == 1: x = x*len(y)
if len(y) == 1 and not len(x) == 1: y = y*len(x)
# xlabels and ylabels should be strings or lists of strings
xlabels = g['xlabels']
ylabels = g['ylabels']
# make sure we have exactly the right number of plots
self._set_number_of_plots(len(x))
self._update_linked_axes()
# return if there is nothing.
if len(x) == 0: return
# now plot everything
for n in range(max(len(x),len(y))-1,-1,-1):
# Create data for "None" cases.
if x[n] is None: x[n] = list(range(len(y[n])))
if y[n] is None: y[n] = list(range(len(x[n])))
self._curves[n].setData(x[n],y[n])
# get the labels for the curves
# if it's a string, use the same label for all axes
if type(xlabels) in [str,type(None)]: xlabel = xlabels
elif n < len(xlabels): xlabel = xlabels[n]
else: xlabel = ''
if type(ylabels) in [str,type(None)]: ylabel = ylabels
elif n < len(ylabels): ylabel = ylabels[n]
else: ylabel = ''
# set the labels
i = min(n, len(self.plot_widgets)-1)
self.plot_widgets[i].setLabel('left', ylabel)
self.plot_widgets[i].setLabel('bottom', xlabel)
# special case: hide if None
if xlabel == None: self.plot_widgets[i].getAxis('bottom').showLabel(False)
if ylabel == None: self.plot_widgets[i].getAxis('left') .showLabel(False)
# unpink the script, since it seems to have worked
self.script.set_colors('black','white')
# otherwise, look angry and don't autosave
except: self.script.set_colors('black','pink')
return self | ['def', 'plot', '(', 'self', ')', ':', "# if we're disabled or have no data columns, clear everything!", 'if', 'not', 'self', '.', 'button_enabled', '.', 'is_checked', '(', ')', 'or', 'len', '(', 'self', ')', '==', '0', ':', 'self', '.', '_set_number_of_plots', '(', '0', ')', 'return', 'self', '# if there is no script, create a default', 'if', 'not', 'self', '.', 'combo_autoscript', '.', 'get_index', '(', ')', '==', '0', ':', 'self', '.', 'script', '.', 'set_text', '(', 'self', '.', '_autoscript', '(', ')', ')', '##### Try the script and make the curves / plots match', 'try', ':', '# get globals for sin, cos etc', 'g', '=', '_n', '.', '__dict__', 'g', '.', 'update', '(', 'dict', '(', 'd', '=', 'self', ')', ')', 'g', '.', 'update', '(', 'dict', '(', 'xlabels', '=', "'x'", ',', 'ylabels', '=', "'y'", ')', ')', '# run the script.', 'exec', '(', 'self', '.', 'script', '.', 'get_text', '(', ')', ',', 'g', ')', '# x & y should now be data arrays, lists of data arrays or Nones', 'x', '=', 'g', '[', "'x'", ']', 'y', '=', 'g', '[', "'y'", ']', '# make it the right shape', 'if', 'x', '==', 'None', ':', 'x', '=', '[', 'None', ']', 'if', 'y', '==', 'None', ':', 'y', '=', '[', 'None', ']', 'if', 'not', '_spinmob', '.', 'fun', '.', 'is_iterable', '(', 'x', '[', '0', ']', ')', 'and', 'not', 'x', '[', '0', ']', '==', 'None', ':', 'x', '=', '[', 'x', ']', 'if', 'not', '_spinmob', '.', 'fun', '.', 'is_iterable', '(', 'y', '[', '0', ']', ')', 'and', 'not', 'y', '[', '0', ']', '==', 'None', ':', 'y', '=', '[', 'y', ']', 'if', 'len', '(', 'x', ')', '==', '1', 'and', 'not', 'len', '(', 'y', ')', '==', '1', ':', 'x', '=', 'x', '*', 'len', '(', 'y', ')', 'if', 'len', '(', 'y', ')', '==', '1', 'and', 'not', 'len', '(', 'x', ')', '==', '1', ':', 'y', '=', 'y', '*', 'len', '(', 'x', ')', '# xlabels and ylabels should be strings or lists of strings', 'xlabels', '=', 'g', '[', "'xlabels'", ']', 'ylabels', '=', 'g', '[', "'ylabels'", ']', '# make sure we have exactly the right number of plots', 'self', '.', '_set_number_of_plots', '(', 'len', '(', 'x', ')', ')', 'self', '.', '_update_linked_axes', '(', ')', '# return if there is nothing.', 'if', 'len', '(', 'x', ')', '==', '0', ':', 'return', '# now plot everything', 'for', 'n', 'in', 'range', '(', 'max', '(', 'len', '(', 'x', ')', ',', 'len', '(', 'y', ')', ')', '-', '1', ',', '-', '1', ',', '-', '1', ')', ':', '# Create data for "None" cases.', 'if', 'x', '[', 'n', ']', 'is', 'None', ':', 'x', '[', 'n', ']', '=', 'list', '(', 'range', '(', 'len', '(', 'y', '[', 'n', ']', ')', ')', ')', 'if', 'y', '[', 'n', ']', 'is', 'None', ':', 'y', '[', 'n', ']', '=', 'list', '(', 'range', '(', 'len', '(', 'x', '[', 'n', ']', ')', ')', ')', 'self', '.', '_curves', '[', 'n', ']', '.', 'setData', '(', 'x', '[', 'n', ']', ',', 'y', '[', 'n', ']', ')', '# get the labels for the curves', "# if it's a string, use the same label for all axes", 'if', 'type', '(', 'xlabels', ')', 'in', '[', 'str', ',', 'type', '(', 'None', ')', ']', ':', 'xlabel', '=', 'xlabels', 'elif', 'n', '<', 'len', '(', 'xlabels', ')', ':', 'xlabel', '=', 'xlabels', '[', 'n', ']', 'else', ':', 'xlabel', '=', "''", 'if', 'type', '(', 'ylabels', ')', 'in', '[', 'str', ',', 'type', '(', 'None', ')', ']', ':', 'ylabel', '=', 'ylabels', 'elif', 'n', '<', 'len', '(', 'ylabels', ')', ':', 'ylabel', '=', 'ylabels', '[', 'n', ']', 'else', ':', 'ylabel', '=', "''", '# set the labels', 'i', '=', 'min', '(', 'n', ',', 'len', '(', 'self', '.', 'plot_widgets', ')', '-', '1', ')', 'self', '.', 'plot_widgets', '[', 
'i', ']', '.', 'setLabel', '(', "'left'", ',', 'ylabel', ')', 'self', '.', 'plot_widgets', '[', 'i', ']', '.', 'setLabel', '(', "'bottom'", ',', 'xlabel', ')', '# special case: hide if None', 'if', 'xlabel', '==', 'None', ':', 'self', '.', 'plot_widgets', '[', 'i', ']', '.', 'getAxis', '(', "'bottom'", ')', '.', 'showLabel', '(', 'False', ')', 'if', 'ylabel', '==', 'None', ':', 'self', '.', 'plot_widgets', '[', 'i', ']', '.', 'getAxis', '(', "'left'", ')', '.', 'showLabel', '(', 'False', ')', '# unpink the script, since it seems to have worked', 'self', '.', 'script', '.', 'set_colors', '(', "'black'", ',', "'white'", ')', "# otherwise, look angry and don't autosave", 'except', ':', 'self', '.', 'script', '.', 'set_colors', '(', "'black'", ',', "'pink'", ')', 'return', 'self'] | Sets the internal databox to the supplied value and plots it.
If databox=None, this will plot the internal databox. | ['Sets', 'the', 'internal', 'databox', 'to', 'the', 'supplied', 'value', 'and', 'plots', 'it', '.', 'If', 'databox', '=', 'None', 'this', 'will', 'plot', 'the', 'internal', 'databox', '.'] | train | https://github.com/Spinmob/spinmob/blob/f037f5df07f194bcd4a01f4d9916e57b9e8fb45a/egg/_gui.py#L2756-L2840 |
3,887 | tuxu/python-samplerate | samplerate/converters.py | resample | def resample(input_data, ratio, converter_type='sinc_best', verbose=False):
"""Resample the signal in `input_data` at once.
Parameters
----------
input_data : ndarray
Input data. A single channel is provided as a 1D array of `num_frames` length.
Input data with several channels is represented as a 2D array of shape
(`num_frames`, `num_channels`). For use with `libsamplerate`, `input_data`
is converted to 32-bit float and C (row-major) memory order.
ratio : float
Conversion ratio = output sample rate / input sample rate.
converter_type : ConverterType, str, or int
Sample rate converter.
verbose : bool
If `True`, print additional information about the conversion.
Returns
-------
output_data : ndarray
Resampled input data.
Note
----
If samples are to be processed in chunks, `Resampler` and
`CallbackResampler` will provide better results and allow for variable
conversion ratios.
"""
from samplerate.lowlevel import src_simple
from samplerate.exceptions import ResamplingError
input_data = np.require(input_data, requirements='C', dtype=np.float32)
if input_data.ndim == 2:
num_frames, channels = input_data.shape
output_shape = (int(num_frames * ratio), channels)
elif input_data.ndim == 1:
num_frames, channels = input_data.size, 1
output_shape = (int(num_frames * ratio), )
else:
raise ValueError('rank > 2 not supported')
output_data = np.empty(output_shape, dtype=np.float32)
converter_type = _get_converter_type(converter_type)
(error, input_frames_used, output_frames_gen) \
= src_simple(input_data, output_data, ratio,
converter_type.value, channels)
if error != 0:
raise ResamplingError(error)
if verbose:
info = ('samplerate info:\n'
'{} input frames used\n'
'{} output frames generated\n'
.format(input_frames_used, output_frames_gen))
print(info)
return (output_data[:output_frames_gen, :]
if channels > 1 else output_data[:output_frames_gen]) | python | def resample(input_data, ratio, converter_type='sinc_best', verbose=False):
"""Resample the signal in `input_data` at once.
Parameters
----------
input_data : ndarray
Input data. A single channel is provided as a 1D array of `num_frames` length.
Input data with several channels is represented as a 2D array of shape
(`num_frames`, `num_channels`). For use with `libsamplerate`, `input_data`
is converted to 32-bit float and C (row-major) memory order.
ratio : float
Conversion ratio = output sample rate / input sample rate.
converter_type : ConverterType, str, or int
Sample rate converter.
verbose : bool
If `True`, print additional information about the conversion.
Returns
-------
output_data : ndarray
Resampled input data.
Note
----
If samples are to be processed in chunks, `Resampler` and
`CallbackResampler` will provide better results and allow for variable
conversion ratios.
"""
from samplerate.lowlevel import src_simple
from samplerate.exceptions import ResamplingError
input_data = np.require(input_data, requirements='C', dtype=np.float32)
if input_data.ndim == 2:
num_frames, channels = input_data.shape
output_shape = (int(num_frames * ratio), channels)
elif input_data.ndim == 1:
num_frames, channels = input_data.size, 1
output_shape = (int(num_frames * ratio), )
else:
raise ValueError('rank > 2 not supported')
output_data = np.empty(output_shape, dtype=np.float32)
converter_type = _get_converter_type(converter_type)
(error, input_frames_used, output_frames_gen) \
= src_simple(input_data, output_data, ratio,
converter_type.value, channels)
if error != 0:
raise ResamplingError(error)
if verbose:
info = ('samplerate info:\n'
'{} input frames used\n'
'{} output frames generated\n'
.format(input_frames_used, output_frames_gen))
print(info)
return (output_data[:output_frames_gen, :]
if channels > 1 else output_data[:output_frames_gen]) | ['def', 'resample', '(', 'input_data', ',', 'ratio', ',', 'converter_type', '=', "'sinc_best'", ',', 'verbose', '=', 'False', ')', ':', 'from', 'samplerate', '.', 'lowlevel', 'import', 'src_simple', 'from', 'samplerate', '.', 'exceptions', 'import', 'ResamplingError', 'input_data', '=', 'np', '.', 'require', '(', 'input_data', ',', 'requirements', '=', "'C'", ',', 'dtype', '=', 'np', '.', 'float32', ')', 'if', 'input_data', '.', 'ndim', '==', '2', ':', 'num_frames', ',', 'channels', '=', 'input_data', '.', 'shape', 'output_shape', '=', '(', 'int', '(', 'num_frames', '*', 'ratio', ')', ',', 'channels', ')', 'elif', 'input_data', '.', 'ndim', '==', '1', ':', 'num_frames', ',', 'channels', '=', 'input_data', '.', 'size', ',', '1', 'output_shape', '=', '(', 'int', '(', 'num_frames', '*', 'ratio', ')', ',', ')', 'else', ':', 'raise', 'ValueError', '(', "'rank > 2 not supported'", ')', 'output_data', '=', 'np', '.', 'empty', '(', 'output_shape', ',', 'dtype', '=', 'np', '.', 'float32', ')', 'converter_type', '=', '_get_converter_type', '(', 'converter_type', ')', '(', 'error', ',', 'input_frames_used', ',', 'output_frames_gen', ')', '=', 'src_simple', '(', 'input_data', ',', 'output_data', ',', 'ratio', ',', 'converter_type', '.', 'value', ',', 'channels', ')', 'if', 'error', '!=', '0', ':', 'raise', 'ResamplingError', '(', 'error', ')', 'if', 'verbose', ':', 'info', '=', '(', "'samplerate info:\\n'", "'{} input frames used\\n'", "'{} output frames generated\\n'", '.', 'format', '(', 'input_frames_used', ',', 'output_frames_gen', ')', ')', 'print', '(', 'info', ')', 'return', '(', 'output_data', '[', ':', 'output_frames_gen', ',', ':', ']', 'if', 'channels', '>', '1', 'else', 'output_data', '[', ':', 'output_frames_gen', ']', ')'] | Resample the signal in `input_data` at once.
Parameters
----------
input_data : ndarray
Input data. A single channel is provided as a 1D array of `num_frames` length.
Input data with several channels is represented as a 2D array of shape
(`num_frames`, `num_channels`). For use with `libsamplerate`, `input_data`
is converted to 32-bit float and C (row-major) memory order.
ratio : float
Conversion ratio = output sample rate / input sample rate.
converter_type : ConverterType, str, or int
Sample rate converter.
verbose : bool
If `True`, print additional information about the conversion.
Returns
-------
output_data : ndarray
Resampled input data.
Note
----
If samples are to be processed in chunks, `Resampler` and
`CallbackResampler` will provide better results and allow for variable
conversion ratios. | ['Resample', 'the', 'signal', 'in', 'input_data', 'at', 'once', '.'] | train | https://github.com/tuxu/python-samplerate/blob/ed73d7a39e61bfb34b03dade14ffab59aa27922a/samplerate/converters.py#L31-L90 |
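A minimal usage sketch; the signal and rates are illustrative, and the output length is approximately the input length times the ratio:
    import numpy as np
    import samplerate
    signal = np.random.randn(44100)                       # 1 second of mono audio at 44.1 kHz
    output = samplerate.resample(signal, 48000 / 44100, 'sinc_best')
    output.shape                                          # -> roughly (48000,)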
3,888 | googlefonts/ufo2ft | Lib/ufo2ft/outlineCompiler.py | BaseOutlineCompiler._setupTable_hhea_or_vhea | def _setupTable_hhea_or_vhea(self, tag):
"""
Make the hhea table or the vhea table. This assumes that the hmtx or
the vmtx table, respectively, has already been made.
"""
if tag not in self.tables:
return
if tag == "hhea":
isHhea = True
else:
isHhea = False
self.otf[tag] = table = newTable(tag)
mtxTable = self.otf.get(tag[0] + "mtx")
font = self.ufo
if isHhea:
table.tableVersion = 0x00010000
else:
table.tableVersion = 0x00011000
# Vertical metrics in hhea, horizontal metrics in vhea
# and caret info.
# The hhea metrics names are formed as:
# "openType" + tag.title() + "Ascender", etc.
# While vhea metrics names are formed as:
# "openType" + tag.title() + "VertTypo" + "Ascender", etc.
# Caret info names only differ by tag.title().
commonPrefix = "openType%s" % tag.title()
if isHhea:
metricsPrefix = commonPrefix
else:
metricsPrefix = "openType%sVertTypo" % tag.title()
metricsDict = {
"ascent": "%sAscender" % metricsPrefix,
"descent": "%sDescender" % metricsPrefix,
"lineGap": "%sLineGap" % metricsPrefix,
"caretSlopeRise": "%sCaretSlopeRise" % commonPrefix,
"caretSlopeRun": "%sCaretSlopeRun" % commonPrefix,
"caretOffset": "%sCaretOffset" % commonPrefix,
}
for otfName, ufoName in metricsDict.items():
setattr(table, otfName,
otRound(getAttrWithFallback(font.info, ufoName)))
# Horizontal metrics in hhea, vertical metrics in vhea
advances = [] # width in hhea, height in vhea
firstSideBearings = [] # left in hhea, top in vhea
secondSideBearings = [] # right in hhea, bottom in vhea
extents = []
if mtxTable is not None:
for glyphName in self.allGlyphs:
advance, firstSideBearing = mtxTable[glyphName]
advances.append(advance)
bounds = self.glyphBoundingBoxes[glyphName]
if bounds is None:
continue
if isHhea:
boundsAdvance = (bounds.xMax - bounds.xMin)
# equation from the hhea spec for calculating xMaxExtent:
# Max(lsb + (xMax - xMin))
extent = firstSideBearing + boundsAdvance
else:
boundsAdvance = (bounds.yMax - bounds.yMin)
# equation from the vhea spec for calculating yMaxExtent:
# Max(tsb + (yMax - yMin)).
extent = firstSideBearing + boundsAdvance
secondSideBearing = advance - firstSideBearing - boundsAdvance
firstSideBearings.append(firstSideBearing)
secondSideBearings.append(secondSideBearing)
extents.append(extent)
setattr(table,
"advance%sMax" % ("Width" if isHhea else "Height"),
max(advances) if advances else 0)
setattr(table,
"min%sSideBearing" % ("Left" if isHhea else "Top"),
min(firstSideBearings) if firstSideBearings else 0)
setattr(table,
"min%sSideBearing" % ("Right" if isHhea else "Bottom"),
min(secondSideBearings) if secondSideBearings else 0)
setattr(table,
"%sMaxExtent" % ("x" if isHhea else "y"),
max(extents) if extents else 0)
if isHhea:
reserved = range(4)
else:
# vhea.reserved0 is caretOffset for legacy reasons
reserved = range(1, 5)
for i in reserved:
setattr(table, "reserved%i" % i, 0)
table.metricDataFormat = 0
# glyph count
setattr(table,
"numberOf%sMetrics" % ("H" if isHhea else "V"),
len(self.allGlyphs)) | python | def _setupTable_hhea_or_vhea(self, tag):
"""
Make the hhea table or the vhea table. This assume the hmtx or
the vmtx were respectively made first.
"""
if tag not in self.tables:
return
if tag == "hhea":
isHhea = True
else:
isHhea = False
self.otf[tag] = table = newTable(tag)
mtxTable = self.otf.get(tag[0] + "mtx")
font = self.ufo
if isHhea:
table.tableVersion = 0x00010000
else:
table.tableVersion = 0x00011000
# Vertical metrics in hhea, horizontal metrics in vhea
# and caret info.
# The hhea metrics names are formed as:
# "openType" + tag.title() + "Ascender", etc.
# While vhea metrics names are formed as:
# "openType" + tag.title() + "VertTypo" + "Ascender", etc.
# Caret info names only differ by tag.title().
commonPrefix = "openType%s" % tag.title()
if isHhea:
metricsPrefix = commonPrefix
else:
metricsPrefix = "openType%sVertTypo" % tag.title()
metricsDict = {
"ascent": "%sAscender" % metricsPrefix,
"descent": "%sDescender" % metricsPrefix,
"lineGap": "%sLineGap" % metricsPrefix,
"caretSlopeRise": "%sCaretSlopeRise" % commonPrefix,
"caretSlopeRun": "%sCaretSlopeRun" % commonPrefix,
"caretOffset": "%sCaretOffset" % commonPrefix,
}
for otfName, ufoName in metricsDict.items():
setattr(table, otfName,
otRound(getAttrWithFallback(font.info, ufoName)))
# Horizontal metrics in hhea, vertical metrics in vhea
advances = [] # width in hhea, height in vhea
firstSideBearings = [] # left in hhea, top in vhea
secondSideBearings = [] # right in hhea, bottom in vhea
extents = []
if mtxTable is not None:
for glyphName in self.allGlyphs:
advance, firstSideBearing = mtxTable[glyphName]
advances.append(advance)
bounds = self.glyphBoundingBoxes[glyphName]
if bounds is None:
continue
if isHhea:
boundsAdvance = (bounds.xMax - bounds.xMin)
# equation from the hhea spec for calculating xMaxExtent:
# Max(lsb + (xMax - xMin))
extent = firstSideBearing + boundsAdvance
else:
boundsAdvance = (bounds.yMax - bounds.yMin)
# equation from the vhea spec for calculating yMaxExtent:
# Max(tsb + (yMax - yMin)).
extent = firstSideBearing + boundsAdvance
secondSideBearing = advance - firstSideBearing - boundsAdvance
firstSideBearings.append(firstSideBearing)
secondSideBearings.append(secondSideBearing)
extents.append(extent)
setattr(table,
"advance%sMax" % ("Width" if isHhea else "Height"),
max(advances) if advances else 0)
setattr(table,
"min%sSideBearing" % ("Left" if isHhea else "Top"),
min(firstSideBearings) if firstSideBearings else 0)
setattr(table,
"min%sSideBearing" % ("Right" if isHhea else "Bottom"),
min(secondSideBearings) if secondSideBearings else 0)
setattr(table,
"%sMaxExtent" % ("x" if isHhea else "y"),
max(extents) if extents else 0)
if isHhea:
reserved = range(4)
else:
# vhea.reserved0 is caretOffset for legacy reasons
reserved = range(1, 5)
for i in reserved:
setattr(table, "reserved%i" % i, 0)
table.metricDataFormat = 0
# glyph count
setattr(table,
"numberOf%sMetrics" % ("H" if isHhea else "V"),
len(self.allGlyphs)) | ['def', '_setupTable_hhea_or_vhea', '(', 'self', ',', 'tag', ')', ':', 'if', 'tag', 'not', 'in', 'self', '.', 'tables', ':', 'return', 'if', 'tag', '==', '"hhea"', ':', 'isHhea', '=', 'True', 'else', ':', 'isHhea', '=', 'False', 'self', '.', 'otf', '[', 'tag', ']', '=', 'table', '=', 'newTable', '(', 'tag', ')', 'mtxTable', '=', 'self', '.', 'otf', '.', 'get', '(', 'tag', '[', '0', ']', '+', '"mtx"', ')', 'font', '=', 'self', '.', 'ufo', 'if', 'isHhea', ':', 'table', '.', 'tableVersion', '=', '0x00010000', 'else', ':', 'table', '.', 'tableVersion', '=', '0x00011000', '# Vertical metrics in hhea, horizontal metrics in vhea', '# and caret info.', '# The hhea metrics names are formed as:', '# "openType" + tag.title() + "Ascender", etc.', '# While vhea metrics names are formed as:', '# "openType" + tag.title() + "VertTypo" + "Ascender", etc.', '# Caret info names only differ by tag.title().', 'commonPrefix', '=', '"openType%s"', '%', 'tag', '.', 'title', '(', ')', 'if', 'isHhea', ':', 'metricsPrefix', '=', 'commonPrefix', 'else', ':', 'metricsPrefix', '=', '"openType%sVertTypo"', '%', 'tag', '.', 'title', '(', ')', 'metricsDict', '=', '{', '"ascent"', ':', '"%sAscender"', '%', 'metricsPrefix', ',', '"descent"', ':', '"%sDescender"', '%', 'metricsPrefix', ',', '"lineGap"', ':', '"%sLineGap"', '%', 'metricsPrefix', ',', '"caretSlopeRise"', ':', '"%sCaretSlopeRise"', '%', 'commonPrefix', ',', '"caretSlopeRun"', ':', '"%sCaretSlopeRun"', '%', 'commonPrefix', ',', '"caretOffset"', ':', '"%sCaretOffset"', '%', 'commonPrefix', ',', '}', 'for', 'otfName', ',', 'ufoName', 'in', 'metricsDict', '.', 'items', '(', ')', ':', 'setattr', '(', 'table', ',', 'otfName', ',', 'otRound', '(', 'getAttrWithFallback', '(', 'font', '.', 'info', ',', 'ufoName', ')', ')', ')', '# Horizontal metrics in hhea, vertical metrics in vhea', 'advances', '=', '[', ']', '# width in hhea, height in vhea', 'firstSideBearings', '=', '[', ']', '# left in hhea, top in vhea', 'secondSideBearings', '=', '[', ']', '# right in hhea, bottom in vhea', 'extents', '=', '[', ']', 'if', 'mtxTable', 'is', 'not', 'None', ':', 'for', 'glyphName', 'in', 'self', '.', 'allGlyphs', ':', 'advance', ',', 'firstSideBearing', '=', 'mtxTable', '[', 'glyphName', ']', 'advances', '.', 'append', '(', 'advance', ')', 'bounds', '=', 'self', '.', 'glyphBoundingBoxes', '[', 'glyphName', ']', 'if', 'bounds', 'is', 'None', ':', 'continue', 'if', 'isHhea', ':', 'boundsAdvance', '=', '(', 'bounds', '.', 'xMax', '-', 'bounds', '.', 'xMin', ')', '# equation from the hhea spec for calculating xMaxExtent:', '# Max(lsb + (xMax - xMin))', 'extent', '=', 'firstSideBearing', '+', 'boundsAdvance', 'else', ':', 'boundsAdvance', '=', '(', 'bounds', '.', 'yMax', '-', 'bounds', '.', 'yMin', ')', '# equation from the vhea spec for calculating yMaxExtent:', '# Max(tsb + (yMax - yMin)).', 'extent', '=', 'firstSideBearing', '+', 'boundsAdvance', 'secondSideBearing', '=', 'advance', '-', 'firstSideBearing', '-', 'boundsAdvance', 'firstSideBearings', '.', 'append', '(', 'firstSideBearing', ')', 'secondSideBearings', '.', 'append', '(', 'secondSideBearing', ')', 'extents', '.', 'append', '(', 'extent', ')', 'setattr', '(', 'table', ',', '"advance%sMax"', '%', '(', '"Width"', 'if', 'isHhea', 'else', '"Height"', ')', ',', 'max', '(', 'advances', ')', 'if', 'advances', 'else', '0', ')', 'setattr', '(', 'table', ',', '"min%sSideBearing"', '%', '(', '"Left"', 'if', 'isHhea', 'else', '"Top"', ')', ',', 'min', '(', 'firstSideBearings', ')', 'if', 'firstSideBearings', 
'else', '0', ')', 'setattr', '(', 'table', ',', '"min%sSideBearing"', '%', '(', '"Right"', 'if', 'isHhea', 'else', '"Bottom"', ')', ',', 'min', '(', 'secondSideBearings', ')', 'if', 'secondSideBearings', 'else', '0', ')', 'setattr', '(', 'table', ',', '"%sMaxExtent"', '%', '(', '"x"', 'if', 'isHhea', 'else', '"y"', ')', ',', 'max', '(', 'extents', ')', 'if', 'extents', 'else', '0', ')', 'if', 'isHhea', ':', 'reserved', '=', 'range', '(', '4', ')', 'else', ':', '# vhea.reserved0 is caretOffset for legacy reasons', 'reserved', '=', 'range', '(', '1', ',', '5', ')', 'for', 'i', 'in', 'reserved', ':', 'setattr', '(', 'table', ',', '"reserved%i"', '%', 'i', ',', '0', ')', 'table', '.', 'metricDataFormat', '=', '0', '# glyph count', 'setattr', '(', 'table', ',', '"numberOf%sMetrics"', '%', '(', '"H"', 'if', 'isHhea', 'else', '"V"', ')', ',', 'len', '(', 'self', '.', 'allGlyphs', ')', ')'] | Make the hhea table or the vhea table. This assume the hmtx or
the vmtx were respectively made first. | ['Make', 'the', 'hhea', 'table', 'or', 'the', 'vhea', 'table', '.', 'This', 'assume', 'the', 'hmtx', 'or', 'the', 'vmtx', 'were', 'respectively', 'made', 'first', '.'] | train | https://github.com/googlefonts/ufo2ft/blob/915b986558e87bee288765d9218cc1cd4ebf7f4c/Lib/ufo2ft/outlineCompiler.py#L635-L727 |
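The aggregate hhea/vhea values assembled above reduce to min/max arithmetic over per-glyph metrics. A self-contained sketch with invented glyph numbers (not taken from any real UFO) showing the same bookkeeping for the horizontal case:

# (advance width, left side bearing, xMin, xMax) per glyph; None marks an empty glyph.
glyph_metrics = {
    'A': (600, 20, 20, 580),
    'B': (560, 30, 30, 520),
    'space': (250, 0, None, None),
}

advances, lsbs, rsbs, extents = [], [], [], []
for advance, lsb, x_min, x_max in glyph_metrics.values():
    advances.append(advance)
    if x_min is None:          # no outline: skip, as the `bounds is None` branch does
        continue
    width = x_max - x_min
    lsbs.append(lsb)
    rsbs.append(advance - lsb - width)   # right side bearing
    extents.append(lsb + width)          # hhea spec: xMaxExtent = Max(lsb + (xMax - xMin))

print('advanceWidthMax     =', max(advances))   # 600
print('minLeftSideBearing  =', min(lsbs))       # 20
print('minRightSideBearing =', min(rsbs))       # 20
print('xMaxExtent          =', max(extents))    # 580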
3,889 | klahnakoski/pyLibrary | mo_json_config/__init__.py | get | def get(url):
"""
USE json.net CONVENTIONS TO LINK TO INLINE OTHER JSON
"""
url = text_type(url)
if url.find("://") == -1:
Log.error("{{url}} must have a prototcol (eg http://) declared", url=url)
base = URL("")
if url.startswith("file://") and url[7] != "/":
if os.sep=="\\":
base = URL("file:///" + os.getcwd().replace(os.sep, "/").rstrip("/") + "/.")
else:
base = URL("file://" + os.getcwd().rstrip("/") + "/.")
elif url[url.find("://") + 3] != "/":
Log.error("{{url}} must be absolute", url=url)
phase1 = _replace_ref(wrap({"$ref": url}), base) # BLANK URL ONLY WORKS IF url IS ABSOLUTE
try:
phase2 = _replace_locals(phase1, [phase1])
return wrap(phase2)
except Exception as e:
Log.error("problem replacing locals in\n{{phase1}}", phase1=phase1, cause=e) | python | def get(url):
"""
USE json.net CONVENTIONS TO LINK TO INLINE OTHER JSON
"""
url = text_type(url)
if url.find("://") == -1:
Log.error("{{url}} must have a prototcol (eg http://) declared", url=url)
base = URL("")
if url.startswith("file://") and url[7] != "/":
if os.sep=="\\":
base = URL("file:///" + os.getcwd().replace(os.sep, "/").rstrip("/") + "/.")
else:
base = URL("file://" + os.getcwd().rstrip("/") + "/.")
elif url[url.find("://") + 3] != "/":
Log.error("{{url}} must be absolute", url=url)
phase1 = _replace_ref(wrap({"$ref": url}), base) # BLANK URL ONLY WORKS IF url IS ABSOLUTE
try:
phase2 = _replace_locals(phase1, [phase1])
return wrap(phase2)
except Exception as e:
Log.error("problem replacing locals in\n{{phase1}}", phase1=phase1, cause=e) | ['def', 'get', '(', 'url', ')', ':', 'url', '=', 'text_type', '(', 'url', ')', 'if', 'url', '.', 'find', '(', '"://"', ')', '==', '-', '1', ':', 'Log', '.', 'error', '(', '"{{url}} must have a prototcol (eg http://) declared"', ',', 'url', '=', 'url', ')', 'base', '=', 'URL', '(', '""', ')', 'if', 'url', '.', 'startswith', '(', '"file://"', ')', 'and', 'url', '[', '7', ']', '!=', '"/"', ':', 'if', 'os', '.', 'sep', '==', '"\\\\"', ':', 'base', '=', 'URL', '(', '"file:///"', '+', 'os', '.', 'getcwd', '(', ')', '.', 'replace', '(', 'os', '.', 'sep', ',', '"/"', ')', '.', 'rstrip', '(', '"/"', ')', '+', '"/."', ')', 'else', ':', 'base', '=', 'URL', '(', '"file://"', '+', 'os', '.', 'getcwd', '(', ')', '.', 'rstrip', '(', '"/"', ')', '+', '"/."', ')', 'elif', 'url', '[', 'url', '.', 'find', '(', '"://"', ')', '+', '3', ']', '!=', '"/"', ':', 'Log', '.', 'error', '(', '"{{url}} must be absolute"', ',', 'url', '=', 'url', ')', 'phase1', '=', '_replace_ref', '(', 'wrap', '(', '{', '"$ref"', ':', 'url', '}', ')', ',', 'base', ')', '# BLANK URL ONLY WORKS IF url IS ABSOLUTE', 'try', ':', 'phase2', '=', '_replace_locals', '(', 'phase1', ',', '[', 'phase1', ']', ')', 'return', 'wrap', '(', 'phase2', ')', 'except', 'Exception', 'as', 'e', ':', 'Log', '.', 'error', '(', '"problem replacing locals in\\n{{phase1}}"', ',', 'phase1', '=', 'phase1', ',', 'cause', '=', 'e', ')'] | USE json.net CONVENTIONS TO LINK TO INLINE OTHER JSON | ['USE', 'json', '.', 'net', 'CONVENTIONS', 'TO', 'LINK', 'TO', 'INLINE', 'OTHER', 'JSON'] | train | https://github.com/klahnakoski/pyLibrary/blob/fa2dcbc48fda8d26999baef400e9a98149e0b982/mo_json_config/__init__.py#L36-L58 |
3,890 | eyurtsev/FlowCytometryTools | FlowCytometryTools/core/bases.py | MeasurementCollection.filter_by_IDs | def filter_by_IDs(self, ids, ID=None):
"""
Keep only Measurements with given IDs.
"""
fil = lambda x: x in ids
return self.filter_by_attr('ID', fil, ID) | python | def filter_by_IDs(self, ids, ID=None):
"""
Keep only Measurements with given IDs.
"""
fil = lambda x: x in ids
return self.filter_by_attr('ID', fil, ID) | ['def', 'filter_by_IDs', '(', 'self', ',', 'ids', ',', 'ID', '=', 'None', ')', ':', 'fil', '=', 'lambda', 'x', ':', 'x', 'in', 'ids', 'return', 'self', '.', 'filter_by_attr', '(', "'ID'", ',', 'fil', ',', 'ID', ')'] | Keep only Measurements with given IDs. | ['Keep', 'only', 'Measurements', 'with', 'given', 'IDs', '.'] | train | https://github.com/eyurtsev/FlowCytometryTools/blob/4355632508b875273d68c7e2972c17668bcf7b40/FlowCytometryTools/core/bases.py#L658-L663 |
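filter_by_IDs is a thin wrapper that turns the given IDs into a membership predicate for filter_by_attr. The predicate pattern in isolation, with hypothetical sample IDs:

wanted = {'Plate1_A01', 'Plate1_A02'}
fil = lambda x: x in wanted                 # same predicate the method builds

all_ids = ['Plate1_A01', 'Plate1_B05', 'Plate1_A02']
print([i for i in all_ids if fil(i)])       # ['Plate1_A01', 'Plate1_A02']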
3,891 | oscarlazoarjona/fast | fast/bloch.py | observable | def observable(operator, rho, unfolding, complex=False):
r"""Return an observable ammount.
INPUT:
- ``operator`` - An square matrix representing a hermitian operator \
in thesame basis as the density matrix.
- ``rho`` - A density matrix in unfolded format, or a list of such \
density matrices.
- ``unfolding`` - A mapping from matrix element indices to unfolded \
indices.
>>> Ne = 2
>>> unfolding = Unfolding(Ne, True, True, True)
>>> rho = np.array([[0.6, 1+2j], [1-2j, 0.4]])
>>> rho = unfolding(rho)
>>> sx = np.array([[0, 1], [1, 0]])
>>> print(observable(sx, rho, unfolding))
2.0
"""
if len(rho.shape) == 2:
return np.array([observable(operator, i, unfolding) for i in rho])
Ne = unfolding.Ne
Mu = unfolding.Mu
obs = 0
if unfolding.normalized:
rho11 = 1 - sum([rho[Mu(1, i, i)] for i in range(1, Ne)])
for i in range(Ne):
for k in range(Ne):
if unfolding.real:
if k == 0 and i == 0:
obs += operator[i, k]*rho11
else:
if k < i:
u, v = (i, k)
else:
u, v = (k, i)
obs += operator[i, k]*rho[Mu(1, u, v)]
if k != i:
if k < i:
obs += 1j*operator[i, k]*rho[Mu(-1, u, v)]
else:
obs += -1j*operator[i, k]*rho[Mu(-1, u, v)]
else:
if k == 0 and i == 0:
obs += operator[i, k]*rho11
else:
obs += operator[i, k]*rho[Mu(0, k, i)]
if not complex:
obs = np.real(obs)
return obs | python | def observable(operator, rho, unfolding, complex=False):
r"""Return an observable ammount.
INPUT:
- ``operator`` - An square matrix representing a hermitian operator \
in thesame basis as the density matrix.
- ``rho`` - A density matrix in unfolded format, or a list of such \
density matrices.
- ``unfolding`` - A mapping from matrix element indices to unfolded \
indices.
>>> Ne = 2
>>> unfolding = Unfolding(Ne, True, True, True)
>>> rho = np.array([[0.6, 1+2j], [1-2j, 0.4]])
>>> rho = unfolding(rho)
>>> sx = np.array([[0, 1], [1, 0]])
>>> print(observable(sx, rho, unfolding))
2.0
"""
if len(rho.shape) == 2:
return np.array([observable(operator, i, unfolding) for i in rho])
Ne = unfolding.Ne
Mu = unfolding.Mu
obs = 0
if unfolding.normalized:
rho11 = 1 - sum([rho[Mu(1, i, i)] for i in range(1, Ne)])
for i in range(Ne):
for k in range(Ne):
if unfolding.real:
if k == 0 and i == 0:
obs += operator[i, k]*rho11
else:
if k < i:
u, v = (i, k)
else:
u, v = (k, i)
obs += operator[i, k]*rho[Mu(1, u, v)]
if k != i:
if k < i:
obs += 1j*operator[i, k]*rho[Mu(-1, u, v)]
else:
obs += -1j*operator[i, k]*rho[Mu(-1, u, v)]
else:
if k == 0 and i == 0:
obs += operator[i, k]*rho11
else:
obs += operator[i, k]*rho[Mu(0, k, i)]
if not complex:
obs = np.real(obs)
return obs | ['def', 'observable', '(', 'operator', ',', 'rho', ',', 'unfolding', ',', 'complex', '=', 'False', ')', ':', 'if', 'len', '(', 'rho', '.', 'shape', ')', '==', '2', ':', 'return', 'np', '.', 'array', '(', '[', 'observable', '(', 'operator', ',', 'i', ',', 'unfolding', ')', 'for', 'i', 'in', 'rho', ']', ')', 'Ne', '=', 'unfolding', '.', 'Ne', 'Mu', '=', 'unfolding', '.', 'Mu', 'obs', '=', '0', 'if', 'unfolding', '.', 'normalized', ':', 'rho11', '=', '1', '-', 'sum', '(', '[', 'rho', '[', 'Mu', '(', '1', ',', 'i', ',', 'i', ')', ']', 'for', 'i', 'in', 'range', '(', '1', ',', 'Ne', ')', ']', ')', 'for', 'i', 'in', 'range', '(', 'Ne', ')', ':', 'for', 'k', 'in', 'range', '(', 'Ne', ')', ':', 'if', 'unfolding', '.', 'real', ':', 'if', 'k', '==', '0', 'and', 'i', '==', '0', ':', 'obs', '+=', 'operator', '[', 'i', ',', 'k', ']', '*', 'rho11', 'else', ':', 'if', 'k', '<', 'i', ':', 'u', ',', 'v', '=', '(', 'i', ',', 'k', ')', 'else', ':', 'u', ',', 'v', '=', '(', 'k', ',', 'i', ')', 'obs', '+=', 'operator', '[', 'i', ',', 'k', ']', '*', 'rho', '[', 'Mu', '(', '1', ',', 'u', ',', 'v', ')', ']', 'if', 'k', '!=', 'i', ':', 'if', 'k', '<', 'i', ':', 'obs', '+=', '1j', '*', 'operator', '[', 'i', ',', 'k', ']', '*', 'rho', '[', 'Mu', '(', '-', '1', ',', 'u', ',', 'v', ')', ']', 'else', ':', 'obs', '+=', '-', '1j', '*', 'operator', '[', 'i', ',', 'k', ']', '*', 'rho', '[', 'Mu', '(', '-', '1', ',', 'u', ',', 'v', ')', ']', 'else', ':', 'if', 'k', '==', '0', 'and', 'i', '==', '0', ':', 'obs', '+=', 'operator', '[', 'i', ',', 'k', ']', '*', 'rho11', 'else', ':', 'obs', '+=', 'operator', '[', 'i', ',', 'k', ']', '*', 'rho', '[', 'Mu', '(', '0', ',', 'k', ',', 'i', ')', ']', 'if', 'not', 'complex', ':', 'obs', '=', 'np', '.', 'real', '(', 'obs', ')', 'return', 'obs'] | r"""Return an observable ammount.
INPUT:
- ``operator`` - An square matrix representing a hermitian operator \
in thesame basis as the density matrix.
- ``rho`` - A density matrix in unfolded format, or a list of such \
density matrices.
- ``unfolding`` - A mapping from matrix element indices to unfolded \
indices.
>>> Ne = 2
>>> unfolding = Unfolding(Ne, True, True, True)
>>> rho = np.array([[0.6, 1+2j], [1-2j, 0.4]])
>>> rho = unfolding(rho)
>>> sx = np.array([[0, 1], [1, 0]])
>>> print(observable(sx, rho, unfolding))
2.0 | ['r', 'Return', 'an', 'observable', 'ammount', '.'] | train | https://github.com/oscarlazoarjona/fast/blob/3e5400672af2a7b7cc616e7f4aa10d7672720222/fast/bloch.py#L3161-L3217 |
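The doctest value above can be checked independently: for a density matrix rho and a Hermitian operator O, the expectation value is Tr(rho O). A numpy-only verification of the 2.0 result:

import numpy as np

rho = np.array([[0.6, 1 + 2j],
                [1 - 2j, 0.4]])
sx = np.array([[0, 1],
               [1, 0]])

# <sigma_x> = Tr(rho @ sigma_x) = 2 * Re(rho_12) = 2.0
print(np.real(np.trace(rho @ sx)))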
3,892 | CyberReboot/vent | vent/extras/network_tap/ncontrol/paths.py | NICsR.on_get | def on_get(self, req, resp):
"""
Send a GET request to get the list of all available network interfaces
"""
resp.content_type = falcon.MEDIA_TEXT
resp.status = falcon.HTTP_200
# connect to docker
try:
d_client = docker.from_env()
except Exception as e: # pragma: no cover
resp.body = "(False, 'unable to connect to docker because: " + str(e) + "')"
return
# start container to get network interfaces
nics = ''
try:
nics = d_client.containers.run('cyberreboot/gonet',
network_mode='host', remove=True)
resp.body = '(True, ' + str(nics.id) + ')'
except Exception as e: # pragma: no cover
resp.body = "(False, 'Failure because: " + str(e) + "')"
return
return | python | def on_get(self, req, resp):
"""
Send a GET request to get the list of all available network interfaces
"""
resp.content_type = falcon.MEDIA_TEXT
resp.status = falcon.HTTP_200
# connect to docker
try:
d_client = docker.from_env()
except Exception as e: # pragma: no cover
resp.body = "(False, 'unable to connect to docker because: " + str(e) + "')"
return
# start container to get network interfaces
nics = ''
try:
nics = d_client.containers.run('cyberreboot/gonet',
network_mode='host', remove=True)
resp.body = '(True, ' + str(nics.id) + ')'
except Exception as e: # pragma: no cover
resp.body = "(False, 'Failure because: " + str(e) + "')"
return
return | ['def', 'on_get', '(', 'self', ',', 'req', ',', 'resp', ')', ':', 'resp', '.', 'content_type', '=', 'falcon', '.', 'MEDIA_TEXT', 'resp', '.', 'status', '=', 'falcon', '.', 'HTTP_200', '# connect to docker', 'try', ':', 'd_client', '=', 'docker', '.', 'from_env', '(', ')', 'except', 'Exception', 'as', 'e', ':', '# pragma: no cover', 'resp', '.', 'body', '=', '"(False, \'unable to connect to docker because: "', '+', 'str', '(', 'e', ')', '+', '"\')"', 'return', '# start container to get network interfaces', 'nics', '=', "''", 'try', ':', 'nics', '=', 'd_client', '.', 'containers', '.', 'run', '(', "'cyberreboot/gonet'", ',', 'network_mode', '=', "'host'", ',', 'remove', '=', 'True', ')', 'resp', '.', 'body', '=', "'(True, '", '+', 'str', '(', 'nics', '.', 'id', ')', '+', "')'", 'except', 'Exception', 'as', 'e', ':', '# pragma: no cover', 'resp', '.', 'body', '=', '"(False, \'Failure because: "', '+', 'str', '(', 'e', ')', '+', '"\')"', 'return', 'return'] | Send a GET request to get the list of all available network interfaces | ['Send', 'a', 'GET', 'request', 'to', 'get', 'the', 'list', 'of', 'all', 'available', 'network', 'interfaces'] | train | https://github.com/CyberReboot/vent/blob/9956a09146b11a89a0eabab3bc7ce8906d124885/vent/extras/network_tap/ncontrol/paths.py#L201-L225 |
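A sketch of mounting the resource in a Falcon application. The route path is arbitrary and the dotted import path is inferred from the file path in the entry, so treat both as assumptions; Docker must be reachable on the host for the handler to succeed.

import falcon
from vent.extras.network_tap.ncontrol.paths import NICsR  # inferred from the entry path

app = falcon.App()          # use falcon.API() on Falcon 2.x
app.add_route('/nics', NICsR())

# GET /nics runs the cyberreboot/gonet container on the host network and returns
# "(True, <container id>)" or "(False, '<reason>')" as plain text.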
3,893 | jtwhite79/pyemu | pyemu/utils/helpers.py | PstFromFlopyModel.setup_hds | def setup_hds(self):
""" setup modflow head save file observations for given kper (zero-based
stress period index) and k (zero-based layer index) pairs using the
kperk argument.
Note
----
this can setup a shit-ton of observations
this is useful for dataworth analyses or for monitoring
water levels as forecasts
"""
if self.hds_kperk is None or len(self.hds_kperk) == 0:
return
from .gw_utils import setup_hds_obs
# if len(self.hds_kperk) == 2:
# try:
# if len(self.hds_kperk[0] == 2):
# pass
# except:
# self.hds_kperk = [self.hds_kperk]
oc = self.m.get_package("OC")
if oc is None:
raise Exception("can't find OC package in model to setup hds grid obs")
if not oc.savehead:
raise Exception("OC not saving hds, can't setup grid obs")
hds_unit = oc.iuhead
hds_file = self.m.get_output(unit=hds_unit)
assert os.path.exists(os.path.join(self.org_model_ws,hds_file)),\
"couldn't find existing hds file {0} in org_model_ws".format(hds_file)
shutil.copy2(os.path.join(self.org_model_ws,hds_file),
os.path.join(self.m.model_ws,hds_file))
inact = None
if self.m.lpf is not None:
inact = self.m.lpf.hdry
elif self.m.upw is not None:
inact = self.m.upw.hdry
if inact is None:
skip = lambda x: np.NaN if x == self.m.bas6.hnoflo else x
else:
skip = lambda x: np.NaN if x == self.m.bas6.hnoflo or x == inact else x
print(self.hds_kperk)
frun_line, df = setup_hds_obs(os.path.join(self.m.model_ws,hds_file),
kperk_pairs=self.hds_kperk,skip=skip)
self.obs_dfs["hds"] = df
self.frun_post_lines.append("pyemu.gw_utils.apply_hds_obs('{0}')".format(hds_file))
self.tmp_files.append(hds_file) | python | def setup_hds(self):
""" setup modflow head save file observations for given kper (zero-based
stress period index) and k (zero-based layer index) pairs using the
kperk argument.
Note
----
this can setup a shit-ton of observations
this is useful for dataworth analyses or for monitoring
water levels as forecasts
"""
if self.hds_kperk is None or len(self.hds_kperk) == 0:
return
from .gw_utils import setup_hds_obs
# if len(self.hds_kperk) == 2:
# try:
# if len(self.hds_kperk[0] == 2):
# pass
# except:
# self.hds_kperk = [self.hds_kperk]
oc = self.m.get_package("OC")
if oc is None:
raise Exception("can't find OC package in model to setup hds grid obs")
if not oc.savehead:
raise Exception("OC not saving hds, can't setup grid obs")
hds_unit = oc.iuhead
hds_file = self.m.get_output(unit=hds_unit)
assert os.path.exists(os.path.join(self.org_model_ws,hds_file)),\
"couldn't find existing hds file {0} in org_model_ws".format(hds_file)
shutil.copy2(os.path.join(self.org_model_ws,hds_file),
os.path.join(self.m.model_ws,hds_file))
inact = None
if self.m.lpf is not None:
inact = self.m.lpf.hdry
elif self.m.upw is not None:
inact = self.m.upw.hdry
if inact is None:
skip = lambda x: np.NaN if x == self.m.bas6.hnoflo else x
else:
skip = lambda x: np.NaN if x == self.m.bas6.hnoflo or x == inact else x
print(self.hds_kperk)
frun_line, df = setup_hds_obs(os.path.join(self.m.model_ws,hds_file),
kperk_pairs=self.hds_kperk,skip=skip)
self.obs_dfs["hds"] = df
self.frun_post_lines.append("pyemu.gw_utils.apply_hds_obs('{0}')".format(hds_file))
self.tmp_files.append(hds_file) | ['def', 'setup_hds', '(', 'self', ')', ':', 'if', 'self', '.', 'hds_kperk', 'is', 'None', 'or', 'len', '(', 'self', '.', 'hds_kperk', ')', '==', '0', ':', 'return', 'from', '.', 'gw_utils', 'import', 'setup_hds_obs', '# if len(self.hds_kperk) == 2:', '# try:', '# if len(self.hds_kperk[0] == 2):', '# pass', '# except:', '# self.hds_kperk = [self.hds_kperk]', 'oc', '=', 'self', '.', 'm', '.', 'get_package', '(', '"OC"', ')', 'if', 'oc', 'is', 'None', ':', 'raise', 'Exception', '(', '"can\'t find OC package in model to setup hds grid obs"', ')', 'if', 'not', 'oc', '.', 'savehead', ':', 'raise', 'Exception', '(', '"OC not saving hds, can\'t setup grid obs"', ')', 'hds_unit', '=', 'oc', '.', 'iuhead', 'hds_file', '=', 'self', '.', 'm', '.', 'get_output', '(', 'unit', '=', 'hds_unit', ')', 'assert', 'os', '.', 'path', '.', 'exists', '(', 'os', '.', 'path', '.', 'join', '(', 'self', '.', 'org_model_ws', ',', 'hds_file', ')', ')', ',', '"couldn\'t find existing hds file {0} in org_model_ws"', '.', 'format', '(', 'hds_file', ')', 'shutil', '.', 'copy2', '(', 'os', '.', 'path', '.', 'join', '(', 'self', '.', 'org_model_ws', ',', 'hds_file', ')', ',', 'os', '.', 'path', '.', 'join', '(', 'self', '.', 'm', '.', 'model_ws', ',', 'hds_file', ')', ')', 'inact', '=', 'None', 'if', 'self', '.', 'm', '.', 'lpf', 'is', 'not', 'None', ':', 'inact', '=', 'self', '.', 'm', '.', 'lpf', '.', 'hdry', 'elif', 'self', '.', 'm', '.', 'upw', 'is', 'not', 'None', ':', 'inact', '=', 'self', '.', 'm', '.', 'upw', '.', 'hdry', 'if', 'inact', 'is', 'None', ':', 'skip', '=', 'lambda', 'x', ':', 'np', '.', 'NaN', 'if', 'x', '==', 'self', '.', 'm', '.', 'bas6', '.', 'hnoflo', 'else', 'x', 'else', ':', 'skip', '=', 'lambda', 'x', ':', 'np', '.', 'NaN', 'if', 'x', '==', 'self', '.', 'm', '.', 'bas6', '.', 'hnoflo', 'or', 'x', '==', 'inact', 'else', 'x', 'print', '(', 'self', '.', 'hds_kperk', ')', 'frun_line', ',', 'df', '=', 'setup_hds_obs', '(', 'os', '.', 'path', '.', 'join', '(', 'self', '.', 'm', '.', 'model_ws', ',', 'hds_file', ')', ',', 'kperk_pairs', '=', 'self', '.', 'hds_kperk', ',', 'skip', '=', 'skip', ')', 'self', '.', 'obs_dfs', '[', '"hds"', ']', '=', 'df', 'self', '.', 'frun_post_lines', '.', 'append', '(', '"pyemu.gw_utils.apply_hds_obs(\'{0}\')"', '.', 'format', '(', 'hds_file', ')', ')', 'self', '.', 'tmp_files', '.', 'append', '(', 'hds_file', ')'] | setup modflow head save file observations for given kper (zero-based
stress period index) and k (zero-based layer index) pairs using the
kperk argument.
Note
----
this can setup a shit-ton of observations
this is useful for dataworth analyses or for monitoring
water levels as forecasts | ['setup', 'modflow', 'head', 'save', 'file', 'observations', 'for', 'given', 'kper', '(', 'zero', '-', 'based', 'stress', 'period', 'index', ')', 'and', 'k', '(', 'zero', '-', 'based', 'layer', 'index', ')', 'pairs', 'using', 'the', 'kperk', 'argument', '.'] | train | https://github.com/jtwhite79/pyemu/blob/c504d8e7a4097cec07655a6318d275739bd8148a/pyemu/utils/helpers.py#L3272-L3321 |
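hds_kperk is a list of (kper, k) pairs, both zero-based, naming the stress periods and layers whose heads become observations. A hedged sketch of supplying it; the keyword name is taken from the attribute above, and the rest of the constructor call depends on the model so it is left commented out.

# heads in layers 0 and 2 at stress periods 10 and 11 (all zero-based)
hds_kperk = [(10, 0), (10, 2),
             (11, 0), (11, 2)]

# ph = pyemu.helpers.PstFromFlopyModel(..., hds_kperk=hds_kperk, ...)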
3,894 | touilleMan/marshmallow-mongoengine | marshmallow_mongoengine/conversion/fields.py | MetaFieldBuilder.build_marshmallow_field | def build_marshmallow_field(self, **kwargs):
"""
:return: The Marshmallow Field instanciated and configured
"""
field_kwargs = None
for param in self.params:
field_kwargs = param.apply(field_kwargs)
field_kwargs.update(kwargs)
return self.marshmallow_field_cls(**field_kwargs) | python | def build_marshmallow_field(self, **kwargs):
"""
:return: The Marshmallow Field instanciated and configured
"""
field_kwargs = None
for param in self.params:
field_kwargs = param.apply(field_kwargs)
field_kwargs.update(kwargs)
return self.marshmallow_field_cls(**field_kwargs) | ['def', 'build_marshmallow_field', '(', 'self', ',', '*', '*', 'kwargs', ')', ':', 'field_kwargs', '=', 'None', 'for', 'param', 'in', 'self', '.', 'params', ':', 'field_kwargs', '=', 'param', '.', 'apply', '(', 'field_kwargs', ')', 'field_kwargs', '.', 'update', '(', 'kwargs', ')', 'return', 'self', '.', 'marshmallow_field_cls', '(', '*', '*', 'field_kwargs', ')'] | :return: The Marshmallow Field instanciated and configured | [':', 'return', ':', 'The', 'Marshmallow', 'Field', 'instanciated', 'and', 'configured'] | train | https://github.com/touilleMan/marshmallow-mongoengine/blob/21223700ea1f1d0209c967761e5c22635ee721e7/marshmallow_mongoengine/conversion/fields.py#L25-L33 |
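build_marshmallow_field folds each parameter object's apply() into a kwargs dict and then instantiates the marshmallow field class. The same flow in isolation, with a stand-in parameter object and marshmallow's String field standing in for marshmallow_field_cls:

import marshmallow as ma

class RequiredParam:
    def apply(self, field_kwargs):
        field_kwargs = field_kwargs or {}
        field_kwargs['required'] = True
        return field_kwargs

params = [RequiredParam()]
field_kwargs = None
for param in params:
    field_kwargs = param.apply(field_kwargs)
field_kwargs.update({'allow_none': False})   # caller overrides, like **kwargs above

field = ma.fields.String(**field_kwargs)
print(field.required, field.allow_none)      # True False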
3,895 | JoeVirtual/KonFoo | konfoo/core.py | Field.serialize | def serialize(self, buffer=bytearray(), index=Index(), **options):
""" Serializes the `Field` to the byte *buffer* starting at the begin
of the *buffer* or with the given *index* by packing the :attr:`value`
of the `Field` to the byte *buffer* in accordance with the encoding
*byte order* for the serialization and the encoding :attr:`byte_order`
of the `Field`.
The specific encoding :attr:`byte_order` of the `Field` overrules the
encoding *byte order* for the serialization.
Returns the :class:`Index` of the *buffer* after the `Field`.
Optional the serialization of the referenced :attr:`~Pointer.data` object
of a :class:`Pointer` field can be enabled.
:param bytearray buffer: byte stream.
:param Index index: current write :class:`Index` of the *buffer*.
:keyword byte_order: encoding byte order for the serialization.
:type byte_order: :class:`Byteorder`, :class:`str`
:keyword bool nested: if ``True`` a :class:`Pointer` field serializes
its referenced :attr:`~Pointer.data` object as well
(chained method call).
Each :class:`Pointer` field uses for the encoding of its referenced
:attr:`~Pointer.data` object its own :attr:`~Pointer.bytestream`.
"""
self.index = index
buffer += self.pack(buffer, **options)
return self.index_field(index) | python | def serialize(self, buffer=bytearray(), index=Index(), **options):
""" Serializes the `Field` to the byte *buffer* starting at the begin
of the *buffer* or with the given *index* by packing the :attr:`value`
of the `Field` to the byte *buffer* in accordance with the encoding
*byte order* for the serialization and the encoding :attr:`byte_order`
of the `Field`.
The specific encoding :attr:`byte_order` of the `Field` overrules the
encoding *byte order* for the serialization.
Returns the :class:`Index` of the *buffer* after the `Field`.
Optional the serialization of the referenced :attr:`~Pointer.data` object
of a :class:`Pointer` field can be enabled.
:param bytearray buffer: byte stream.
:param Index index: current write :class:`Index` of the *buffer*.
:keyword byte_order: encoding byte order for the serialization.
:type byte_order: :class:`Byteorder`, :class:`str`
:keyword bool nested: if ``True`` a :class:`Pointer` field serializes
its referenced :attr:`~Pointer.data` object as well
(chained method call).
Each :class:`Pointer` field uses for the encoding of its referenced
:attr:`~Pointer.data` object its own :attr:`~Pointer.bytestream`.
"""
self.index = index
buffer += self.pack(buffer, **options)
return self.index_field(index) | ['def', 'serialize', '(', 'self', ',', 'buffer', '=', 'bytearray', '(', ')', ',', 'index', '=', 'Index', '(', ')', ',', '*', '*', 'options', ')', ':', 'self', '.', 'index', '=', 'index', 'buffer', '+=', 'self', '.', 'pack', '(', 'buffer', ',', '*', '*', 'options', ')', 'return', 'self', '.', 'index_field', '(', 'index', ')'] | Serializes the `Field` to the byte *buffer* starting at the begin
of the *buffer* or with the given *index* by packing the :attr:`value`
of the `Field` to the byte *buffer* in accordance with the encoding
*byte order* for the serialization and the encoding :attr:`byte_order`
of the `Field`.
The specific encoding :attr:`byte_order` of the `Field` overrules the
encoding *byte order* for the serialization.
Returns the :class:`Index` of the *buffer* after the `Field`.
Optional the serialization of the referenced :attr:`~Pointer.data` object
of a :class:`Pointer` field can be enabled.
:param bytearray buffer: byte stream.
:param Index index: current write :class:`Index` of the *buffer*.
:keyword byte_order: encoding byte order for the serialization.
:type byte_order: :class:`Byteorder`, :class:`str`
:keyword bool nested: if ``True`` a :class:`Pointer` field serializes
its referenced :attr:`~Pointer.data` object as well
(chained method call).
Each :class:`Pointer` field uses for the encoding of its referenced
:attr:`~Pointer.data` object its own :attr:`~Pointer.bytestream`. | ['Serializes', 'the', 'Field', 'to', 'the', 'byte', '*', 'buffer', '*', 'starting', 'at', 'the', 'begin', 'of', 'the', '*', 'buffer', '*', 'or', 'with', 'the', 'given', '*', 'index', '*', 'by', 'packing', 'the', ':', 'attr', ':', 'value', 'of', 'the', 'Field', 'to', 'the', 'byte', '*', 'buffer', '*', 'in', 'accordance', 'with', 'the', 'encoding', '*', 'byte', 'order', '*', 'for', 'the', 'serialization', 'and', 'the', 'encoding', ':', 'attr', ':', 'byte_order', 'of', 'the', 'Field', '.'] | train | https://github.com/JoeVirtual/KonFoo/blob/0c62ef5c2bed4deaf908b34082e4de2544532fdc/konfoo/core.py#L1778-L1805 |
3,896 | saltstack/salt | salt/cache/localfs.py | list_ | def list_(bank, cachedir):
'''
Return an iterable object containing all entries stored in the specified bank.
'''
base = os.path.join(cachedir, os.path.normpath(bank))
if not os.path.isdir(base):
return []
try:
items = os.listdir(base)
except OSError as exc:
raise SaltCacheError(
'There was an error accessing directory "{0}": {1}'.format(
base, exc
)
)
ret = []
for item in items:
if item.endswith('.p'):
ret.append(item.rstrip(item[-2:]))
else:
ret.append(item)
return ret | python | def list_(bank, cachedir):
'''
Return an iterable object containing all entries stored in the specified bank.
'''
base = os.path.join(cachedir, os.path.normpath(bank))
if not os.path.isdir(base):
return []
try:
items = os.listdir(base)
except OSError as exc:
raise SaltCacheError(
'There was an error accessing directory "{0}": {1}'.format(
base, exc
)
)
ret = []
for item in items:
if item.endswith('.p'):
ret.append(item.rstrip(item[-2:]))
else:
ret.append(item)
return ret | ['def', 'list_', '(', 'bank', ',', 'cachedir', ')', ':', 'base', '=', 'os', '.', 'path', '.', 'join', '(', 'cachedir', ',', 'os', '.', 'path', '.', 'normpath', '(', 'bank', ')', ')', 'if', 'not', 'os', '.', 'path', '.', 'isdir', '(', 'base', ')', ':', 'return', '[', ']', 'try', ':', 'items', '=', 'os', '.', 'listdir', '(', 'base', ')', 'except', 'OSError', 'as', 'exc', ':', 'raise', 'SaltCacheError', '(', '\'There was an error accessing directory "{0}": {1}\'', '.', 'format', '(', 'base', ',', 'exc', ')', ')', 'ret', '=', '[', ']', 'for', 'item', 'in', 'items', ':', 'if', 'item', '.', 'endswith', '(', "'.p'", ')', ':', 'ret', '.', 'append', '(', 'item', '.', 'rstrip', '(', 'item', '[', '-', '2', ':', ']', ')', ')', 'else', ':', 'ret', '.', 'append', '(', 'item', ')', 'return', 'ret'] | Return an iterable object containing all entries stored in the specified bank. | ['Return', 'an', 'iterable', 'object', 'containing', 'all', 'entries', 'stored', 'in', 'the', 'specified', 'bank', '.'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cache/localfs.py#L148-L169 |
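A quick end-to-end check of the listing behaviour against a throwaway cache directory; the bank name and file names are illustrative, and the import is commented out because it pulls in the full salt package.

import os, tempfile
# from salt.cache.localfs import list_   # as defined in the entry above

cachedir = tempfile.mkdtemp()
bank = 'minions/minion1'
os.makedirs(os.path.join(cachedir, bank))
for name in ('grains.p', 'mine.p'):
    open(os.path.join(cachedir, bank, name), 'w').close()

# print(list_(bank, cachedir))   # entries come back with the '.p' extension trimmed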
3,897 | JdeRobot/base | src/drivers/MAVLinkServer/MAVProxy/pymavlink/mavextra.py | mag_roll | def mag_roll(RAW_IMU, inclination, declination):
'''estimate roll from mag'''
m = mag_rotation(RAW_IMU, inclination, declination)
(r, p, y) = m.to_euler()
return degrees(r) | python | def mag_roll(RAW_IMU, inclination, declination):
'''estimate roll from mag'''
m = mag_rotation(RAW_IMU, inclination, declination)
(r, p, y) = m.to_euler()
return degrees(r) | ['def', 'mag_roll', '(', 'RAW_IMU', ',', 'inclination', ',', 'declination', ')', ':', 'm', '=', 'mag_rotation', '(', 'RAW_IMU', ',', 'inclination', ',', 'declination', ')', '(', 'r', ',', 'p', ',', 'y', ')', '=', 'm', '.', 'to_euler', '(', ')', 'return', 'degrees', '(', 'r', ')'] | estimate roll from mag | ['estimate', 'roll', 'from', 'mag'] | train | https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/pymavlink/mavextra.py#L370-L374 |
3,898 | TUNE-Archive/freight_forwarder | freight_forwarder/registry/auth.py | Auth.load_dockercfg | def load_dockercfg(self):
"""
:return:
"""
if self.ssl_cert_path:
self._validate_ssl_certs()
if self.auth_type == 'registry_rubber':
self.user, self.passwd = self._registry_rubber_uonce('add')
self._config_path = self._create_dockercfg(
self.user,
self.passwd,
os.path.join(os.environ.get('HOME'), '.{0}.dockercfg'.format(self.user))
)
else:
if not os.path.isfile(self.config_path):
raise ValueError("Couldn't find dockercfg file: {0}".format(self.config_path))
with open(self.config_path, 'r') as f:
try:
config = json.loads(f.read())
except Exception:
raise SyntaxError("{0} doesn't container valid json.".format(self.config_path))
if self.registry not in config:
raise LookupError("Was unable to find {0} in {1}".format(self.registry, self.config_path))
registry_config = config[self.registry]
if 'auth' not in registry_config:
raise LookupError("Was unable to find 'auth' obj for {0} in {1}".format(self.registry, self.config_path))
credentials = base64.decodestring(registry_config['auth'])
self.user = credentials.get('user')
self.user = credentials.get('password') | python | def load_dockercfg(self):
"""
:return:
"""
if self.ssl_cert_path:
self._validate_ssl_certs()
if self.auth_type == 'registry_rubber':
self.user, self.passwd = self._registry_rubber_uonce('add')
self._config_path = self._create_dockercfg(
self.user,
self.passwd,
os.path.join(os.environ.get('HOME'), '.{0}.dockercfg'.format(self.user))
)
else:
if not os.path.isfile(self.config_path):
raise ValueError("Couldn't find dockercfg file: {0}".format(self.config_path))
with open(self.config_path, 'r') as f:
try:
config = json.loads(f.read())
except Exception:
raise SyntaxError("{0} doesn't container valid json.".format(self.config_path))
if self.registry not in config:
raise LookupError("Was unable to find {0} in {1}".format(self.registry, self.config_path))
registry_config = config[self.registry]
if 'auth' not in registry_config:
raise LookupError("Was unable to find 'auth' obj for {0} in {1}".format(self.registry, self.config_path))
credentials = base64.decodestring(registry_config['auth'])
self.user = credentials.get('user')
self.user = credentials.get('password') | ['def', 'load_dockercfg', '(', 'self', ')', ':', 'if', 'self', '.', 'ssl_cert_path', ':', 'self', '.', '_validate_ssl_certs', '(', ')', 'if', 'self', '.', 'auth_type', '==', "'registry_rubber'", ':', 'self', '.', 'user', ',', 'self', '.', 'passwd', '=', 'self', '.', '_registry_rubber_uonce', '(', "'add'", ')', 'self', '.', '_config_path', '=', 'self', '.', '_create_dockercfg', '(', 'self', '.', 'user', ',', 'self', '.', 'passwd', ',', 'os', '.', 'path', '.', 'join', '(', 'os', '.', 'environ', '.', 'get', '(', "'HOME'", ')', ',', "'.{0}.dockercfg'", '.', 'format', '(', 'self', '.', 'user', ')', ')', ')', 'else', ':', 'if', 'not', 'os', '.', 'path', '.', 'isfile', '(', 'self', '.', 'config_path', ')', ':', 'raise', 'ValueError', '(', '"Couldn\'t find dockercfg file: {0}"', '.', 'format', '(', 'self', '.', 'config_path', ')', ')', 'with', 'open', '(', 'self', '.', 'config_path', ',', "'r'", ')', 'as', 'f', ':', 'try', ':', 'config', '=', 'json', '.', 'loads', '(', 'f', '.', 'read', '(', ')', ')', 'except', 'Exception', ':', 'raise', 'SyntaxError', '(', '"{0} doesn\'t container valid json."', '.', 'format', '(', 'self', '.', 'config_path', ')', ')', 'if', 'self', '.', 'registry', 'not', 'in', 'config', ':', 'raise', 'LookupError', '(', '"Was unable to find {0} in {1}"', '.', 'format', '(', 'self', '.', 'registry', ',', 'self', '.', 'config_path', ')', ')', 'registry_config', '=', 'config', '[', 'self', '.', 'registry', ']', 'if', "'auth'", 'not', 'in', 'registry_config', ':', 'raise', 'LookupError', '(', '"Was unable to find \'auth\' obj for {0} in {1}"', '.', 'format', '(', 'self', '.', 'registry', ',', 'self', '.', 'config_path', ')', ')', 'credentials', '=', 'base64', '.', 'decodestring', '(', 'registry_config', '[', "'auth'", ']', ')', 'self', '.', 'user', '=', 'credentials', '.', 'get', '(', "'user'", ')', 'self', '.', 'user', '=', 'credentials', '.', 'get', '(', "'password'", ')'] | :return: | [':', 'return', ':'] | train | https://github.com/TUNE-Archive/freight_forwarder/blob/6ea4a49f474ec04abb8bb81b175c774a16b5312f/freight_forwarder/registry/auth.py#L114-L148 |
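For the non-registry_rubber branch, load_dockercfg expects a legacy .dockercfg keyed by registry host with a base64 'auth' blob. A minimal file of that shape; the registry host and credentials are placeholders.

import base64, json

registry = 'docker-registry.example.com'
dockercfg = {
    registry: {
        'auth': base64.b64encode(b'someuser:somepass').decode(),
        'email': 'someuser@example.com',
    }
}

with open('.example.dockercfg', 'w') as f:
    json.dump(dockercfg, f, indent=2)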
3,899 | openid/JWTConnect-Python-CryptoJWT | src/cryptojwt/tools/keygen.py | main | def main():
""" Main function"""
parser = argparse.ArgumentParser(description='JSON Web Key (JWK) Generator')
parser.add_argument('--kty',
dest='kty',
metavar='type',
help='Key type',
required=True)
parser.add_argument('--size',
dest='keysize',
type=int,
metavar='size',
help='Key size')
parser.add_argument('--crv',
dest='crv',
metavar='curve',
help='EC curve',
choices=NIST2SEC.keys(),
default=DEFAULT_EC_CURVE)
parser.add_argument('--exp',
dest='rsa_exp',
type=int,
metavar='exponent',
help=f'RSA public key exponent (default {DEFAULT_RSA_EXP})',
default=DEFAULT_RSA_EXP)
parser.add_argument('--kid',
dest='kid',
metavar='id',
help='Key ID')
args = parser.parse_args()
if args.kty.upper() == 'RSA':
if args.keysize is None:
args.keysize = DEFAULT_RSA_KEYSIZE
jwk = new_rsa_key(public_exponent=args.rsa_exp, key_size=args.keysize, kid=args.kid)
elif args.kty.upper() == 'EC':
if args.crv not in NIST2SEC:
print("Unknown curve: {0}".format(args.crv), file=sys.stderr)
exit(1)
jwk = new_ec_key(crv=args.crv, kid=args.kid)
elif args.kty.upper() == 'SYM':
if args.keysize is None:
args.keysize = DEFAULT_SYM_KEYSIZE
randomkey = os.urandom(args.keysize)
jwk = SYMKey(key=randomkey, kid=args.kid)
else:
print(f"Unknown key type: {args.kty}", file=sys.stderr)
exit(1)
jwk_dict = jwk.serialize(private=True)
print(json.dumps(jwk_dict, sort_keys=True, indent=4))
print("SHA-256: " + jwk.thumbprint('SHA-256').decode(), file=sys.stderr) | python | def main():
""" Main function"""
parser = argparse.ArgumentParser(description='JSON Web Key (JWK) Generator')
parser.add_argument('--kty',
dest='kty',
metavar='type',
help='Key type',
required=True)
parser.add_argument('--size',
dest='keysize',
type=int,
metavar='size',
help='Key size')
parser.add_argument('--crv',
dest='crv',
metavar='curve',
help='EC curve',
choices=NIST2SEC.keys(),
default=DEFAULT_EC_CURVE)
parser.add_argument('--exp',
dest='rsa_exp',
type=int,
metavar='exponent',
help=f'RSA public key exponent (default {DEFAULT_RSA_EXP})',
default=DEFAULT_RSA_EXP)
parser.add_argument('--kid',
dest='kid',
metavar='id',
help='Key ID')
args = parser.parse_args()
if args.kty.upper() == 'RSA':
if args.keysize is None:
args.keysize = DEFAULT_RSA_KEYSIZE
jwk = new_rsa_key(public_exponent=args.rsa_exp, key_size=args.keysize, kid=args.kid)
elif args.kty.upper() == 'EC':
if args.crv not in NIST2SEC:
print("Unknown curve: {0}".format(args.crv), file=sys.stderr)
exit(1)
jwk = new_ec_key(crv=args.crv, kid=args.kid)
elif args.kty.upper() == 'SYM':
if args.keysize is None:
args.keysize = DEFAULT_SYM_KEYSIZE
randomkey = os.urandom(args.keysize)
jwk = SYMKey(key=randomkey, kid=args.kid)
else:
print(f"Unknown key type: {args.kty}", file=sys.stderr)
exit(1)
jwk_dict = jwk.serialize(private=True)
print(json.dumps(jwk_dict, sort_keys=True, indent=4))
print("SHA-256: " + jwk.thumbprint('SHA-256').decode(), file=sys.stderr) | ['def', 'main', '(', ')', ':', 'parser', '=', 'argparse', '.', 'ArgumentParser', '(', 'description', '=', "'JSON Web Key (JWK) Generator'", ')', 'parser', '.', 'add_argument', '(', "'--kty'", ',', 'dest', '=', "'kty'", ',', 'metavar', '=', "'type'", ',', 'help', '=', "'Key type'", ',', 'required', '=', 'True', ')', 'parser', '.', 'add_argument', '(', "'--size'", ',', 'dest', '=', "'keysize'", ',', 'type', '=', 'int', ',', 'metavar', '=', "'size'", ',', 'help', '=', "'Key size'", ')', 'parser', '.', 'add_argument', '(', "'--crv'", ',', 'dest', '=', "'crv'", ',', 'metavar', '=', "'curve'", ',', 'help', '=', "'EC curve'", ',', 'choices', '=', 'NIST2SEC', '.', 'keys', '(', ')', ',', 'default', '=', 'DEFAULT_EC_CURVE', ')', 'parser', '.', 'add_argument', '(', "'--exp'", ',', 'dest', '=', "'rsa_exp'", ',', 'type', '=', 'int', ',', 'metavar', '=', "'exponent'", ',', 'help', '=', "f'RSA public key exponent (default {DEFAULT_RSA_EXP})'", ',', 'default', '=', 'DEFAULT_RSA_EXP', ')', 'parser', '.', 'add_argument', '(', "'--kid'", ',', 'dest', '=', "'kid'", ',', 'metavar', '=', "'id'", ',', 'help', '=', "'Key ID'", ')', 'args', '=', 'parser', '.', 'parse_args', '(', ')', 'if', 'args', '.', 'kty', '.', 'upper', '(', ')', '==', "'RSA'", ':', 'if', 'args', '.', 'keysize', 'is', 'None', ':', 'args', '.', 'keysize', '=', 'DEFAULT_RSA_KEYSIZE', 'jwk', '=', 'new_rsa_key', '(', 'public_exponent', '=', 'args', '.', 'rsa_exp', ',', 'key_size', '=', 'args', '.', 'keysize', ',', 'kid', '=', 'args', '.', 'kid', ')', 'elif', 'args', '.', 'kty', '.', 'upper', '(', ')', '==', "'EC'", ':', 'if', 'args', '.', 'crv', 'not', 'in', 'NIST2SEC', ':', 'print', '(', '"Unknown curve: {0}"', '.', 'format', '(', 'args', '.', 'crv', ')', ',', 'file', '=', 'sys', '.', 'stderr', ')', 'exit', '(', '1', ')', 'jwk', '=', 'new_ec_key', '(', 'crv', '=', 'args', '.', 'crv', ',', 'kid', '=', 'args', '.', 'kid', ')', 'elif', 'args', '.', 'kty', '.', 'upper', '(', ')', '==', "'SYM'", ':', 'if', 'args', '.', 'keysize', 'is', 'None', ':', 'args', '.', 'keysize', '=', 'DEFAULT_SYM_KEYSIZE', 'randomkey', '=', 'os', '.', 'urandom', '(', 'args', '.', 'keysize', ')', 'jwk', '=', 'SYMKey', '(', 'key', '=', 'randomkey', ',', 'kid', '=', 'args', '.', 'kid', ')', 'else', ':', 'print', '(', 'f"Unknown key type: {args.kty}"', ',', 'file', '=', 'sys', '.', 'stderr', ')', 'exit', '(', '1', ')', 'jwk_dict', '=', 'jwk', '.', 'serialize', '(', 'private', '=', 'True', ')', 'print', '(', 'json', '.', 'dumps', '(', 'jwk_dict', ',', 'sort_keys', '=', 'True', ',', 'indent', '=', '4', ')', ')', 'print', '(', '"SHA-256: "', '+', 'jwk', '.', 'thumbprint', '(', "'SHA-256'", ')', '.', 'decode', '(', ')', ',', 'file', '=', 'sys', '.', 'stderr', ')'] | Main function | ['Main', 'function'] | train | https://github.com/openid/JWTConnect-Python-CryptoJWT/blob/8863cfbfe77ca885084870b234a66b55bd52930c/src/cryptojwt/tools/keygen.py#L22-L74 |