Dataset columns (each row below is one record from the train split):

| column | dtype | range |
|---|---|---|
| Unnamed: 0 | int64 | 0 to 10k |
| repository_name | string | lengths 7 to 54 |
| func_path_in_repository | string | lengths 5 to 223 |
| func_name | string | lengths 1 to 134 |
| whole_func_string | string | lengths 100 to 30.3k |
| language | string (1 class) | value: python |
| func_code_string | string | lengths 100 to 30.3k |
| func_code_tokens | string | lengths 138 to 33.2k |
| func_documentation_string | string | lengths 1 to 15k |
| func_documentation_tokens | string | lengths 5 to 5.14k |
| split_name | string (1 class) | value: train |
| func_code_url | string | lengths 91 to 315 |
7,200 | skggm/skggm | inverse_covariance/inverse_covariance.py | _compute_error | python | train | https://github.com/skggm/skggm/blob/a0ed406586c4364ea3297a658f415e13b5cbdaf8/inverse_covariance/inverse_covariance.py#L31-L74

```python
def _compute_error(comp_cov, covariance_, precision_, score_metric="frobenius"):
    """Computes the covariance error vs. comp_cov.

    Parameters
    ----------
    comp_cov : array-like, shape = (n_features, n_features)
        The precision to compare with.
        This should normally be the test sample covariance/precision.

    scaling : bool
        If True, the squared error norm is divided by n_features.
        If False (default), the squared error norm is not rescaled.

    score_metric : str
        The type of norm used to compute the error between the estimated
        self.precision, self.covariance and the reference `comp_cov`.
        Available error types:
        - 'frobenius' (default): sqrt(tr(A^t.A))
        - 'spectral': sqrt(max(eigenvalues(A^t.A))
        - 'kl': kl-divergence
        - 'quadratic': quadratic loss
        - 'log_likelihood': negative log likelihood

    squared : bool
        Whether to compute the squared error norm or the error norm.
        If True (default), the squared error norm is returned.
        If False, the error norm is returned.
    """
    if score_metric == "frobenius":
        return np.linalg.norm(np.triu(comp_cov - covariance_, 1), ord="fro")
    elif score_metric == "spectral":
        error = comp_cov - covariance_
        return np.amax(np.linalg.svdvals(np.dot(error.T, error)))
    elif score_metric == "kl":
        return metrics.kl_loss(comp_cov, precision_)
    elif score_metric == "quadratic":
        return metrics.quadratic_loss(comp_cov, precision_)
    elif score_metric == "log_likelihood":
        return -metrics.log_likelihood(comp_cov, precision_)
    else:
        raise NotImplementedError(
            ("Must be frobenius, spectral, kl, " "quadratic, or log_likelihood")
        )
```
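A minimal usage sketch for the record above, not taken from the dataset: it assumes `_compute_error` is importable (its module already imports `numpy as np` and its `metrics` helpers) and uses made-up 2x2 matrices.

```python
import numpy as np

comp_cov = np.array([[1.0, 0.2], [0.2, 1.0]])     # reference (test-sample) covariance
covariance_ = np.array([[0.9, 0.1], [0.1, 1.1]])  # estimated covariance
precision_ = np.linalg.inv(covariance_)           # matching precision estimate

# Default Frobenius error on the strictly upper-triangular difference.
err = _compute_error(comp_cov, covariance_, precision_, score_metric="frobenius")
print(err)
```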
7,201 | reflexsc/reflex | dev/build.py | Obj.add_info | python | train | https://github.com/reflexsc/reflex/blob/cee6b0ccfef395ca5e157d644a2e3252cea9fe62/dev/build.py#L213-L221

```python
def add_info(self, data):
    """add info to a build"""
    for key in data:
        # verboten
        if key in ('status','state','name','id','application','services','release'):
            raise ValueError("Sorry, cannot set build info with key of {}".format(key))
        self.obj[key] = data[key]
    self.changes.append("Adding build info")
    return self
```
7,202 | autokey/autokey | lib/autokey/interface.py | XInterfaceBase.__ungrabHotkey | python | train | https://github.com/autokey/autokey/blob/35decb72f286ce68cd2a1f09ace8891a520b58d1/lib/autokey/interface.py#L550-L572

```python
def __ungrabHotkey(self, key, modifiers, window):
    """
    Ungrab a specific hotkey in the given window
    """
    logger.debug("Ungrabbing hotkey: %r %r", modifiers, key)
    try:
        keycode = self.__lookupKeyCode(key)
        mask = 0
        for mod in modifiers:
            mask |= self.modMasks[mod]

        window.ungrab_key(keycode, mask)
        if Key.NUMLOCK in self.modMasks:
            window.ungrab_key(keycode, mask|self.modMasks[Key.NUMLOCK])
        if Key.CAPSLOCK in self.modMasks:
            window.ungrab_key(keycode, mask|self.modMasks[Key.CAPSLOCK])
        if Key.CAPSLOCK in self.modMasks and Key.NUMLOCK in self.modMasks:
            window.ungrab_key(keycode, mask|self.modMasks[Key.CAPSLOCK]|self.modMasks[Key.NUMLOCK])
    except Exception as e:
        logger.warning("Failed to ungrab hotkey %r %r: %s", modifiers, key, str(e))
```
7,203 | ltworf/typedload | typedload/dataloader.py | _namedtupleload | python | train | https://github.com/ltworf/typedload/blob/7fd130612963bfcec3242698463ef863ca4af927/typedload/dataloader.py#L331-L398

```python
def _namedtupleload(l: Loader, value: Dict[str, Any], type_) -> Tuple:
    """
    This loads a Dict[str, Any] into a NamedTuple.
    """
    if not hasattr(type_, '__dataclass_fields__'):
        fields = set(type_._fields)
        optional_fields = set(getattr(type_, '_field_defaults', {}).keys())
        type_hints = type_._field_types
    else:
        #dataclass
        import dataclasses
        fields = set(type_.__dataclass_fields__.keys())
        optional_fields = {k for k,v in type_.__dataclass_fields__.items() if not (isinstance(getattr(v, 'default', dataclasses._MISSING_TYPE()), dataclasses._MISSING_TYPE) and isinstance(getattr(v, 'default_factory', dataclasses._MISSING_TYPE()), dataclasses._MISSING_TYPE))}
        type_hints = {k: v.type for k,v in type_.__dataclass_fields__.items()}

        #Name mangling
        # Prepare the list of the needed name changes
        transforms = []  # type: List[Tuple[str, str]]
        for field in fields:
            if type_.__dataclass_fields__[field].metadata:
                name = type_.__dataclass_fields__[field].metadata.get('name')
                if name:
                    transforms.append((field, name))

        # Do the needed name changes
        if transforms:
            value = value.copy()
            for pyname, dataname in transforms:
                if dataname in value:
                    tmp = value[dataname]
                    del value[dataname]
                    value[pyname] = tmp

    necessary_fields = fields.difference(optional_fields)
    try:
        vfields = set(value.keys())
    except AttributeError as e:
        raise TypedloadAttributeError(str(e), value=value, type_=type_)

    if necessary_fields.intersection(vfields) != necessary_fields:
        raise TypedloadValueError(
            'Value does not contain fields: %s which are necessary for type %s' % (
                necessary_fields.difference(vfields),
                type_
            ),
            value=value,
            type_=type_,
        )

    fieldsdiff = vfields.difference(fields)
    if l.failonextra and len(fieldsdiff):
        extra = ', '.join(fieldsdiff)
        raise TypedloadValueError(
            'Dictionary has unrecognized fields: %s and cannot be loaded into %s' % (extra, type_),
            value=value,
            type_=type_,
        )

    params = {}
    for k, v in value.items():
        if k not in fields:
            continue
        params[k] = l.load(
            v,
            type_hints[k],
            annotation=Annotation(AnnotationType.FIELD, k),
        )
    return type_(**params)
```
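For context, a hedged sketch of how a dict reaches a NamedTuple through typedload's public entry point; the `Point` type and data are invented for illustration, and `typedload.load` is assumed to dispatch to a handler like the one above internally.

```python
from typing import NamedTuple

import typedload


class Point(NamedTuple):
    x: int
    y: int
    label: str = "origin"   # optional field, satisfied by its default


data = {"x": 1, "y": 2}
point = typedload.load(data, Point)
print(point)   # Point(x=1, y=2, label='origin')
```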
7,204 | StackStorm/pybind | pybind/slxos/v17s_1_02/routing_system/router/isis/router_isis_cmds_holder/router_isis_attributes/__init__.py | router_isis_attributes._set_fast_flood | python | train | https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/slxos/v17s_1_02/routing_system/router/isis/router_isis_cmds_holder/router_isis_attributes/__init__.py#L393-L414

```python
def _set_fast_flood(self, v, load=False):
    """
    Setter method for fast_flood, mapped from YANG variable /routing_system/router/isis/router_isis_cmds_holder/router_isis_attributes/fast_flood (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_fast_flood is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_fast_flood() directly.
    """
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v,base=fast_flood.fast_flood, is_container='container', presence=True, yang_name="fast-flood", rest_name="fast-flood", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-compact-syntax': None, u'info': u'Define number of LSPs to be flooded before SPF Run'}}, namespace='urn:brocade.com:mgmt:brocade-isis', defining_module='brocade-isis', yang_type='container', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """fast_flood must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=fast_flood.fast_flood, is_container='container', presence=True, yang_name="fast-flood", rest_name="fast-flood", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-compact-syntax': None, u'info': u'Define number of LSPs to be flooded before SPF Run'}}, namespace='urn:brocade.com:mgmt:brocade-isis', defining_module='brocade-isis', yang_type='container', is_config=True)""",
        })

    self.__fast_flood = t
    if hasattr(self, '_set'):
        self._set()
```
7,205 | Aluriak/ACCC | accc/compiler/compiler.py | Compiler._next_lexem | python | train | https://github.com/Aluriak/ACCC/blob/9092f985bef7ed784264c86bc19c980f4ce2309f/accc/compiler/compiler.py#L160-L178

```python
def _next_lexem(self, lexem_type, source_code, source_code_size):
    """Return next readable lexem of given type in source_code.
    If no value can be found, the neutral_value will be used"""
    # define reader as a lexem extractor
    def reader(seq, block_size):
        identificator = ''
        for char in source_code:
            if len(identificator) == self.idnt_values_size[lexem_type]:
                yield self.table_values[lexem_type][identificator]
                identificator = ''
            identificator += char
    lexem_reader = reader(source_code, self.idnt_values_size)
    lexem = None
    time_out = 0
    while lexem == None and time_out < 2*source_code_size:
        lexem = next(lexem_reader)
        time_out += 1
    # here we have found a lexem
    return lexem
```
7,206 | RedHatInsights/insights-core | insights/core/spec_factory.py | CommandOutputProvider._stream | python | train | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/core/spec_factory.py#L358-L375

```python
def _stream(self):
    """
    Returns a generator of lines instead of a list of lines.
    """
    if self._exception:
        raise self._exception
    try:
        if self._content:
            yield self._content
        else:
            args = self.create_args()
            with self.ctx.connect(*args, env=self.create_env(), timeout=self.timeout) as s:
                yield s
    except StopIteration:
        raise
    except Exception as ex:
        self._exception = ex
        raise ContentException(str(ex))
```
7,207 | mdiener/grace | grace/py27/slimit/visitors/scopevisitor.py | mangle_scope_tree | python | train | https://github.com/mdiener/grace/blob/2dab13a2cf636da5da989904c5885166fc94d36d/grace/py27/slimit/visitors/scopevisitor.py#L141-L161

```python
def mangle_scope_tree(root, toplevel):
    """Walk over a scope tree and mangle symbol names.

    Args:
        toplevel: Defines if global scope should be mangled or not.
    """
    def mangle(scope):
        # don't mangle global scope if not specified otherwise
        if scope.get_enclosing_scope() is None and not toplevel:
            return
        for name in scope.symbols:
            mangled_name = scope.get_next_mangled_name()
            scope.mangled[name] = mangled_name
            scope.rev_mangled[mangled_name] = name

    def visit(node):
        mangle(node)
        for child in node.children:
            visit(child)

    visit(root)
```
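To illustrate only the traversal pattern (not the slimit scope API), a self-contained sketch with a minimal stand-in scope class; every attribute below is invented to mirror the fields the function touches.

```python
class FakeScope:
    """Bare-bones scope: symbols, children, and the mangling bookkeeping."""

    def __init__(self, symbols, children=(), enclosing=None):
        self.symbols = list(symbols)
        self.children = list(children)
        self._enclosing = enclosing
        self.mangled = {}
        self.rev_mangled = {}
        self._names = iter("abcdefghijklmnopqrstuvwxyz")

    def get_enclosing_scope(self):
        return self._enclosing

    def get_next_mangled_name(self):
        return next(self._names)


inner = FakeScope(["result"], enclosing="outer")
root = FakeScope(["counter", "total"], children=[inner])

mangle_scope_tree(root, toplevel=True)
print(root.mangled)    # {'counter': 'a', 'total': 'b'}
print(inner.mangled)   # {'result': 'a'}
```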
7,208 | Alignak-monitoring/alignak | alignak/objects/notificationway.py | NotificationWay.want_service_notification | python | train | https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/notificationway.py#L144-L206

```python
def want_service_notification(self, timeperiods, timestamp, state, n_type,
                              business_impact, cmd=None):
    # pylint: disable=too-many-return-statements
    """Check if notification options match the state of the service
    Notification is NOT wanted in ONE of the following case::

    * service notifications are disabled
    * cmd is not in service_notification_commands
    * business_impact < self.min_business_impact
    * service_notification_period is not valid
    * state does not match service_notification_options for problem, recovery and flapping
    * state does not match host_notification_options for downtime

    :param timestamp: time we want to notify the contact (usually now)
    :type timestamp: int
    :param state: host or service state ("WARNING", "CRITICAL" ..)
    :type state: str
    :param n_type: type of notification ("PROBLEM", "RECOVERY" ..)
    :type n_type: str
    :param business_impact: impact of this service
    :type business_impact: int
    :param cmd: command launched to notify the contact
    :type cmd: str
    :return: True if no condition is matched, otherwise False
    :rtype: bool
    TODO: Simplify function
    """
    if not self.service_notifications_enabled:
        return False

    # Maybe the command we ask for are not for us, but for another notification ways
    # on the same contact. If so, bail out
    if cmd and cmd not in self.service_notification_commands:
        return False

    # If the business_impact is not high enough, we bail out
    if business_impact < self.min_business_impact:
        return False

    notif_period = timeperiods[self.service_notification_period]
    in_notification_period = notif_period.is_time_valid(timestamp)
    if 'n' in self.service_notification_options:
        return False

    if in_notification_period:
        short_states = {
            u'WARNING': 'w', u'UNKNOWN': 'u', u'CRITICAL': 'c',
            u'RECOVERY': 'r', u'FLAPPING': 'f', u'DOWNTIME': 's'
        }
        if n_type == u'PROBLEM' and state in short_states:
            return short_states[state] in self.service_notification_options
        if n_type == u'RECOVERY' and n_type in short_states:
            return short_states[n_type] in self.service_notification_options
        if n_type == u'ACKNOWLEDGEMENT':
            return in_notification_period
        if n_type in (u'FLAPPINGSTART', u'FLAPPINGSTOP', u'FLAPPINGDISABLED'):
            return 'f' in self.service_notification_options
        if n_type in (u'DOWNTIMESTART', u'DOWNTIMEEND', u'DOWNTIMECANCELLED'):
            # No notification when a downtime was cancelled. Is that true??
            # According to the documentation we need to look at _host_ options
            return 's' in self.host_notification_options

    return False
```
7,209 | cdeboever3/cdpybio | cdpybio/analysis.py | parse_grasp_gwas | python | train | https://github.com/cdeboever3/cdpybio/blob/38efdf0e11d01bc00a135921cb91a19c03db5d5c/cdpybio/analysis.py#L151-L181

```python
def parse_grasp_gwas(fn):
    """
    Read GRASP database and filter for unique hits.

    Parameters
    ----------
    fn : str
        Path to (subset of) GRASP database.

    Returns
    -------
    df : pandas.DataFrame
        Pandas dataframe with de-duplicated, significant SNPs. The index is of
        the form chrom:pos where pos is the one-based position of the SNP. The
        columns are chrom, start, end, rsid, and pvalue. rsid may be empty or
        not actually an RSID. chrom, start, end make a zero-based bed file with
        the SNP coordinates.
    """
    df = pd.read_table(fn, low_memory=False)
    df = df[df.Pvalue < 1e-5]
    df = df.sort(columns=['chr(hg19)', 'pos(hg19)', 'Pvalue'])
    df = df.drop_duplicates(subset=['chr(hg19)', 'pos(hg19)'])
    df = df[df.Pvalue < 1e-5]
    df['chrom'] = 'chr' + df['chr(hg19)'].astype(str)
    df['end'] = df['pos(hg19)']
    df['start'] = df.end - 1
    df['rsid'] = df['SNPid(in paper)']
    df['pvalue'] = df['Pvalue']
    df = df[['chrom', 'start', 'end', 'rsid', 'pvalue']]
    df.index = df['chrom'].astype(str) + ':' + df['end'].astype(str)
    return df
```
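A hypothetical call with a placeholder path; note that the body relies on the legacy `DataFrame.sort(columns=...)` API, so an older pandas is assumed.

```python
import pandas as pd  # parse_grasp_gwas expects pandas to be available as pd

snps = parse_grasp_gwas("grasp_subset.tsv")  # placeholder filename
print(snps.head())
# Columns: chrom, start, end, rsid, pvalue; index entries look like "chr1:12345"
```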
7,210 | dw/mitogen | mitogen/core.py | Poller.stop_transmit | python | train | https://github.com/dw/mitogen/blob/a7fdb55e1300a7e0a5e404b09eb730cf9a525da7/mitogen/core.py#L1999-L2007

```python
def stop_transmit(self, fd):
    """
    Stop yielding writeability events for `fd`.

    Redundant calls to :meth:`stop_transmit` are silently ignored, this may
    change in future.
    """
    self._wfds.pop(fd, None)
    self._update(fd)
```
7,211 | twisted/txaws | txaws/s3/client.py | S3Client.init_multipart_upload | python | train | https://github.com/twisted/txaws/blob/5c3317376cd47e536625027e38c3b37840175ce0/txaws/s3/client.py#L589-L613

```python
def init_multipart_upload(self, bucket, object_name, content_type=None,
                          amz_headers={}, metadata={}):
    """
    Initiate a multipart upload to a bucket.

    @param bucket: The name of the bucket
    @param object_name: The object name
    @param content_type: The Content-Type for the object
    @param metadata: C{dict} containing additional metadata
    @param amz_headers: A C{dict} used to build C{x-amz-*} headers.
    @return: C{str} upload_id
    """
    objectname_plus = '%s?uploads' % object_name
    details = self._details(
        method=b"POST",
        url_context=self._url_context(bucket=bucket, object_name=objectname_plus),
        headers=self._headers(content_type),
        metadata=metadata,
        amz_headers=amz_headers,
    )
    d = self._submit(self._query_factory(details))
    d.addCallback(
        lambda (response, body): MultipartInitiationResponse.from_xml(body)
    )
    return d
```
7,212 | luismasuelli/python-cantrips | cantrips/patterns/broadcast.py | IBroadcast.BROADCAST_FILTER_OR | python | train | https://github.com/luismasuelli/python-cantrips/blob/dba2742c1d1a60863bb65f4a291464f6e68eb2ee/cantrips/patterns/broadcast.py#L75-L79

```python
def BROADCAST_FILTER_OR(*funcs):
    """
    Composes the passed filters into an and-joined filter.
    """
    return lambda u, command, *args, **kwargs: any(f(u, command, *args, **kwargs) for f in funcs)
```
7,213 | log2timeline/dfvfs | dfvfs/file_io/encoded_stream_io.py | EncodedStream._GetDecodedStreamSize | python | train | https://github.com/log2timeline/dfvfs/blob/2b3ccd115f9901d89f383397d4a1376a873c83c4/dfvfs/file_io/encoded_stream_io.py#L75-L98

```python
def _GetDecodedStreamSize(self):
    """Retrieves the decoded stream size.

    Returns:
      int: decoded stream size.
    """
    self._file_object.seek(0, os.SEEK_SET)

    self._decoder = self._GetDecoder()
    self._decoded_data = b''

    encoded_data_offset = 0
    encoded_data_size = self._file_object.get_size()
    decoded_stream_size = 0

    while encoded_data_offset < encoded_data_size:
        read_count = self._ReadEncodedData(self._ENCODED_DATA_BUFFER_SIZE)
        if read_count == 0:
            break

        encoded_data_offset += read_count
        decoded_stream_size += self._decoded_data_size

    return decoded_stream_size
```
7,214 | HPCC-Cloud-Computing/CAL | calplus/v1/network/resources/network.py | NetworkController.get | python | train | https://github.com/HPCC-Cloud-Computing/CAL/blob/7134b3dfe9ee3a383506a592765c7a12fa4ca1e9/calplus/v1/network/resources/network.py#L85-L100

```python
def get(self, req, driver):
    """Get info of a network
    Get info of a specific netowrk with id on special cloud
    with:
    :Param req
    :Type object Request
    """
    response = driver.get_network(req.params, id)
    data = {
        'action': "get",
        'controller': "network",
        'id': id,
        'cloud': req.environ['calplus.cloud'],
        'response': response
    }
    return data
```
7,215 | yyuu/botornado | boto/ec2/autoscale/__init__.py | AutoScaleConnection.create_launch_configuration | def create_launch_configuration(self, launch_config):
"""
Creates a new Launch Configuration.
:type launch_config: :class:`boto.ec2.autoscale.launchconfig.LaunchConfiguration`
:param launch_config: LaunchConfiguration object.
"""
params = {'ImageId': launch_config.image_id,
'LaunchConfigurationName': launch_config.name,
'InstanceType': launch_config.instance_type}
if launch_config.key_name:
params['KeyName'] = launch_config.key_name
if launch_config.user_data:
params['UserData'] = base64.b64encode(launch_config.user_data)
if launch_config.kernel_id:
params['KernelId'] = launch_config.kernel_id
if launch_config.ramdisk_id:
params['RamdiskId'] = launch_config.ramdisk_id
if launch_config.block_device_mappings:
self.build_list_params(params, launch_config.block_device_mappings,
'BlockDeviceMappings')
if launch_config.security_groups:
self.build_list_params(params, launch_config.security_groups,
'SecurityGroups')
if launch_config.instance_monitoring:
params['InstanceMonitoring.Enabled'] = 'true'
return self.get_object('CreateLaunchConfiguration', params,
Request, verb='POST') | python | def create_launch_configuration(self, launch_config):
"""
Creates a new Launch Configuration.
:type launch_config: :class:`boto.ec2.autoscale.launchconfig.LaunchConfiguration`
:param launch_config: LaunchConfiguration object.
"""
params = {'ImageId': launch_config.image_id,
'LaunchConfigurationName': launch_config.name,
'InstanceType': launch_config.instance_type}
if launch_config.key_name:
params['KeyName'] = launch_config.key_name
if launch_config.user_data:
params['UserData'] = base64.b64encode(launch_config.user_data)
if launch_config.kernel_id:
params['KernelId'] = launch_config.kernel_id
if launch_config.ramdisk_id:
params['RamdiskId'] = launch_config.ramdisk_id
if launch_config.block_device_mappings:
self.build_list_params(params, launch_config.block_device_mappings,
'BlockDeviceMappings')
if launch_config.security_groups:
self.build_list_params(params, launch_config.security_groups,
'SecurityGroups')
if launch_config.instance_monitoring:
params['InstanceMonitoring.Enabled'] = 'true'
return self.get_object('CreateLaunchConfiguration', params,
Request, verb='POST') | ['def', 'create_launch_configuration', '(', 'self', ',', 'launch_config', ')', ':', 'params', '=', '{', "'ImageId'", ':', 'launch_config', '.', 'image_id', ',', "'LaunchConfigurationName'", ':', 'launch_config', '.', 'name', ',', "'InstanceType'", ':', 'launch_config', '.', 'instance_type', '}', 'if', 'launch_config', '.', 'key_name', ':', 'params', '[', "'KeyName'", ']', '=', 'launch_config', '.', 'key_name', 'if', 'launch_config', '.', 'user_data', ':', 'params', '[', "'UserData'", ']', '=', 'base64', '.', 'b64encode', '(', 'launch_config', '.', 'user_data', ')', 'if', 'launch_config', '.', 'kernel_id', ':', 'params', '[', "'KernelId'", ']', '=', 'launch_config', '.', 'kernel_id', 'if', 'launch_config', '.', 'ramdisk_id', ':', 'params', '[', "'RamdiskId'", ']', '=', 'launch_config', '.', 'ramdisk_id', 'if', 'launch_config', '.', 'block_device_mappings', ':', 'self', '.', 'build_list_params', '(', 'params', ',', 'launch_config', '.', 'block_device_mappings', ',', "'BlockDeviceMappings'", ')', 'if', 'launch_config', '.', 'security_groups', ':', 'self', '.', 'build_list_params', '(', 'params', ',', 'launch_config', '.', 'security_groups', ',', "'SecurityGroups'", ')', 'if', 'launch_config', '.', 'instance_monitoring', ':', 'params', '[', "'InstanceMonitoring.Enabled'", ']', '=', "'true'", 'return', 'self', '.', 'get_object', '(', "'CreateLaunchConfiguration'", ',', 'params', ',', 'Request', ',', 'verb', '=', "'POST'", ')'] | Creates a new Launch Configuration.
:type launch_config: :class:`boto.ec2.autoscale.launchconfig.LaunchConfiguration`
:param launch_config: LaunchConfiguration object. | ['Creates', 'a', 'new', 'Launch', 'Configuration', '.'] | train | https://github.com/yyuu/botornado/blob/fffb056f5ff2324d1d5c1304014cfb1d899f602e/boto/ec2/autoscale/__init__.py#L188-L215 |
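A minimal usage sketch for the create_launch_configuration call above, assuming the classic boto autoscale API (AutoScaleConnection and the LaunchConfiguration keyword arguments follow boto's documented examples; the AMI id, key pair, and security group names are placeholders):

```python
from boto.ec2.autoscale import AutoScaleConnection, LaunchConfiguration

# Credentials are read from the environment / boto config by default.
conn = AutoScaleConnection()
lc = LaunchConfiguration(name='web-lc',
                         image_id='ami-12345678',       # placeholder AMI
                         instance_type='m1.small',
                         key_name='my-keypair',
                         security_groups=['webservers'])
conn.create_launch_configuration(lc)
```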
7,216 | Calysto/calysto | calysto/ai/conx.py | Network.actDerivASIG | def actDerivASIG(self, x):
"""
Only works on scalars.
"""
def act(v):
if v < -15.0: return 0.0
elif v > 15.0: return 1.0
else: return 1.0 / (1.0 + Numeric.exp(-v))
return (act(x) * (1.0 - act(x))) + self.sigmoid_prime_offset | python | def actDerivASIG(self, x):
"""
Only works on scalars.
"""
def act(v):
if v < -15.0: return 0.0
elif v > 15.0: return 1.0
else: return 1.0 / (1.0 + Numeric.exp(-v))
return (act(x) * (1.0 - act(x))) + self.sigmoid_prime_offset | ['def', 'actDerivASIG', '(', 'self', ',', 'x', ')', ':', 'def', 'act', '(', 'v', ')', ':', 'if', 'v', '<', '-', '15.0', ':', 'return', '0.0', 'elif', 'v', '>', '15.0', ':', 'return', '1.0', 'else', ':', 'return', '1.0', '/', '(', '1.0', '+', 'Numeric', '.', 'exp', '(', '-', 'v', ')', ')', 'return', '(', 'act', '(', 'x', ')', '*', '(', '1.0', '-', 'act', '(', 'x', ')', ')', ')', '+', 'self', '.', 'sigmoid_prime_offset'] | Only works on scalars. | ['Only', 'works', 'on', 'scalars', '.'] | train | https://github.com/Calysto/calysto/blob/20813c0f48096317aa775d03a5c6b20f12fafc93/calysto/ai/conx.py#L2143-L2151 |
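A standalone sketch of the clipped-sigmoid derivative computed by actDerivASIG, using math.exp in place of the Numeric package; the sigmoid_prime_offset value is illustrative:

```python
import math

def sigmoid(v):
    if v < -15.0:
        return 0.0
    if v > 15.0:
        return 1.0
    return 1.0 / (1.0 + math.exp(-v))

sigmoid_prime_offset = 0.01   # stand-in for self.sigmoid_prime_offset
x = 0.5
derivative = sigmoid(x) * (1.0 - sigmoid(x)) + sigmoid_prime_offset
print(round(derivative, 3))   # 0.245 for x = 0.5 with a 0.01 offset
```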
7,217 | qntm/greenery | greenery/fsm.py | fsm.islive | def islive(self, state):
'''A state is "live" if a final state can be reached from it.'''
reachable = [state]
i = 0
while i < len(reachable):
current = reachable[i]
if current in self.finals:
return True
if current in self.map:
for symbol in self.map[current]:
next = self.map[current][symbol]
if next not in reachable:
reachable.append(next)
i += 1
return False | python | def islive(self, state):
'''A state is "live" if a final state can be reached from it.'''
reachable = [state]
i = 0
while i < len(reachable):
current = reachable[i]
if current in self.finals:
return True
if current in self.map:
for symbol in self.map[current]:
next = self.map[current][symbol]
if next not in reachable:
reachable.append(next)
i += 1
return False | ['def', 'islive', '(', 'self', ',', 'state', ')', ':', 'reachable', '=', '[', 'state', ']', 'i', '=', '0', 'while', 'i', '<', 'len', '(', 'reachable', ')', ':', 'current', '=', 'reachable', '[', 'i', ']', 'if', 'current', 'in', 'self', '.', 'finals', ':', 'return', 'True', 'if', 'current', 'in', 'self', '.', 'map', ':', 'for', 'symbol', 'in', 'self', '.', 'map', '[', 'current', ']', ':', 'next', '=', 'self', '.', 'map', '[', 'current', ']', '[', 'symbol', ']', 'if', 'next', 'not', 'in', 'reachable', ':', 'reachable', '.', 'append', '(', 'next', ')', 'i', '+=', '1', 'return', 'False'] | A state is "live" if a final state can be reached from it. | ['A', 'state', 'is', 'live', 'if', 'a', 'final', 'state', 'can', 'be', 'reached', 'from', 'it', '.'] | train | https://github.com/qntm/greenery/blob/f4dc7fb483825459d26e8fe9f417764469c56ee8/greenery/fsm.py#L421-L435 |
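The reachability test above generalizes to any dict-of-dicts transition map; a standalone sketch (hypothetical FSM data, not the greenery API itself):

```python
def islive(transition_map, finals, state):
    """A state is live if some final state is reachable from it."""
    reachable = [state]
    i = 0
    while i < len(reachable):
        current = reachable[i]
        if current in finals:
            return True
        for target in transition_map.get(current, {}).values():
            if target not in reachable:
                reachable.append(target)
        i += 1
    return False

fsm_map = {0: {'a': 1}, 1: {'b': 2}, 2: {}, 3: {'a': 3}}
print(islive(fsm_map, {2}, 0))   # True: 0 -> 1 -> 2 ends in a final state
print(islive(fsm_map, {2}, 3))   # False: state 3 only loops on itself
```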
7,218 | keras-rl/keras-rl | rl/core.py | Agent.fit | def fit(self, env, nb_steps, action_repetition=1, callbacks=None, verbose=1,
visualize=False, nb_max_start_steps=0, start_step_policy=None, log_interval=10000,
nb_max_episode_steps=None):
"""Trains the agent on the given environment.
# Arguments
env: (`Env` instance): Environment that the agent interacts with. See [Env](#env) for details.
nb_steps (integer): Number of training steps to be performed.
action_repetition (integer): Number of times the agent repeats the same action without
observing the environment again. Setting this to a value > 1 can be useful
if a single action only has a very small effect on the environment.
callbacks (list of `keras.callbacks.Callback` or `rl.callbacks.Callback` instances):
List of callbacks to apply during training. See [callbacks](/callbacks) for details.
verbose (integer): 0 for no logging, 1 for interval logging (compare `log_interval`), 2 for episode logging
visualize (boolean): If `True`, the environment is visualized during training. However,
this is likely going to slow down training significantly and is thus intended to be
a debugging instrument.
nb_max_start_steps (integer): Number of maximum steps that the agent performs at the beginning
of each episode using `start_step_policy`. Notice that this is an upper limit since
the exact number of steps to be performed is sampled uniformly from [0, max_start_steps]
at the beginning of each episode.
start_step_policy (`lambda observation: action`): The policy
to follow if `nb_max_start_steps` > 0. If set to `None`, a random action is performed.
log_interval (integer): If `verbose` = 1, the number of steps that are considered to be an interval.
nb_max_episode_steps (integer): Number of steps per episode that the agent performs before
automatically resetting the environment. Set to `None` if each episode should run
(potentially indefinitely) until the environment signals a terminal state.
# Returns
A `keras.callbacks.History` instance that recorded the entire training process.
"""
if not self.compiled:
raise RuntimeError('You tried to fit your agent but it hasn\'t been compiled yet. Please call `compile()` before `fit()`.')
if action_repetition < 1:
raise ValueError('action_repetition must be >= 1, is {}'.format(action_repetition))
self.training = True
callbacks = [] if not callbacks else callbacks[:]
if verbose == 1:
callbacks += [TrainIntervalLogger(interval=log_interval)]
elif verbose > 1:
callbacks += [TrainEpisodeLogger()]
if visualize:
callbacks += [Visualizer()]
history = History()
callbacks += [history]
callbacks = CallbackList(callbacks)
if hasattr(callbacks, 'set_model'):
callbacks.set_model(self)
else:
callbacks._set_model(self)
callbacks._set_env(env)
params = {
'nb_steps': nb_steps,
}
if hasattr(callbacks, 'set_params'):
callbacks.set_params(params)
else:
callbacks._set_params(params)
self._on_train_begin()
callbacks.on_train_begin()
episode = np.int16(0)
self.step = np.int16(0)
observation = None
episode_reward = None
episode_step = None
did_abort = False
try:
while self.step < nb_steps:
if observation is None: # start of a new episode
callbacks.on_episode_begin(episode)
episode_step = np.int16(0)
episode_reward = np.float32(0)
# Obtain the initial observation by resetting the environment.
self.reset_states()
observation = deepcopy(env.reset())
if self.processor is not None:
observation = self.processor.process_observation(observation)
assert observation is not None
# Perform random starts at beginning of episode and do not record them into the experience.
# This slightly changes the start position between games.
nb_random_start_steps = 0 if nb_max_start_steps == 0 else np.random.randint(nb_max_start_steps)
for _ in range(nb_random_start_steps):
if start_step_policy is None:
action = env.action_space.sample()
else:
action = start_step_policy(observation)
if self.processor is not None:
action = self.processor.process_action(action)
callbacks.on_action_begin(action)
observation, reward, done, info = env.step(action)
observation = deepcopy(observation)
if self.processor is not None:
observation, reward, done, info = self.processor.process_step(observation, reward, done, info)
callbacks.on_action_end(action)
if done:
warnings.warn('Env ended before {} random steps could be performed at the start. You should probably lower the `nb_max_start_steps` parameter.'.format(nb_random_start_steps))
observation = deepcopy(env.reset())
if self.processor is not None:
observation = self.processor.process_observation(observation)
break
# At this point, we expect to be fully initialized.
assert episode_reward is not None
assert episode_step is not None
assert observation is not None
# Run a single step.
callbacks.on_step_begin(episode_step)
# This is where all of the work happens. We first perceive and compute the action
# (forward step) and then use the reward to improve (backward step).
action = self.forward(observation)
if self.processor is not None:
action = self.processor.process_action(action)
reward = np.float32(0)
accumulated_info = {}
done = False
for _ in range(action_repetition):
callbacks.on_action_begin(action)
observation, r, done, info = env.step(action)
observation = deepcopy(observation)
if self.processor is not None:
observation, r, done, info = self.processor.process_step(observation, r, done, info)
for key, value in info.items():
if not np.isreal(value):
continue
if key not in accumulated_info:
accumulated_info[key] = np.zeros_like(value)
accumulated_info[key] += value
callbacks.on_action_end(action)
reward += r
if done:
break
if nb_max_episode_steps and episode_step >= nb_max_episode_steps - 1:
# Force a terminal state.
done = True
metrics = self.backward(reward, terminal=done)
episode_reward += reward
step_logs = {
'action': action,
'observation': observation,
'reward': reward,
'metrics': metrics,
'episode': episode,
'info': accumulated_info,
}
callbacks.on_step_end(episode_step, step_logs)
episode_step += 1
self.step += 1
if done:
# We are in a terminal state but the agent hasn't yet seen it. We therefore
# perform one more forward-backward call and simply ignore the action before
# resetting the environment. We need to pass in `terminal=False` here since
# the *next* state, that is the state of the newly reset environment, is
# always non-terminal by convention.
self.forward(observation)
self.backward(0., terminal=False)
# This episode is finished, report and reset.
episode_logs = {
'episode_reward': episode_reward,
'nb_episode_steps': episode_step,
'nb_steps': self.step,
}
callbacks.on_episode_end(episode, episode_logs)
episode += 1
observation = None
episode_step = None
episode_reward = None
except KeyboardInterrupt:
# We catch keyboard interrupts here so that training can be safely aborted.
# This is so common that we've built this right into this function, which ensures that
# the `on_train_end` method is properly called.
did_abort = True
callbacks.on_train_end(logs={'did_abort': did_abort})
self._on_train_end()
return history | python | def fit(self, env, nb_steps, action_repetition=1, callbacks=None, verbose=1,
visualize=False, nb_max_start_steps=0, start_step_policy=None, log_interval=10000,
nb_max_episode_steps=None):
"""Trains the agent on the given environment.
# Arguments
env: (`Env` instance): Environment that the agent interacts with. See [Env](#env) for details.
nb_steps (integer): Number of training steps to be performed.
action_repetition (integer): Number of times the agent repeats the same action without
observing the environment again. Setting this to a value > 1 can be useful
if a single action only has a very small effect on the environment.
callbacks (list of `keras.callbacks.Callback` or `rl.callbacks.Callback` instances):
List of callbacks to apply during training. See [callbacks](/callbacks) for details.
verbose (integer): 0 for no logging, 1 for interval logging (compare `log_interval`), 2 for episode logging
visualize (boolean): If `True`, the environment is visualized during training. However,
this is likely going to slow down training significantly and is thus intended to be
a debugging instrument.
nb_max_start_steps (integer): Number of maximum steps that the agent performs at the beginning
of each episode using `start_step_policy`. Notice that this is an upper limit since
the exact number of steps to be performed is sampled uniformly from [0, max_start_steps]
at the beginning of each episode.
start_step_policy (`lambda observation: action`): The policy
to follow if `nb_max_start_steps` > 0. If set to `None`, a random action is performed.
log_interval (integer): If `verbose` = 1, the number of steps that are considered to be an interval.
nb_max_episode_steps (integer): Number of steps per episode that the agent performs before
automatically resetting the environment. Set to `None` if each episode should run
(potentially indefinitely) until the environment signals a terminal state.
# Returns
A `keras.callbacks.History` instance that recorded the entire training process.
"""
if not self.compiled:
raise RuntimeError('You tried to fit your agent but it hasn\'t been compiled yet. Please call `compile()` before `fit()`.')
if action_repetition < 1:
raise ValueError('action_repetition must be >= 1, is {}'.format(action_repetition))
self.training = True
callbacks = [] if not callbacks else callbacks[:]
if verbose == 1:
callbacks += [TrainIntervalLogger(interval=log_interval)]
elif verbose > 1:
callbacks += [TrainEpisodeLogger()]
if visualize:
callbacks += [Visualizer()]
history = History()
callbacks += [history]
callbacks = CallbackList(callbacks)
if hasattr(callbacks, 'set_model'):
callbacks.set_model(self)
else:
callbacks._set_model(self)
callbacks._set_env(env)
params = {
'nb_steps': nb_steps,
}
if hasattr(callbacks, 'set_params'):
callbacks.set_params(params)
else:
callbacks._set_params(params)
self._on_train_begin()
callbacks.on_train_begin()
episode = np.int16(0)
self.step = np.int16(0)
observation = None
episode_reward = None
episode_step = None
did_abort = False
try:
while self.step < nb_steps:
if observation is None: # start of a new episode
callbacks.on_episode_begin(episode)
episode_step = np.int16(0)
episode_reward = np.float32(0)
# Obtain the initial observation by resetting the environment.
self.reset_states()
observation = deepcopy(env.reset())
if self.processor is not None:
observation = self.processor.process_observation(observation)
assert observation is not None
# Perform random starts at beginning of episode and do not record them into the experience.
# This slightly changes the start position between games.
nb_random_start_steps = 0 if nb_max_start_steps == 0 else np.random.randint(nb_max_start_steps)
for _ in range(nb_random_start_steps):
if start_step_policy is None:
action = env.action_space.sample()
else:
action = start_step_policy(observation)
if self.processor is not None:
action = self.processor.process_action(action)
callbacks.on_action_begin(action)
observation, reward, done, info = env.step(action)
observation = deepcopy(observation)
if self.processor is not None:
observation, reward, done, info = self.processor.process_step(observation, reward, done, info)
callbacks.on_action_end(action)
if done:
warnings.warn('Env ended before {} random steps could be performed at the start. You should probably lower the `nb_max_start_steps` parameter.'.format(nb_random_start_steps))
observation = deepcopy(env.reset())
if self.processor is not None:
observation = self.processor.process_observation(observation)
break
# At this point, we expect to be fully initialized.
assert episode_reward is not None
assert episode_step is not None
assert observation is not None
# Run a single step.
callbacks.on_step_begin(episode_step)
# This is where all of the work happens. We first perceive and compute the action
# (forward step) and then use the reward to improve (backward step).
action = self.forward(observation)
if self.processor is not None:
action = self.processor.process_action(action)
reward = np.float32(0)
accumulated_info = {}
done = False
for _ in range(action_repetition):
callbacks.on_action_begin(action)
observation, r, done, info = env.step(action)
observation = deepcopy(observation)
if self.processor is not None:
observation, r, done, info = self.processor.process_step(observation, r, done, info)
for key, value in info.items():
if not np.isreal(value):
continue
if key not in accumulated_info:
accumulated_info[key] = np.zeros_like(value)
accumulated_info[key] += value
callbacks.on_action_end(action)
reward += r
if done:
break
if nb_max_episode_steps and episode_step >= nb_max_episode_steps - 1:
# Force a terminal state.
done = True
metrics = self.backward(reward, terminal=done)
episode_reward += reward
step_logs = {
'action': action,
'observation': observation,
'reward': reward,
'metrics': metrics,
'episode': episode,
'info': accumulated_info,
}
callbacks.on_step_end(episode_step, step_logs)
episode_step += 1
self.step += 1
if done:
# We are in a terminal state but the agent hasn't yet seen it. We therefore
# perform one more forward-backward call and simply ignore the action before
# resetting the environment. We need to pass in `terminal=False` here since
# the *next* state, that is the state of the newly reset environment, is
# always non-terminal by convention.
self.forward(observation)
self.backward(0., terminal=False)
# This episode is finished, report and reset.
episode_logs = {
'episode_reward': episode_reward,
'nb_episode_steps': episode_step,
'nb_steps': self.step,
}
callbacks.on_episode_end(episode, episode_logs)
episode += 1
observation = None
episode_step = None
episode_reward = None
except KeyboardInterrupt:
# We catch keyboard interrupts here so that training can be safely aborted.
# This is so common that we've built this right into this function, which ensures that
# the `on_train_end` method is properly called.
did_abort = True
callbacks.on_train_end(logs={'did_abort': did_abort})
self._on_train_end()
return history | ['def', 'fit', '(', 'self', ',', 'env', ',', 'nb_steps', ',', 'action_repetition', '=', '1', ',', 'callbacks', '=', 'None', ',', 'verbose', '=', '1', ',', 'visualize', '=', 'False', ',', 'nb_max_start_steps', '=', '0', ',', 'start_step_policy', '=', 'None', ',', 'log_interval', '=', '10000', ',', 'nb_max_episode_steps', '=', 'None', ')', ':', 'if', 'not', 'self', '.', 'compiled', ':', 'raise', 'RuntimeError', '(', "'Your tried to fit your agent but it hasn\\'t been compiled yet. Please call `compile()` before `fit()`.'", ')', 'if', 'action_repetition', '<', '1', ':', 'raise', 'ValueError', '(', "'action_repetition must be >= 1, is {}'", '.', 'format', '(', 'action_repetition', ')', ')', 'self', '.', 'training', '=', 'True', 'callbacks', '=', '[', ']', 'if', 'not', 'callbacks', 'else', 'callbacks', '[', ':', ']', 'if', 'verbose', '==', '1', ':', 'callbacks', '+=', '[', 'TrainIntervalLogger', '(', 'interval', '=', 'log_interval', ')', ']', 'elif', 'verbose', '>', '1', ':', 'callbacks', '+=', '[', 'TrainEpisodeLogger', '(', ')', ']', 'if', 'visualize', ':', 'callbacks', '+=', '[', 'Visualizer', '(', ')', ']', 'history', '=', 'History', '(', ')', 'callbacks', '+=', '[', 'history', ']', 'callbacks', '=', 'CallbackList', '(', 'callbacks', ')', 'if', 'hasattr', '(', 'callbacks', ',', "'set_model'", ')', ':', 'callbacks', '.', 'set_model', '(', 'self', ')', 'else', ':', 'callbacks', '.', '_set_model', '(', 'self', ')', 'callbacks', '.', '_set_env', '(', 'env', ')', 'params', '=', '{', "'nb_steps'", ':', 'nb_steps', ',', '}', 'if', 'hasattr', '(', 'callbacks', ',', "'set_params'", ')', ':', 'callbacks', '.', 'set_params', '(', 'params', ')', 'else', ':', 'callbacks', '.', '_set_params', '(', 'params', ')', 'self', '.', '_on_train_begin', '(', ')', 'callbacks', '.', 'on_train_begin', '(', ')', 'episode', '=', 'np', '.', 'int16', '(', '0', ')', 'self', '.', 'step', '=', 'np', '.', 'int16', '(', '0', ')', 'observation', '=', 'None', 'episode_reward', '=', 'None', 'episode_step', '=', 'None', 'did_abort', '=', 'False', 'try', ':', 'while', 'self', '.', 'step', '<', 'nb_steps', ':', 'if', 'observation', 'is', 'None', ':', '# start of a new episode', 'callbacks', '.', 'on_episode_begin', '(', 'episode', ')', 'episode_step', '=', 'np', '.', 'int16', '(', '0', ')', 'episode_reward', '=', 'np', '.', 'float32', '(', '0', ')', '# Obtain the initial observation by resetting the environment.', 'self', '.', 'reset_states', '(', ')', 'observation', '=', 'deepcopy', '(', 'env', '.', 'reset', '(', ')', ')', 'if', 'self', '.', 'processor', 'is', 'not', 'None', ':', 'observation', '=', 'self', '.', 'processor', '.', 'process_observation', '(', 'observation', ')', 'assert', 'observation', 'is', 'not', 'None', '# Perform random starts at beginning of episode and do not record them into the experience.', '# This slightly changes the start position between games.', 'nb_random_start_steps', '=', '0', 'if', 'nb_max_start_steps', '==', '0', 'else', 'np', '.', 'random', '.', 'randint', '(', 'nb_max_start_steps', ')', 'for', '_', 'in', 'range', '(', 'nb_random_start_steps', ')', ':', 'if', 'start_step_policy', 'is', 'None', ':', 'action', '=', 'env', '.', 'action_space', '.', 'sample', '(', ')', 'else', ':', 'action', '=', 'start_step_policy', '(', 'observation', ')', 'if', 'self', '.', 'processor', 'is', 'not', 'None', ':', 'action', '=', 'self', '.', 'processor', '.', 'process_action', '(', 'action', ')', 'callbacks', '.', 'on_action_begin', '(', 'action', ')', 'observation', ',', 'reward', ',', 'done', 
',', 'info', '=', 'env', '.', 'step', '(', 'action', ')', 'observation', '=', 'deepcopy', '(', 'observation', ')', 'if', 'self', '.', 'processor', 'is', 'not', 'None', ':', 'observation', ',', 'reward', ',', 'done', ',', 'info', '=', 'self', '.', 'processor', '.', 'process_step', '(', 'observation', ',', 'reward', ',', 'done', ',', 'info', ')', 'callbacks', '.', 'on_action_end', '(', 'action', ')', 'if', 'done', ':', 'warnings', '.', 'warn', '(', "'Env ended before {} random steps could be performed at the start. You should probably lower the `nb_max_start_steps` parameter.'", '.', 'format', '(', 'nb_random_start_steps', ')', ')', 'observation', '=', 'deepcopy', '(', 'env', '.', 'reset', '(', ')', ')', 'if', 'self', '.', 'processor', 'is', 'not', 'None', ':', 'observation', '=', 'self', '.', 'processor', '.', 'process_observation', '(', 'observation', ')', 'break', '# At this point, we expect to be fully initialized.', 'assert', 'episode_reward', 'is', 'not', 'None', 'assert', 'episode_step', 'is', 'not', 'None', 'assert', 'observation', 'is', 'not', 'None', '# Run a single step.', 'callbacks', '.', 'on_step_begin', '(', 'episode_step', ')', '# This is were all of the work happens. We first perceive and compute the action', '# (forward step) and then use the reward to improve (backward step).', 'action', '=', 'self', '.', 'forward', '(', 'observation', ')', 'if', 'self', '.', 'processor', 'is', 'not', 'None', ':', 'action', '=', 'self', '.', 'processor', '.', 'process_action', '(', 'action', ')', 'reward', '=', 'np', '.', 'float32', '(', '0', ')', 'accumulated_info', '=', '{', '}', 'done', '=', 'False', 'for', '_', 'in', 'range', '(', 'action_repetition', ')', ':', 'callbacks', '.', 'on_action_begin', '(', 'action', ')', 'observation', ',', 'r', ',', 'done', ',', 'info', '=', 'env', '.', 'step', '(', 'action', ')', 'observation', '=', 'deepcopy', '(', 'observation', ')', 'if', 'self', '.', 'processor', 'is', 'not', 'None', ':', 'observation', ',', 'r', ',', 'done', ',', 'info', '=', 'self', '.', 'processor', '.', 'process_step', '(', 'observation', ',', 'r', ',', 'done', ',', 'info', ')', 'for', 'key', ',', 'value', 'in', 'info', '.', 'items', '(', ')', ':', 'if', 'not', 'np', '.', 'isreal', '(', 'value', ')', ':', 'continue', 'if', 'key', 'not', 'in', 'accumulated_info', ':', 'accumulated_info', '[', 'key', ']', '=', 'np', '.', 'zeros_like', '(', 'value', ')', 'accumulated_info', '[', 'key', ']', '+=', 'value', 'callbacks', '.', 'on_action_end', '(', 'action', ')', 'reward', '+=', 'r', 'if', 'done', ':', 'break', 'if', 'nb_max_episode_steps', 'and', 'episode_step', '>=', 'nb_max_episode_steps', '-', '1', ':', '# Force a terminal state.', 'done', '=', 'True', 'metrics', '=', 'self', '.', 'backward', '(', 'reward', ',', 'terminal', '=', 'done', ')', 'episode_reward', '+=', 'reward', 'step_logs', '=', '{', "'action'", ':', 'action', ',', "'observation'", ':', 'observation', ',', "'reward'", ':', 'reward', ',', "'metrics'", ':', 'metrics', ',', "'episode'", ':', 'episode', ',', "'info'", ':', 'accumulated_info', ',', '}', 'callbacks', '.', 'on_step_end', '(', 'episode_step', ',', 'step_logs', ')', 'episode_step', '+=', '1', 'self', '.', 'step', '+=', '1', 'if', 'done', ':', "# We are in a terminal state but the agent hasn't yet seen it. We therefore", '# perform one more forward-backward call and simply ignore the action before', '# resetting the environment. 
We need to pass in `terminal=False` here since', '# the *next* state, that is the state of the newly reset environment, is', '# always non-terminal by convention.', 'self', '.', 'forward', '(', 'observation', ')', 'self', '.', 'backward', '(', '0.', ',', 'terminal', '=', 'False', ')', '# This episode is finished, report and reset.', 'episode_logs', '=', '{', "'episode_reward'", ':', 'episode_reward', ',', "'nb_episode_steps'", ':', 'episode_step', ',', "'nb_steps'", ':', 'self', '.', 'step', ',', '}', 'callbacks', '.', 'on_episode_end', '(', 'episode', ',', 'episode_logs', ')', 'episode', '+=', '1', 'observation', '=', 'None', 'episode_step', '=', 'None', 'episode_reward', '=', 'None', 'except', 'KeyboardInterrupt', ':', '# We catch keyboard interrupts here so that training can be be safely aborted.', "# This is so common that we've built this right into this function, which ensures that", '# the `on_train_end` method is properly called.', 'did_abort', '=', 'True', 'callbacks', '.', 'on_train_end', '(', 'logs', '=', '{', "'did_abort'", ':', 'did_abort', '}', ')', 'self', '.', '_on_train_end', '(', ')', 'return', 'history'] | Trains the agent on the given environment.
# Arguments
env: (`Env` instance): Environment that the agent interacts with. See [Env](#env) for details.
nb_steps (integer): Number of training steps to be performed.
action_repetition (integer): Number of times the agent repeats the same action without
observing the environment again. Setting this to a value > 1 can be useful
if a single action only has a very small effect on the environment.
callbacks (list of `keras.callbacks.Callback` or `rl.callbacks.Callback` instances):
List of callbacks to apply during training. See [callbacks](/callbacks) for details.
verbose (integer): 0 for no logging, 1 for interval logging (compare `log_interval`), 2 for episode logging
visualize (boolean): If `True`, the environment is visualized during training. However,
this is likely going to slow down training significantly and is thus intended to be
a debugging instrument.
nb_max_start_steps (integer): Number of maximum steps that the agent performs at the beginning
of each episode using `start_step_policy`. Notice that this is an upper limit since
the exact number of steps to be performed is sampled uniformly from [0, max_start_steps]
at the beginning of each episode.
start_step_policy (`lambda observation: action`): The policy
to follow if `nb_max_start_steps` > 0. If set to `None`, a random action is performed.
log_interval (integer): If `verbose` = 1, the number of steps that are considered to be an interval.
nb_max_episode_steps (integer): Number of steps per episode that the agent performs before
automatically resetting the environment. Set to `None` if each episode should run
(potentially indefinitely) until the environment signals a terminal state.
# Returns
A `keras.callbacks.History` instance that recorded the entire training process. | ['Trains', 'the', 'agent', 'on', 'the', 'given', 'environment', '.'] | train | https://github.com/keras-rl/keras-rl/blob/e6efb0d8297ec38d704a3110b5d6ed74d09a05e3/rl/core.py#L53-L238 |
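A usage sketch for fit(), mirroring the keras-rl README pattern for a DQN agent on a Gym environment; the network shape and hyperparameters are illustrative:

```python
import gym
from keras.models import Sequential
from keras.layers import Dense, Flatten
from keras.optimizers import Adam
from rl.agents.dqn import DQNAgent
from rl.memory import SequentialMemory
from rl.policy import EpsGreedyQPolicy

env = gym.make('CartPole-v0')
nb_actions = env.action_space.n

model = Sequential([
    Flatten(input_shape=(1,) + env.observation_space.shape),
    Dense(16, activation='relu'),
    Dense(nb_actions, activation='linear'),
])

dqn = DQNAgent(model=model, nb_actions=nb_actions,
               memory=SequentialMemory(limit=50000, window_length=1),
               nb_steps_warmup=100, policy=EpsGreedyQPolicy())
dqn.compile(Adam(lr=1e-3), metrics=['mae'])

# fit() drives the interaction loop described above and returns a History object.
history = dqn.fit(env, nb_steps=10000, visualize=False, verbose=1)
```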
7,219 | agoragames/haigha | haigha/connection.py | Connection._next_channel_id | def _next_channel_id(self):
'''Return the next possible channel id. Is a circular enumeration.'''
self._channel_counter += 1
if self._channel_counter >= self._channel_max:
self._channel_counter = 1
return self._channel_counter | python | def _next_channel_id(self):
'''Return the next possible channel id. Is a circular enumeration.'''
self._channel_counter += 1
if self._channel_counter >= self._channel_max:
self._channel_counter = 1
return self._channel_counter | ['def', '_next_channel_id', '(', 'self', ')', ':', 'self', '.', '_channel_counter', '+=', '1', 'if', 'self', '.', '_channel_counter', '>=', 'self', '.', '_channel_max', ':', 'self', '.', '_channel_counter', '=', '1', 'return', 'self', '.', '_channel_counter'] | Return the next possible channel id. Is a circular enumeration. | ['Return', 'the', 'next', 'possible', 'channel', 'id', '.', 'Is', 'a', 'circular', 'enumeration', '.'] | train | https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/connection.py#L307-L312 |
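A standalone sketch of the circular enumeration above, with a hypothetical channel_max of 4 so the ids cycle 1, 2, 3, 1, ...:

```python
class ChannelCounter(object):
    def __init__(self, channel_max):
        self._channel_max = channel_max
        self._channel_counter = 0

    def _next_channel_id(self):
        self._channel_counter += 1
        if self._channel_counter >= self._channel_max:
            self._channel_counter = 1
        return self._channel_counter

c = ChannelCounter(channel_max=4)
print([c._next_channel_id() for _ in range(6)])   # [1, 2, 3, 1, 2, 3]
```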
7,220 | Bearle/django-private-chat | django_private_chat/handlers.py | check_online | def check_online(stream):
"""
Used to check user's online opponents and show their online/offline status on page on init
"""
while True:
packet = yield from stream.get()
session_id = packet.get('session_key')
opponent_username = packet.get('username')
if session_id and opponent_username:
user_owner = get_user_from_session(session_id)
if user_owner:
# Find all connections including user_owner as opponent
online_opponents = list(filter(lambda x: x[1] == user_owner.username, ws_connections))
logger.debug('User ' + user_owner.username + ' has ' + str(len(online_opponents)) + ' opponents online')
# Send user online statuses of his opponents
socket = ws_connections.get((user_owner.username, opponent_username))
if socket:
online_opponents_usernames = [i[0] for i in online_opponents]
yield from target_message(socket,
{'type': 'gone-online', 'usernames': online_opponents_usernames})
else:
pass # socket for the pair user_owner.username, opponent_username not found
# this can be in case the user has already gone offline
else:
pass # invalid session id
else:
pass | python | def check_online(stream):
"""
Used to check user's online opponents and show their online/offline status on page on init
"""
while True:
packet = yield from stream.get()
session_id = packet.get('session_key')
opponent_username = packet.get('username')
if session_id and opponent_username:
user_owner = get_user_from_session(session_id)
if user_owner:
# Find all connections including user_owner as opponent
online_opponents = list(filter(lambda x: x[1] == user_owner.username, ws_connections))
logger.debug('User ' + user_owner.username + ' has ' + str(len(online_opponents)) + ' opponents online')
# Send user online statuses of his opponents
socket = ws_connections.get((user_owner.username, opponent_username))
if socket:
online_opponents_usernames = [i[0] for i in online_opponents]
yield from target_message(socket,
{'type': 'gone-online', 'usernames': online_opponents_usernames})
else:
pass # socket for the pair user_owner.username, opponent_username not found
# this can be in case the user has already gone offline
else:
pass # invalid session id
else:
pass | ['def', 'check_online', '(', 'stream', ')', ':', 'while', 'True', ':', 'packet', '=', 'yield', 'from', 'stream', '.', 'get', '(', ')', 'session_id', '=', 'packet', '.', 'get', '(', "'session_key'", ')', 'opponent_username', '=', 'packet', '.', 'get', '(', "'username'", ')', 'if', 'session_id', 'and', 'opponent_username', ':', 'user_owner', '=', 'get_user_from_session', '(', 'session_id', ')', 'if', 'user_owner', ':', '# Find all connections including user_owner as opponent\r', 'online_opponents', '=', 'list', '(', 'filter', '(', 'lambda', 'x', ':', 'x', '[', '1', ']', '==', 'user_owner', '.', 'username', ',', 'ws_connections', ')', ')', 'logger', '.', 'debug', '(', "'User '", '+', 'user_owner', '.', 'username', '+', "' has '", '+', 'str', '(', 'len', '(', 'online_opponents', ')', ')', '+', "' opponents online'", ')', '# Send user online statuses of his opponents\r', 'socket', '=', 'ws_connections', '.', 'get', '(', '(', 'user_owner', '.', 'username', ',', 'opponent_username', ')', ')', 'if', 'socket', ':', 'online_opponents_usernames', '=', '[', 'i', '[', '0', ']', 'for', 'i', 'in', 'online_opponents', ']', 'yield', 'from', 'target_message', '(', 'socket', ',', '{', "'type'", ':', "'gone-online'", ',', "'usernames'", ':', 'online_opponents_usernames', '}', ')', 'else', ':', 'pass', '# socket for the pair user_owner.username, opponent_username not found\r', '# this can be in case the user has already gone offline\r', 'else', ':', 'pass', '# invalid session id\r', 'else', ':', 'pass'] | Used to check user's online opponents and show their online/offline status on page on init | ['Used', 'to', 'check', 'user', 's', 'online', 'opponents', 'and', 'show', 'their', 'online', '/', 'offline', 'status', 'on', 'page', 'on', 'init'] | train | https://github.com/Bearle/django-private-chat/blob/5b51e65875795c5c0ce21bb631c53bd3aac4c26b/django_private_chat/handlers.py#L64-L90 |
7,221 | franciscogarate/pyliferisk | pyliferisk/__init__.py | Axn | def Axn(mt, x, n):
""" (A^1)x:n : Returns the EPV (net single premium) of a term insurance. """
return (mt.Mx[x] - mt.Mx[x + n]) / mt.Dx[x] | python | def Axn(mt, x, n):
""" (A^1)x:n : Returns the EPV (net single premium) of a term insurance. """
return (mt.Mx[x] - mt.Mx[x + n]) / mt.Dx[x] | ['def', 'Axn', '(', 'mt', ',', 'x', ',', 'n', ')', ':', 'return', '(', 'mt', '.', 'Mx', '[', 'x', ']', '-', 'mt', '.', 'Mx', '[', 'x', '+', 'n', ']', ')', '/', 'mt', '.', 'Dx', '[', 'x', ']'] | (A^1)x:n : Returns the EPV (net single premium) of a term insurance. | ['(', 'A^1', ')', 'x', ':', 'n', ':', 'Returns', 'the', 'EPV', '(', 'net', 'single', 'premium', ')', 'of', 'a', 'term', 'insurance', '.'] | train | https://github.com/franciscogarate/pyliferisk/blob/8d906bed04df1ba00fa1cacc6f31030ce5ab6233/pyliferisk/__init__.py#L305-L307 |
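A worked sketch of the commutation formula above, (Mx[x] - Mx[x+n]) / Dx[x], on a tiny hypothetical table fragment; the commutation values are made up for illustration:

```python
Mx = {40: 2500.0, 50: 1800.0}   # hypothetical commutation values M_x
Dx = {40: 9000.0}               # hypothetical commutation value D_x

x, n = 40, 10
epv = (Mx[x] - Mx[x + n]) / Dx[x]
print(round(epv, 4))            # 0.0778: net single premium per unit sum insured
```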
7,222 | rdo-management/python-rdomanager-oscplugin | rdomanager_oscplugin/utils.py | generate_overcloud_passwords | def generate_overcloud_passwords(output_file="tripleo-overcloud-passwords"):
"""Create the passwords needed for the overcloud
This will create the set of passwords required by the overcloud, store
them in the output file path and return a dictionary of passwords. If the
file already exists, the existing passwords will be returned instead.
"""
if os.path.isfile(output_file):
with open(output_file) as f:
return dict(line.split('=') for line in f.read().splitlines())
password_names = (
"OVERCLOUD_ADMIN_PASSWORD",
"OVERCLOUD_ADMIN_TOKEN",
"OVERCLOUD_CEILOMETER_PASSWORD",
"OVERCLOUD_CEILOMETER_SECRET",
"OVERCLOUD_CINDER_PASSWORD",
"OVERCLOUD_DEMO_PASSWORD",
"OVERCLOUD_GLANCE_PASSWORD",
"OVERCLOUD_HEAT_PASSWORD",
"OVERCLOUD_HEAT_STACK_DOMAIN_PASSWORD",
"OVERCLOUD_NEUTRON_PASSWORD",
"OVERCLOUD_NOVA_PASSWORD",
"OVERCLOUD_SWIFT_HASH",
"OVERCLOUD_SWIFT_PASSWORD",
)
passwords = dict((p, _generate_password()) for p in password_names)
with open(output_file, 'w') as f:
for name, password in passwords.items():
f.write("{0}={1}\n".format(name, password))
return passwords | python | def generate_overcloud_passwords(output_file="tripleo-overcloud-passwords"):
"""Create the passwords needed for the overcloud
This will create the set of passwords required by the overcloud, store
them in the output file path and return a dictionary of passwords. If the
file already exists, the existing passwords will be returned instead.
"""
if os.path.isfile(output_file):
with open(output_file) as f:
return dict(line.split('=') for line in f.read().splitlines())
password_names = (
"OVERCLOUD_ADMIN_PASSWORD",
"OVERCLOUD_ADMIN_TOKEN",
"OVERCLOUD_CEILOMETER_PASSWORD",
"OVERCLOUD_CEILOMETER_SECRET",
"OVERCLOUD_CINDER_PASSWORD",
"OVERCLOUD_DEMO_PASSWORD",
"OVERCLOUD_GLANCE_PASSWORD",
"OVERCLOUD_HEAT_PASSWORD",
"OVERCLOUD_HEAT_STACK_DOMAIN_PASSWORD",
"OVERCLOUD_NEUTRON_PASSWORD",
"OVERCLOUD_NOVA_PASSWORD",
"OVERCLOUD_SWIFT_HASH",
"OVERCLOUD_SWIFT_PASSWORD",
)
passwords = dict((p, _generate_password()) for p in password_names)
with open(output_file, 'w') as f:
for name, password in passwords.items():
f.write("{0}={1}\n".format(name, password))
return passwords | ['def', 'generate_overcloud_passwords', '(', 'output_file', '=', '"tripleo-overcloud-passwords"', ')', ':', 'if', 'os', '.', 'path', '.', 'isfile', '(', 'output_file', ')', ':', 'with', 'open', '(', 'output_file', ')', 'as', 'f', ':', 'return', 'dict', '(', 'line', '.', 'split', '(', "'='", ')', 'for', 'line', 'in', 'f', '.', 'read', '(', ')', '.', 'splitlines', '(', ')', ')', 'password_names', '=', '(', '"OVERCLOUD_ADMIN_PASSWORD"', ',', '"OVERCLOUD_ADMIN_TOKEN"', ',', '"OVERCLOUD_CEILOMETER_PASSWORD"', ',', '"OVERCLOUD_CEILOMETER_SECRET"', ',', '"OVERCLOUD_CINDER_PASSWORD"', ',', '"OVERCLOUD_DEMO_PASSWORD"', ',', '"OVERCLOUD_GLANCE_PASSWORD"', ',', '"OVERCLOUD_HEAT_PASSWORD"', ',', '"OVERCLOUD_HEAT_STACK_DOMAIN_PASSWORD"', ',', '"OVERCLOUD_NEUTRON_PASSWORD"', ',', '"OVERCLOUD_NOVA_PASSWORD"', ',', '"OVERCLOUD_SWIFT_HASH"', ',', '"OVERCLOUD_SWIFT_PASSWORD"', ',', ')', 'passwords', '=', 'dict', '(', '(', 'p', ',', '_generate_password', '(', ')', ')', 'for', 'p', 'in', 'password_names', ')', 'with', 'open', '(', 'output_file', ',', "'w'", ')', 'as', 'f', ':', 'for', 'name', ',', 'password', 'in', 'passwords', '.', 'items', '(', ')', ':', 'f', '.', 'write', '(', '"{0}={1}\\n"', '.', 'format', '(', 'name', ',', 'password', ')', ')', 'return', 'passwords'] | Create the passwords needed for the overcloud
This will create the set of passwords required by the overcloud, store
them in the output file path and return a dictionary of passwords. If the
file already exists, the existing passwords will be returned instead. | ['Create', 'the', 'passwords', 'needed', 'for', 'the', 'overcloud'] | train | https://github.com/rdo-management/python-rdomanager-oscplugin/blob/165a166fb2e5a2598380779b35812b8b8478c4fb/rdomanager_oscplugin/utils.py#L63-L97 |
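A usage sketch, assuming the helper is importable from rdomanager_oscplugin.utils as the path above suggests; because results are cached in the output file, a second call returns the same values:

```python
from rdomanager_oscplugin.utils import generate_overcloud_passwords

passwords = generate_overcloud_passwords(output_file="overcloud-passwords.test")
print(passwords["OVERCLOUD_ADMIN_PASSWORD"])

# A second call re-reads the file instead of generating fresh passwords.
assert generate_overcloud_passwords(output_file="overcloud-passwords.test") == passwords
```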
7,223 | cloud-custodian/cloud-custodian | tools/c7n_logexporter/c7n_logexporter/exporter.py | subscribe | def subscribe(config, accounts, region, merge, debug):
"""subscribe accounts log groups to target account log group destination"""
config = validate.callback(config)
subscription = config.get('subscription')
if subscription is None:
log.error("config file: logs subscription missing")
sys.exit(1)
def converge_destination_policy(client, config):
destination_name = subscription['destination-arn'].rsplit(':', 1)[-1]
try:
extant_destinations = client.describe_destinations(
DestinationNamePrefix=destination_name).get('destinations')
except ClientError:
log.error("Log group destination not found: %s",
subscription['destination-arn'])
sys.exit(1)
account_ids = set()
for a in accounts:
if isinstance(a['role'], list):
account_ids.add(a['role'][-1].split(':')[4])
else:
account_ids.add(a['role'].split(':')[4])
if merge:
for d in extant_destinations:
if d['destinationName'] == destination_name:
for s in json.loads(d['accessPolicy']):
if s['Sid'] == 'CrossAccountDelivery':
account_ids.update(s['Principal']['AWS'])
client.put_destination_policy(
destinationName=destination_name,
accessPolicy=json.dumps({
'Statement': [{
'Action': 'logs:PutSubscriptionFilter',
'Effect': 'Allow',
'Principal': {'AWS': list(account_ids)},
'Resource': subscription['destination-arn'],
'Sid': 'CrossAccountDelivery'}]}))
def subscribe_account(t_account, subscription, region):
session = get_session(t_account['role'], region)
client = session.client('logs')
distribution = subscription.get('distribution', 'ByLogStream')
for g in account.get('groups'):
if (g.endswith('*')):
g = g.replace('*', '')
paginator = client.get_paginator('describe_log_groups')
allLogGroups = paginator.paginate(logGroupNamePrefix=g).build_full_result()
for l in allLogGroups:
_process_subscribe_group(
client, l['logGroupName'], subscription, distribution)
else:
_process_subscribe_group(client, g, subscription, distribution)
if subscription.get('managed-policy'):
if subscription.get('destination-role'):
session = get_session(subscription['destination-role'], region)
else:
session = boto3.Session()
converge_destination_policy(session.client('logs'), config)
executor = debug and MainThreadExecutor or ThreadPoolExecutor
with executor(max_workers=32) as w:
futures = {}
for account in config.get('accounts', ()):
if accounts and account['name'] not in accounts:
continue
futures[w.submit(subscribe_account, account, subscription, region)] = account
for f in as_completed(futures):
account = futures[f]
if f.exception():
log.error("Error on account %s err: %s",
account['name'], f.exception())
log.info("Completed %s", account['name']) | python | def subscribe(config, accounts, region, merge, debug):
"""subscribe accounts log groups to target account log group destination"""
config = validate.callback(config)
subscription = config.get('subscription')
if subscription is None:
log.error("config file: logs subscription missing")
sys.exit(1)
def converge_destination_policy(client, config):
destination_name = subscription['destination-arn'].rsplit(':', 1)[-1]
try:
extant_destinations = client.describe_destinations(
DestinationNamePrefix=destination_name).get('destinations')
except ClientError:
log.error("Log group destination not found: %s",
subscription['destination-arn'])
sys.exit(1)
account_ids = set()
for a in accounts:
if isinstance(a['role'], list):
account_ids.add(a['role'][-1].split(':')[4])
else:
account_ids.add(a['role'].split(':')[4])
if merge:
for d in extant_destinations:
if d['destinationName'] == destination_name:
for s in json.loads(d['accessPolicy']):
if s['Sid'] == 'CrossAccountDelivery':
account_ids.update(s['Principal']['AWS'])
client.put_destination_policy(
destinationName=destination_name,
accessPolicy=json.dumps({
'Statement': [{
'Action': 'logs:PutSubscriptionFilter',
'Effect': 'Allow',
'Principal': {'AWS': list(account_ids)},
'Resource': subscription['destination-arn'],
'Sid': 'CrossAccountDelivery'}]}))
def subscribe_account(t_account, subscription, region):
session = get_session(t_account['role'], region)
client = session.client('logs')
distribution = subscription.get('distribution', 'ByLogStream')
for g in account.get('groups'):
if (g.endswith('*')):
g = g.replace('*', '')
paginator = client.get_paginator('describe_log_groups')
allLogGroups = paginator.paginate(logGroupNamePrefix=g).build_full_result()
for l in allLogGroups:
_process_subscribe_group(
client, l['logGroupName'], subscription, distribution)
else:
_process_subscribe_group(client, g, subscription, distribution)
if subscription.get('managed-policy'):
if subscription.get('destination-role'):
session = get_session(subscription['destination-role'], region)
else:
session = boto3.Session()
converge_destination_policy(session.client('logs'), config)
executor = debug and MainThreadExecutor or ThreadPoolExecutor
with executor(max_workers=32) as w:
futures = {}
for account in config.get('accounts', ()):
if accounts and account['name'] not in accounts:
continue
futures[w.submit(subscribe_account, account, subscription, region)] = account
for f in as_completed(futures):
account = futures[f]
if f.exception():
log.error("Error on account %s err: %s",
account['name'], f.exception())
log.info("Completed %s", account['name']) | ['def', 'subscribe', '(', 'config', ',', 'accounts', ',', 'region', ',', 'merge', ',', 'debug', ')', ':', 'config', '=', 'validate', '.', 'callback', '(', 'config', ')', 'subscription', '=', 'config', '.', 'get', '(', "'subscription'", ')', 'if', 'subscription', 'is', 'None', ':', 'log', '.', 'error', '(', '"config file: logs subscription missing"', ')', 'sys', '.', 'exit', '(', '1', ')', 'def', 'converge_destination_policy', '(', 'client', ',', 'config', ')', ':', 'destination_name', '=', 'subscription', '[', "'destination-arn'", ']', '.', 'rsplit', '(', "':'", ',', '1', ')', '[', '-', '1', ']', 'try', ':', 'extant_destinations', '=', 'client', '.', 'describe_destinations', '(', 'DestinationNamePrefix', '=', 'destination_name', ')', '.', 'get', '(', "'destinations'", ')', 'except', 'ClientError', ':', 'log', '.', 'error', '(', '"Log group destination not found: %s"', ',', 'subscription', '[', "'destination-arn'", ']', ')', 'sys', '.', 'exit', '(', '1', ')', 'account_ids', '=', 'set', '(', ')', 'for', 'a', 'in', 'accounts', ':', 'if', 'isinstance', '(', 'a', '[', "'role'", ']', ',', 'list', ')', ':', 'account_ids', '.', 'add', '(', 'a', '[', "'role'", ']', '[', '-', '1', ']', '.', 'split', '(', "':'", ')', '[', '4', ']', ')', 'else', ':', 'account_ids', '.', 'add', '(', 'a', '[', "'role'", ']', '.', 'split', '(', "':'", ')', '[', '4', ']', ')', 'if', 'merge', ':', 'for', 'd', 'in', 'extant_destinations', ':', 'if', 'd', '[', "'destinationName'", ']', '==', 'destination_name', ':', 'for', 's', 'in', 'json', '.', 'loads', '(', 'd', '[', "'accessPolicy'", ']', ')', ':', 'if', 's', '[', "'Sid'", ']', '==', "'CrossAccountDelivery'", ':', 'account_ids', '.', 'update', '(', 's', '[', "'Principal'", ']', '[', "'AWS'", ']', ')', 'client', '.', 'put_destination_policy', '(', 'destinationName', '=', 'destination_name', ',', 'accessPolicy', '=', 'json', '.', 'dumps', '(', '{', "'Statement'", ':', '[', '{', "'Action'", ':', "'logs:PutSubscriptionFilter'", ',', "'Effect'", ':', "'Allow'", ',', "'Principal'", ':', '{', "'AWS'", ':', 'list', '(', 'account_ids', ')', '}', ',', "'Resource'", ':', 'subscription', '[', "'destination-arn'", ']', ',', "'Sid'", ':', "'CrossAccountDelivery'", '}', ']', '}', ')', ')', 'def', 'subscribe_account', '(', 't_account', ',', 'subscription', ',', 'region', ')', ':', 'session', '=', 'get_session', '(', 't_account', '[', "'role'", ']', ',', 'region', ')', 'client', '=', 'session', '.', 'client', '(', "'logs'", ')', 'distribution', '=', 'subscription', '.', 'get', '(', "'distribution'", ',', "'ByLogStream'", ')', 'for', 'g', 'in', 'account', '.', 'get', '(', "'groups'", ')', ':', 'if', '(', 'g', '.', 'endswith', '(', "'*'", ')', ')', ':', 'g', '=', 'g', '.', 'replace', '(', "'*'", ',', "''", ')', 'paginator', '=', 'client', '.', 'get_paginator', '(', "'describe_log_groups'", ')', 'allLogGroups', '=', 'paginator', '.', 'paginate', '(', 'logGroupNamePrefix', '=', 'g', ')', '.', 'build_full_result', '(', ')', 'for', 'l', 'in', 'allLogGroups', ':', '_process_subscribe_group', '(', 'client', ',', 'l', '[', "'logGroupName'", ']', ',', 'subscription', ',', 'distribution', ')', 'else', ':', '_process_subscribe_group', '(', 'client', ',', 'g', ',', 'subscription', ',', 'distribution', ')', 'if', 'subscription', '.', 'get', '(', "'managed-policy'", ')', ':', 'if', 'subscription', '.', 'get', '(', "'destination-role'", ')', ':', 'session', '=', 'get_session', '(', 'subscription', '[', "'destination-role'", ']', ',', 'region', ')', 'else', 
':', 'session', '=', 'boto3', '.', 'Session', '(', ')', 'converge_destination_policy', '(', 'session', '.', 'client', '(', "'logs'", ')', ',', 'config', ')', 'executor', '=', 'debug', 'and', 'MainThreadExecutor', 'or', 'ThreadPoolExecutor', 'with', 'executor', '(', 'max_workers', '=', '32', ')', 'as', 'w', ':', 'futures', '=', '{', '}', 'for', 'account', 'in', 'config', '.', 'get', '(', "'accounts'", ',', '(', ')', ')', ':', 'if', 'accounts', 'and', 'account', '[', "'name'", ']', 'not', 'in', 'accounts', ':', 'continue', 'futures', '[', 'w', '.', 'submit', '(', 'subscribe_account', ',', 'account', ',', 'subscription', ',', 'region', ')', ']', '=', 'account', 'for', 'f', 'in', 'as_completed', '(', 'futures', ')', ':', 'account', '=', 'futures', '[', 'f', ']', 'if', 'f', '.', 'exception', '(', ')', ':', 'log', '.', 'error', '(', '"Error on account %s err: %s"', ',', 'account', '[', "'name'", ']', ',', 'f', '.', 'exception', '(', ')', ')', 'log', '.', 'info', '(', '"Completed %s"', ',', 'account', '[', "'name'", ']', ')'] | subscribe accounts log groups to target account log group destination | ['subscribe', 'accounts', 'log', 'groups', 'to', 'target', 'account', 'log', 'group', 'destination'] | train | https://github.com/cloud-custodian/cloud-custodian/blob/52ef732eb3d7bc939d1579faf519314814695c08/tools/c7n_logexporter/c7n_logexporter/exporter.py#L171-L251 |
7,224 | jlaine/python-netfilter | netfilter/rule.py | Extension.log | def log(self, level, prefix = ''):
"""Writes the contents of the Extension to the logging system.
"""
logging.log(level, "%sname: %s", prefix, self.__name)
logging.log(level, "%soptions: %s", prefix, self.__options) | python | def log(self, level, prefix = ''):
"""Writes the contents of the Extension to the logging system.
"""
logging.log(level, "%sname: %s", prefix, self.__name)
logging.log(level, "%soptions: %s", prefix, self.__options) | ['def', 'log', '(', 'self', ',', 'level', ',', 'prefix', '=', "''", ')', ':', 'logging', '.', 'log', '(', 'level', ',', '"%sname: %s"', ',', 'prefix', ',', 'self', '.', '__name', ')', 'logging', '.', 'log', '(', 'level', ',', '"%soptions: %s"', ',', 'prefix', ',', 'self', '.', '__options', ')'] | Writes the contents of the Extension to the logging system. | ['Writes', 'the', 'contents', 'of', 'the', 'Extension', 'to', 'the', 'logging', 'system', '.'] | train | https://github.com/jlaine/python-netfilter/blob/e4942c0f6a654a985049b629ead3dc6dcdb30145/netfilter/rule.py#L91-L95 |
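A hedged usage sketch: Match is assumed to be one of the concrete Extension types in netfilter.rule, with the (name, options) constructor shown in the project's README, so it inherits the log() behaviour above:

```python
import logging
from netfilter.rule import Match

logging.basicConfig(level=logging.DEBUG)
match = Match('tcp', '--dport 80')
match.log(logging.DEBUG, prefix='match ')
# roughly: "match name: tcp" followed by "match options: ...";
# the exact option formatting depends on the library.
```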
7,225 | prthkms/alex | alex/preprocess.py | QueryMatcher.query | def query(self, query):
"""Q.query(query string) -> category string -- return the matched
category for any user query
"""
self.query = query
self.process_query()
matching_corpus_index = self.match_query_to_corpus()
return self.category_list[matching_corpus_index].strip() | python | def query(self, query):
"""Q.query(query string) -> category string -- return the matched
category for any user query
"""
self.query = query
self.process_query()
matching_corpus_index = self.match_query_to_corpus()
return self.category_list[matching_corpus_index].strip() | ['def', 'query', '(', 'self', ',', 'query', ')', ':', 'self', '.', 'query', '=', 'query', 'self', '.', 'process_query', '(', ')', 'matching_corpus_index', '=', 'self', '.', 'match_query_to_corpus', '(', ')', 'return', 'self', '.', 'category_list', '[', 'matching_corpus_index', ']', '.', 'strip', '(', ')'] | Q.query(query string) -> category string -- return the matched
category for any user query | ['Q', '.', 'query', '(', 'query', 'string', ')', '-', '>', 'category', 'string', '--', 'return', 'the', 'matched', 'category', 'for', 'any', 'user', 'query'] | train | https://github.com/prthkms/alex/blob/79d3167c877e94cc07db0aab55a35857fac67ef7/alex/preprocess.py#L149-L156 |
7,226 | jahuth/litus | spikes.py | LabeledMatrix.convert | def convert(self,label,units=None,conversion_function=convert_time):
""" converts a dimension in place """
label_no = self.get_label_no(label)
new_label, new_column = self.get_converted(label_no,units,conversion_function)
labels = [LabelDimension(l) for l in self.labels]
labels[label_no] = new_label
matrix = self.matrix.copy()
matrix[:,label_no] = new_column
return LabeledMatrix(matrix,labels) | python | def convert(self,label,units=None,conversion_function=convert_time):
""" converts a dimension in place """
label_no = self.get_label_no(label)
new_label, new_column = self.get_converted(label_no,units,conversion_function)
labels = [LabelDimension(l) for l in self.labels]
labels[label_no] = new_label
matrix = self.matrix.copy()
matrix[:,label_no] = new_column
return LabeledMatrix(matrix,labels) | ['def', 'convert', '(', 'self', ',', 'label', ',', 'units', '=', 'None', ',', 'conversion_function', '=', 'convert_time', ')', ':', 'label_no', '=', 'self', '.', 'get_label_no', '(', 'label', ')', 'new_label', ',', 'new_column', '=', 'self', '.', 'get_converted', '(', 'label_no', ',', 'units', ',', 'conversion_function', ')', 'labels', '=', '[', 'LabelDimension', '(', 'l', ')', 'for', 'l', 'in', 'self', '.', 'labels', ']', 'labels', '[', 'label_no', ']', '=', 'new_label', 'matrix', '=', 'self', '.', 'matrix', '.', 'copy', '(', ')', 'matrix', '[', ':', ',', 'label_no', ']', '=', 'new_column', 'return', 'LabeledMatrix', '(', 'matrix', ',', 'labels', ')'] | converts a dimension in place | ['converts', 'a', 'dimension', 'in', 'place'] | train | https://github.com/jahuth/litus/blob/712b016ea2dbb1cf0a30bfdbb0a136945a7b7c5e/spikes.py#L597-L605 |
7,227 | Shizmob/pydle | pydle/features/rfc1459/client.py | RFC1459Support.join | async def join(self, channel, password=None):
""" Join channel, optionally with password. """
if self.in_channel(channel):
raise AlreadyInChannel(channel)
if password:
await self.rawmsg('JOIN', channel, password)
else:
await self.rawmsg('JOIN', channel) | python | async def join(self, channel, password=None):
""" Join channel, optionally with password. """
if self.in_channel(channel):
raise AlreadyInChannel(channel)
if password:
await self.rawmsg('JOIN', channel, password)
else:
await self.rawmsg('JOIN', channel) | ['async', 'def', 'join', '(', 'self', ',', 'channel', ',', 'password', '=', 'None', ')', ':', 'if', 'self', '.', 'in_channel', '(', 'channel', ')', ':', 'raise', 'AlreadyInChannel', '(', 'channel', ')', 'if', 'password', ':', 'await', 'self', '.', 'rawmsg', '(', "'JOIN'", ',', 'channel', ',', 'password', ')', 'else', ':', 'await', 'self', '.', 'rawmsg', '(', "'JOIN'", ',', 'channel', ')'] | Join channel, optionally with password. | ['Join', 'channel', 'optionally', 'with', 'password', '.'] | train | https://github.com/Shizmob/pydle/blob/7ec7d65d097318ed0bcdc5d8401470287d8c7cf7/pydle/features/rfc1459/client.py#L254-L262 |
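A usage sketch following the pydle client pattern (server, nickname, and channel names are placeholders); join() is awaited once the connection is established:

```python
import pydle

class MyClient(pydle.Client):
    async def on_connect(self):
        await super().on_connect()
        await self.join('#example')   # or: await self.join('#private', 'hunter2')

client = MyClient('MyBot', realname='My Bot')
client.run('irc.libera.chat', tls=True, tls_verify=False)
```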
7,228 | Capitains/MyCapytain | MyCapytain/retrievers/cts5.py | HttpCtsRetriever.getReffs | def getReffs(self, textId, level=1, subreference=None):
""" Retrieve the siblings of a textual node
:param textId: CtsTextMetadata Identifier
:type textId: str
:param level: Depth for retrieval
:type level: int
:param subreference: CapitainsCtsPassage Reference
:type subreference: str
:return: List of references
:rtype: [str]
"""
depth = level
if subreference:
textId = "{}:{}".format(textId, subreference)
if subreference:
if isinstance(subreference, CtsReference):
depth += subreference.depth
else:
depth += (CtsReference(subreference)).depth
if level:
level = max(depth, level)
return self.getValidReff(urn=textId, level=level) | python | def getReffs(self, textId, level=1, subreference=None):
""" Retrieve the siblings of a textual node
:param textId: CtsTextMetadata Identifier
:type textId: str
:param level: Depth for retrieval
:type level: int
:param subreference: CapitainsCtsPassage Reference
:type subreference: str
:return: List of references
:rtype: [str]
"""
depth = level
if subreference:
textId = "{}:{}".format(textId, subreference)
if subreference:
if isinstance(subreference, CtsReference):
depth += subreference.depth
else:
depth += (CtsReference(subreference)).depth
if level:
level = max(depth, level)
return self.getValidReff(urn=textId, level=level) | ['def', 'getReffs', '(', 'self', ',', 'textId', ',', 'level', '=', '1', ',', 'subreference', '=', 'None', ')', ':', 'depth', '=', 'level', 'if', 'subreference', ':', 'textId', '=', '"{}:{}"', '.', 'format', '(', 'textId', ',', 'subreference', ')', 'if', 'subreference', ':', 'if', 'isinstance', '(', 'subreference', ',', 'CtsReference', ')', ':', 'depth', '+=', 'subreference', '.', 'depth', 'else', ':', 'depth', '+=', '(', 'CtsReference', '(', 'subreference', ')', ')', '.', 'depth', 'if', 'level', ':', 'level', '=', 'max', '(', 'depth', ',', 'level', ')', 'return', 'self', '.', 'getValidReff', '(', 'urn', '=', 'textId', ',', 'level', '=', 'level', ')'] | Retrieve the siblings of a textual node
:param textId: CtsTextMetadata Identifier
:type textId: str
:param level: Depth for retrieval
:type level: int
:param subreference: CapitainsCtsPassage Reference
:type subreference: str
:return: List of references
:rtype: [str] | ['Retrieve', 'the', 'siblings', 'of', 'a', 'textual', 'node'] | train | https://github.com/Capitains/MyCapytain/blob/b11bbf6b6ae141fc02be70471e3fbf6907be6593/MyCapytain/retrievers/cts5.py#L216-L238 |
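A hedged usage sketch against a public CTS endpoint; the endpoint URL and URN are illustrative, and the retriever is assumed to take the endpoint URL as its constructor argument:

```python
from MyCapytain.retrievers.cts5 import HttpCtsRetriever

retriever = HttpCtsRetriever("http://cts.perseids.org/api/cts")
reffs = retriever.getReffs("urn:cts:latinLit:phi1294.phi002.perseus-lat2", level=1)
print(reffs)   # references at the first citation level, per the docstring above
```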
7,229 | wavefrontHQ/python-client | wavefront_api_client/models/chart_settings.py | ChartSettings.sparkline_display_value_type | def sparkline_display_value_type(self, sparkline_display_value_type):
"""Sets the sparkline_display_value_type of this ChartSettings.
For the single stat view, whether to display the name of the query or the value of query # noqa: E501
:param sparkline_display_value_type: The sparkline_display_value_type of this ChartSettings. # noqa: E501
:type: str
"""
allowed_values = ["VALUE", "LABEL"] # noqa: E501
if sparkline_display_value_type not in allowed_values:
raise ValueError(
"Invalid value for `sparkline_display_value_type` ({0}), must be one of {1}" # noqa: E501
.format(sparkline_display_value_type, allowed_values)
)
self._sparkline_display_value_type = sparkline_display_value_type | python | def sparkline_display_value_type(self, sparkline_display_value_type):
"""Sets the sparkline_display_value_type of this ChartSettings.
For the single stat view, whether to display the name of the query or the value of query # noqa: E501
:param sparkline_display_value_type: The sparkline_display_value_type of this ChartSettings. # noqa: E501
:type: str
"""
allowed_values = ["VALUE", "LABEL"] # noqa: E501
if sparkline_display_value_type not in allowed_values:
raise ValueError(
"Invalid value for `sparkline_display_value_type` ({0}), must be one of {1}" # noqa: E501
.format(sparkline_display_value_type, allowed_values)
)
self._sparkline_display_value_type = sparkline_display_value_type | ['def', 'sparkline_display_value_type', '(', 'self', ',', 'sparkline_display_value_type', ')', ':', 'allowed_values', '=', '[', '"VALUE"', ',', '"LABEL"', ']', '# noqa: E501', 'if', 'sparkline_display_value_type', 'not', 'in', 'allowed_values', ':', 'raise', 'ValueError', '(', '"Invalid value for `sparkline_display_value_type` ({0}), must be one of {1}"', '# noqa: E501', '.', 'format', '(', 'sparkline_display_value_type', ',', 'allowed_values', ')', ')', 'self', '.', '_sparkline_display_value_type', '=', 'sparkline_display_value_type'] | Sets the sparkline_display_value_type of this ChartSettings.
For the single stat view, whether to display the name of the query or the value of query # noqa: E501
:param sparkline_display_value_type: The sparkline_display_value_type of this ChartSettings. # noqa: E501
:type: str | ['Sets', 'the', 'sparkline_display_value_type', 'of', 'this', 'ChartSettings', '.'] | train | https://github.com/wavefrontHQ/python-client/blob/b0f1046a8f68c2c7d69e395f7167241f224c738a/wavefront_api_client/models/chart_settings.py#L1038-L1053 |
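A short usage sketch for the validating setter above. It assumes the swagger-generated ChartSettings model can be instantiated without arguments; only the allowed values ["VALUE", "LABEL"] and the ValueError behaviour come from the row.

from wavefront_api_client.models.chart_settings import ChartSettings

settings = ChartSettings()                          # assumption: all model fields are optional
settings.sparkline_display_value_type = "VALUE"     # accepted
try:
    settings.sparkline_display_value_type = "PERCENT"
except ValueError as exc:
    print(exc)  # Invalid value for `sparkline_display_value_type` (PERCENT), must be one of ['VALUE', 'LABEL']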
7,230 | softlayer/softlayer-python | SoftLayer/managers/dns.py | DNSManager._generate_create_dict | def _generate_create_dict(record, record_type, data, ttl, **kwargs):
"""Returns a dict appropriate to pass into Dns_Domain_ResourceRecord::createObject"""
# Basic dns record structure
resource_record = {
'host': record,
'data': data,
'ttl': ttl,
'type': record_type
}
for (key, value) in kwargs.items():
resource_record.setdefault(key, value)
return resource_record | python | def _generate_create_dict(record, record_type, data, ttl, **kwargs):
"""Returns a dict appropriate to pass into Dns_Domain_ResourceRecord::createObject"""
# Basic dns record structure
resource_record = {
'host': record,
'data': data,
'ttl': ttl,
'type': record_type
}
for (key, value) in kwargs.items():
resource_record.setdefault(key, value)
return resource_record | ['def', '_generate_create_dict', '(', 'record', ',', 'record_type', ',', 'data', ',', 'ttl', ',', '*', '*', 'kwargs', ')', ':', '# Basic dns record structure', 'resource_record', '=', '{', "'host'", ':', 'record', ',', "'data'", ':', 'data', ',', "'ttl'", ':', 'ttl', ',', "'type'", ':', 'record_type', '}', 'for', '(', 'key', ',', 'value', ')', 'in', 'kwargs', '.', 'items', '(', ')', ':', 'resource_record', '.', 'setdefault', '(', 'key', ',', 'value', ')', 'return', 'resource_record'] | Returns a dict appropriate to pass into Dns_Domain_ResourceRecord::createObject | ['Returns', 'a', 'dict', 'appropriate', 'to', 'pass', 'into', 'Dns_Domain_ResourceRecord', '::', 'createObject'] | train | https://github.com/softlayer/softlayer-python/blob/9f181be08cc3668353b05a6de0cb324f52cff6fa/SoftLayer/managers/dns.py#L152-L166 |
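Illustration of the dict the helper above assembles; the mxPriority keyword is a hypothetical extra field passed through **kwargs, not something taken from the row.

# DNSManager._generate_create_dict('@', 'mx', 'mail.example.com', 3600, mxPriority=10) would return:
expected = {
    'host': '@',
    'data': 'mail.example.com',
    'ttl': 3600,
    'type': 'mx',
    'mxPriority': 10,   # extra keyword arguments are merged in via setdefault()
}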
7,231 | marrow/schema | marrow/schema/transform/base.py | BaseTransform.dump | def dump(self, fh, value, context=None):
"""Attempt to transform and write a string-based foreign value to the given file-like object.
Returns the length written.
"""
value = self.dumps(value)
fh.write(value)
return len(value) | python | def dump(self, fh, value, context=None):
"""Attempt to transform and write a string-based foreign value to the given file-like object.
Returns the length written.
"""
value = self.dumps(value)
fh.write(value)
return len(value) | ['def', 'dump', '(', 'self', ',', 'fh', ',', 'value', ',', 'context', '=', 'None', ')', ':', 'value', '=', 'self', '.', 'dumps', '(', 'value', ')', 'fh', '.', 'write', '(', 'value', ')', 'return', 'len', '(', 'value', ')'] | Attempt to transform and write a string-based foreign value to the given file-like object.
Returns the length written. | ['Attempt', 'to', 'transform', 'and', 'write', 'a', 'string', '-', 'based', 'foreign', 'value', 'to', 'the', 'given', 'file', '-', 'like', 'object', '.', 'Returns', 'the', 'length', 'written', '.'] | train | https://github.com/marrow/schema/blob/0c4c3e3b8c79d8bfeb8d7265cfa5b12a2e643152/marrow/schema/transform/base.py#L50-L58 |
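Minimal sketch of the contract shown above: dump() delegates to dumps() and returns the number of characters written. The UpperTransform subclass is a hypothetical stand-in and assumes BaseTransform can be subclassed and instantiated without arguments.

import io

from marrow.schema.transform.base import BaseTransform

class UpperTransform(BaseTransform):        # hypothetical transform for illustration only
    def dumps(self, value, context=None):
        return str(value).upper()

buffer = io.StringIO()
written = UpperTransform().dump(buffer, "ok")
assert written == 2 and buffer.getvalue() == "OK"   # dump() reports exactly what it wrote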
7,232 | googleapis/google-cloud-python | bigquery/google/cloud/bigquery/table.py | _EmptyRowIterator.to_dataframe | def to_dataframe(self, bqstorage_client=None, dtypes=None, progress_bar_type=None):
"""Create an empty dataframe.
Args:
bqstorage_client (Any):
Ignored. Added for compatibility with RowIterator.
dtypes (Any):
Ignored. Added for compatibility with RowIterator.
progress_bar_type (Any):
Ignored. Added for compatibility with RowIterator.
Returns:
pandas.DataFrame:
An empty :class:`~pandas.DataFrame`.
"""
if pandas is None:
raise ValueError(_NO_PANDAS_ERROR)
return pandas.DataFrame() | python | def to_dataframe(self, bqstorage_client=None, dtypes=None, progress_bar_type=None):
"""Create an empty dataframe.
Args:
bqstorage_client (Any):
Ignored. Added for compatibility with RowIterator.
dtypes (Any):
Ignored. Added for compatibility with RowIterator.
progress_bar_type (Any):
Ignored. Added for compatibility with RowIterator.
Returns:
pandas.DataFrame:
An empty :class:`~pandas.DataFrame`.
"""
if pandas is None:
raise ValueError(_NO_PANDAS_ERROR)
return pandas.DataFrame() | ['def', 'to_dataframe', '(', 'self', ',', 'bqstorage_client', '=', 'None', ',', 'dtypes', '=', 'None', ',', 'progress_bar_type', '=', 'None', ')', ':', 'if', 'pandas', 'is', 'None', ':', 'raise', 'ValueError', '(', '_NO_PANDAS_ERROR', ')', 'return', 'pandas', '.', 'DataFrame', '(', ')'] | Create an empty dataframe.
Args:
bqstorage_client (Any):
Ignored. Added for compatibility with RowIterator.
dtypes (Any):
Ignored. Added for compatibility with RowIterator.
progress_bar_type (Any):
Ignored. Added for compatibility with RowIterator.
Returns:
pandas.DataFrame:
An empty :class:`~pandas.DataFrame`. | ['Create', 'an', 'empty', 'dataframe', '.'] | train | https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/bigquery/google/cloud/bigquery/table.py#L1710-L1727 |
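Sketch of what the empty-result path above means for a google-cloud-bigquery caller; the query is hypothetical and `rows` stands in for the iterator a resultless query returns.

# rows = client.query("SELECT 1 AS x LIMIT 0").result()   # hypothetical query with no result rows
df = rows.to_dataframe()        # empty pandas.DataFrame; raises ValueError if pandas is not installed
print(df.empty, len(df.index))  # True 0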
7,233 | datajoint/datajoint-python | datajoint/expression.py | QueryExpression.get_select_fields | def get_select_fields(self, select_fields=None):
"""
:return: string specifying the attributes to return
"""
return self.heading.as_sql if select_fields is None else self.heading.project(select_fields).as_sql | python | def get_select_fields(self, select_fields=None):
"""
:return: string specifying the attributes to return
"""
return self.heading.as_sql if select_fields is None else self.heading.project(select_fields).as_sql | ['def', 'get_select_fields', '(', 'self', ',', 'select_fields', '=', 'None', ')', ':', 'return', 'self', '.', 'heading', '.', 'as_sql', 'if', 'select_fields', 'is', 'None', 'else', 'self', '.', 'heading', '.', 'project', '(', 'select_fields', ')', '.', 'as_sql'] | :return: string specifying the attributes to return | [':', 'return', ':', 'string', 'specifying', 'the', 'attributes', 'to', 'return'] | train | https://github.com/datajoint/datajoint-python/blob/4f29bb154a7ed2b8b64b4d3a9c8be4c16b39621c/datajoint/expression.py#L203-L207 |
7,234 | fake-name/WebRequest | WebRequest/utility.py | determine_json_encoding | def determine_json_encoding(json_bytes):
'''
Given the fact that the first 2 characters in json are guaranteed to be ASCII, we can use
these to determine the encoding.
See: http://tools.ietf.org/html/rfc4627#section-3
Copied here:
Since the first two characters of a JSON text will always be ASCII
characters [RFC0020], it is possible to determine whether an octet
stream is UTF-8, UTF-16 (BE or LE), or UTF-32 (BE or LE) by looking
at the pattern of nulls in the first four octets.
00 00 00 xx UTF-32BE
00 xx 00 xx UTF-16BE
xx 00 00 00 UTF-32LE
xx 00 xx 00 UTF-16LE
xx xx xx xx UTF-8
'''
assert isinstance(json_bytes, bytes), "`determine_json_encoding()` can only operate on bytestring inputs"
if len(json_bytes) > 4:
b1, b2, b3, b4 = json_bytes[0], json_bytes[1], json_bytes[2], json_bytes[3]
if b1 == 0 and b2 == 0 and b3 == 0 and b4 != 0:
return "UTF-32BE"
elif b1 == 0 and b2 != 0 and b3 == 0 and b4 != 0:
return "UTF-16BE"
elif b1 != 0 and b2 == 0 and b3 == 0 and b4 == 0:
return "UTF-32LE"
elif b1 != 0 and b2 == 0 and b3 != 0 and b4 == 0:
return "UTF-16LE"
elif b1 != 0 and b2 != 0 and b3 != 0 and b4 != 0:
return "UTF-8"
else:
raise Exceptions.ContentTypeError("Unknown encoding!")
elif len(json_bytes) > 2:
b1, b2 = json_bytes[0], json_bytes[1]
if b1 == 0 and b2 == 0:
return "UTF-32BE"
elif b1 == 0 and b2 != 0:
return "UTF-16BE"
elif b1 != 0 and b2 == 0:
raise Exceptions.ContentTypeError("Json string too short to definitively infer encoding.")
elif b1 != 0 and b2 != 0:
return "UTF-8"
else:
raise Exceptions.ContentTypeError("Unknown encoding!")
raise Exceptions.ContentTypeError("Input string too short to guess encoding!") | python | def determine_json_encoding(json_bytes):
'''
Given the fact that the first 2 characters in json are guaranteed to be ASCII, we can use
these to determine the encoding.
See: http://tools.ietf.org/html/rfc4627#section-3
Copied here:
Since the first two characters of a JSON text will always be ASCII
characters [RFC0020], it is possible to determine whether an octet
stream is UTF-8, UTF-16 (BE or LE), or UTF-32 (BE or LE) by looking
at the pattern of nulls in the first four octets.
00 00 00 xx UTF-32BE
00 xx 00 xx UTF-16BE
xx 00 00 00 UTF-32LE
xx 00 xx 00 UTF-16LE
xx xx xx xx UTF-8
'''
assert isinstance(json_bytes, bytes), "`determine_json_encoding()` can only operate on bytestring inputs"
if len(json_bytes) > 4:
b1, b2, b3, b4 = json_bytes[0], json_bytes[1], json_bytes[2], json_bytes[3]
if b1 == 0 and b2 == 0 and b3 == 0 and b4 != 0:
return "UTF-32BE"
elif b1 == 0 and b2 != 0 and b3 == 0 and b4 != 0:
return "UTF-16BE"
elif b1 != 0 and b2 == 0 and b3 == 0 and b4 == 0:
return "UTF-32LE"
elif b1 != 0 and b2 == 0 and b3 != 0 and b4 == 0:
return "UTF-16LE"
elif b1 != 0 and b2 != 0 and b3 != 0 and b4 != 0:
return "UTF-8"
else:
raise Exceptions.ContentTypeError("Unknown encoding!")
elif len(json_bytes) > 2:
b1, b2 = json_bytes[0], json_bytes[1]
if b1 == 0 and b2 == 0:
return "UTF-32BE"
elif b1 == 0 and b2 != 0:
return "UTF-16BE"
elif b1 != 0 and b2 == 0:
raise Exceptions.ContentTypeError("Json string too short to definitively infer encoding.")
elif b1 != 0 and b2 != 0:
return "UTF-8"
else:
raise Exceptions.ContentTypeError("Unknown encoding!")
raise Exceptions.ContentTypeError("Input string too short to guess encoding!") | ['def', 'determine_json_encoding', '(', 'json_bytes', ')', ':', 'assert', 'isinstance', '(', 'json_bytes', ',', 'bytes', ')', ',', '"`determine_json_encoding()` can only operate on bytestring inputs"', 'if', 'len', '(', 'json_bytes', ')', '>', '4', ':', 'b1', ',', 'b2', ',', 'b3', ',', 'b4', '=', 'json_bytes', '[', '0', ']', ',', 'json_bytes', '[', '1', ']', ',', 'json_bytes', '[', '2', ']', ',', 'json_bytes', '[', '3', ']', 'if', 'b1', '==', '0', 'and', 'b2', '==', '0', 'and', 'b3', '==', '0', 'and', 'b4', '!=', '0', ':', 'return', '"UTF-32BE"', 'elif', 'b1', '==', '0', 'and', 'b2', '!=', '0', 'and', 'b3', '==', '0', 'and', 'b4', '!=', '0', ':', 'return', '"UTF-16BE"', 'elif', 'b1', '!=', '0', 'and', 'b2', '==', '0', 'and', 'b3', '==', '0', 'and', 'b4', '==', '0', ':', 'return', '"UTF-32LE"', 'elif', 'b1', '!=', '0', 'and', 'b2', '==', '0', 'and', 'b3', '!=', '0', 'and', 'b4', '==', '0', ':', 'return', '"UTF-16LE"', 'elif', 'b1', '!=', '0', 'and', 'b2', '!=', '0', 'and', 'b3', '!=', '0', 'and', 'b4', '!=', '0', ':', 'return', '"UTF-8"', 'else', ':', 'raise', 'Exceptions', '.', 'ContentTypeError', '(', '"Unknown encoding!"', ')', 'elif', 'len', '(', 'json_bytes', ')', '>', '2', ':', 'b1', ',', 'b2', '=', 'json_bytes', '[', '0', ']', ',', 'json_bytes', '[', '1', ']', 'if', 'b1', '==', '0', 'and', 'b2', '==', '0', ':', 'return', '"UTF-32BE"', 'elif', 'b1', '==', '0', 'and', 'b2', '!=', '0', ':', 'return', '"UTF-16BE"', 'elif', 'b1', '!=', '0', 'and', 'b2', '==', '0', ':', 'raise', 'Exceptions', '.', 'ContentTypeError', '(', '"Json string too short to definitively infer encoding."', ')', 'elif', 'b1', '!=', '0', 'and', 'b2', '!=', '0', ':', 'return', '"UTF-8"', 'else', ':', 'raise', 'Exceptions', '.', 'ContentTypeError', '(', '"Unknown encoding!"', ')', 'raise', 'Exceptions', '.', 'ContentTypeError', '(', '"Input string too short to guess encoding!"', ')'] | Given the fact that the first 2 characters in json are guaranteed to be ASCII, we can use
these to determine the encoding.
See: http://tools.ietf.org/html/rfc4627#section-3
Copied here:
Since the first two characters of a JSON text will always be ASCII
characters [RFC0020], it is possible to determine whether an octet
stream is UTF-8, UTF-16 (BE or LE), or UTF-32 (BE or LE) by looking
at the pattern of nulls in the first four octets.
00 00 00 xx UTF-32BE
00 xx 00 xx UTF-16BE
xx 00 00 00 UTF-32LE
xx 00 xx 00 UTF-16LE
xx xx xx xx UTF-8 | ['Given', 'the', 'fact', 'that', 'the', 'first', '2', 'characters', 'in', 'json', 'are', 'guaranteed', 'to', 'be', 'ASCII', 'we', 'can', 'use', 'these', 'to', 'determine', 'the', 'encoding', '.', 'See', ':', 'http', ':', '//', 'tools', '.', 'ietf', '.', 'org', '/', 'html', '/', 'rfc4627#section', '-', '3'] | train | https://github.com/fake-name/WebRequest/blob/b6c94631ff88b5f81f26a9f99a2d5c706810b11f/WebRequest/utility.py#L28-L77 |
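A quick check of the null-byte heuristic documented above; the expected results follow directly from the byte patterns, so the only assumption is importing the function from WebRequest.utility.

from WebRequest.utility import determine_json_encoding

payload = '{"a": 1}'
print(determine_json_encoding(payload.encode('utf-8')))      # UTF-8    (xx xx xx xx)
print(determine_json_encoding(payload.encode('utf-16-be')))  # UTF-16BE (00 xx 00 xx)
print(determine_json_encoding(payload.encode('utf-32-le')))  # UTF-32LE (xx 00 00 00)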
7,235 | frejanordsiek/hdf5storage | hdf5storage/__init__.py | reads | def reads(paths, filename='data.h5', options=None, **keywords):
""" Reads data from an HDF5 file (high level).
High level function to read one or more pieces of data from an HDF5
file located at the paths specified in `paths` into Python
types. Each path is specified as a POSIX style path where the data
to read is located.
There are various options that can be used to influence how the data
is read. They can be passed as an already constructed ``Options``
into `options` or as additional keywords that will be used to make
one by ``options = Options(**keywords)``.
Paths are POSIX style and can either be given directly as ``str`` or
``bytes``, or the separated path can be given as an iterable of
``str`` and ``bytes``. Each part of a separated path is escaped
using ``utilities.escape_path``. Otherwise, the path is assumed to
be already escaped. Escaping is done so that targets with a part
that starts with one or more periods, contain slashes, and/or
contain nulls can be used without causing the wrong Group to be
looked in or the wrong target to be looked at. It essentially allows
one to make a Dataset named ``'..'`` or ``'a/a'`` instead of moving
around in the Dataset hierarchy.
Parameters
----------
paths : iterable of paths
An iterable of paths to read data from. Each must be a POSIX
style path where the directory name is the Group to put it in
and the basename is the name to write it to. The format of
paths is described in the paragraph above.
filename : str, optional
The name of the HDF5 file to read data from.
options : Options, optional
The options to use when reading. Is mutually exclusive with any
additional keyword arguments given (set to ``None`` or don't
provide to use them).
**keywords :
If `options` was not provided or was ``None``, these are used as
arguments to make a ``Options``.
Returns
-------
datas : iterable
An iterable holding the piece of data for each path in `paths`
in the same order.
Raises
------
exceptions.CantReadError
If reading the data can't be done.
See Also
--------
utilities.process_path
utilities.escape_path
read : Reads just a single piece of data
writes
write
Options
utilities.read_data : Low level version.
"""
# Pack the different options into an Options class if an Options was
# not given. By default, the matlab_compatible option is set to
# False. So, if it wasn't passed in the keywords, this needs to be
# added to override the default value (True) for a new Options.
if not isinstance(options, Options):
kw = copy.deepcopy(keywords)
if 'matlab_compatible' not in kw:
kw['matlab_compatible'] = False
options = Options(**kw)
# Process the paths and stuff the group names and target names as
# tuples into toread.
toread = []
for p in paths:
groupname, targetname = utilities.process_path(p)
# Pack them into toread
toread.append((groupname, targetname))
# Open the hdf5 file and start reading the data. This is all wrapped
# in a try block, so that the file can be closed if any errors
# happen (the error is re-raised).
try:
f = None
f = h5py.File(filename, mode='r')
# Read the data item by item
datas = []
for groupname, targetname in toread:
# Check that the containing group is in f and is indeed a
# group. If it isn't an error needs to be thrown.
if groupname not in f \
or not isinstance(f[groupname], h5py.Group):
raise exceptions.CantReadError( \
'Could not find containing Group ' \
+ groupname + '.')
# Hand off everything to the low level reader.
datas.append(utilities.read_data(f, f[groupname],
targetname, options))
except:
raise
finally:
if f is not None:
f.close()
return datas | python | def reads(paths, filename='data.h5', options=None, **keywords):
""" Reads data from an HDF5 file (high level).
High level function to read one or more pieces of data from an HDF5
file located at the paths specified in `paths` into Python
types. Each path is specified as a POSIX style path where the data
to read is located.
There are various options that can be used to influence how the data
is read. They can be passed as an already constructed ``Options``
into `options` or as additional keywords that will be used to make
one by ``options = Options(**keywords)``.
Paths are POSIX style and can either be given directly as ``str`` or
``bytes``, or the separated path can be given as an iterable of
``str`` and ``bytes``. Each part of a separated path is escaped
using ``utilities.escape_path``. Otherwise, the path is assumed to
be already escaped. Escaping is done so that targets with a part
that starts with one or more periods, contain slashes, and/or
contain nulls can be used without causing the wrong Group to be
looked in or the wrong target to be looked at. It essentially allows
one to make a Dataset named ``'..'`` or ``'a/a'`` instead of moving
around in the Dataset hierarchy.
Parameters
----------
paths : iterable of paths
An iterable of paths to read data from. Each must be a POSIX
style path where the directory name is the Group to put it in
and the basename is the name to write it to. The format of
paths is described in the paragraph above.
filename : str, optional
The name of the HDF5 file to read data from.
options : Options, optional
The options to use when reading. Is mutually exclusive with any
additional keyword arguments given (set to ``None`` or don't
provide to use them).
**keywords :
If `options` was not provided or was ``None``, these are used as
arguments to make a ``Options``.
Returns
-------
datas : iterable
An iterable holding the piece of data for each path in `paths`
in the same order.
Raises
------
exceptions.CantReadError
If reading the data can't be done.
See Also
--------
utilities.process_path
utilities.escape_path
read : Reads just a single piece of data
writes
write
Options
utilities.read_data : Low level version.
"""
# Pack the different options into an Options class if an Options was
# not given. By default, the matlab_compatible option is set to
# False. So, if it wasn't passed in the keywords, this needs to be
# added to override the default value (True) for a new Options.
if not isinstance(options, Options):
kw = copy.deepcopy(keywords)
if 'matlab_compatible' not in kw:
kw['matlab_compatible'] = False
options = Options(**kw)
# Process the paths and stuff the group names and target names as
# tuples into toread.
toread = []
for p in paths:
groupname, targetname = utilities.process_path(p)
# Pack them into toread
toread.append((groupname, targetname))
# Open the hdf5 file and start reading the data. This is all wrapped
# in a try block, so that the file can be closed if any errors
# happen (the error is re-raised).
try:
f = None
f = h5py.File(filename, mode='r')
# Read the data item by item
datas = []
for groupname, targetname in toread:
# Check that the containing group is in f and is indeed a
# group. If it isn't an error needs to be thrown.
if groupname not in f \
or not isinstance(f[groupname], h5py.Group):
raise exceptions.CantReadError( \
'Could not find containing Group ' \
+ groupname + '.')
# Hand off everything to the low level reader.
datas.append(utilities.read_data(f, f[groupname],
targetname, options))
except:
raise
finally:
if f is not None:
f.close()
return datas | ['def', 'reads', '(', 'paths', ',', 'filename', '=', "'data.h5'", ',', 'options', '=', 'None', ',', '*', '*', 'keywords', ')', ':', '# Pack the different options into an Options class if an Options was', '# not given. By default, the matlab_compatible option is set to', "# False. So, if it wasn't passed in the keywords, this needs to be", '# added to override the default value (True) for a new Options.', 'if', 'not', 'isinstance', '(', 'options', ',', 'Options', ')', ':', 'kw', '=', 'copy', '.', 'deepcopy', '(', 'keywords', ')', 'if', "'matlab_compatible'", 'not', 'in', 'kw', ':', 'kw', '[', "'matlab_compatible'", ']', '=', 'False', 'options', '=', 'Options', '(', '*', '*', 'kw', ')', '# Process the paths and stuff the group names and target names as', '# tuples into toread.', 'toread', '=', '[', ']', 'for', 'p', 'in', 'paths', ':', 'groupname', ',', 'targetname', '=', 'utilities', '.', 'process_path', '(', 'p', ')', '# Pack them into toread', 'toread', '.', 'append', '(', '(', 'groupname', ',', 'targetname', ')', ')', '# Open the hdf5 file and start reading the data. This is all wrapped', '# in a try block, so that the file can be closed if any errors', '# happen (the error is re-raised).', 'try', ':', 'f', '=', 'None', 'f', '=', 'h5py', '.', 'File', '(', 'filename', ',', 'mode', '=', "'r'", ')', '# Read the data item by item', 'datas', '=', '[', ']', 'for', 'groupname', ',', 'targetname', 'in', 'toread', ':', '# Check that the containing group is in f and is indeed a', "# group. If it isn't an error needs to be thrown.", 'if', 'groupname', 'not', 'in', 'f', 'or', 'not', 'isinstance', '(', 'f', '[', 'groupname', ']', ',', 'h5py', '.', 'Group', ')', ':', 'raise', 'exceptions', '.', 'CantReadError', '(', "'Could not find containing Group '", '+', 'groupname', '+', "'.'", ')', '# Hand off everything to the low level reader.', 'datas', '.', 'append', '(', 'utilities', '.', 'read_data', '(', 'f', ',', 'f', '[', 'groupname', ']', ',', 'targetname', ',', 'options', ')', ')', 'except', ':', 'raise', 'finally', ':', 'if', 'f', 'is', 'not', 'None', ':', 'f', '.', 'close', '(', ')', 'return', 'datas'] | Reads data from an HDF5 file (high level).
High level function to read one or more pieces of data from an HDF5
file located at the paths specified in `paths` into Python
types. Each path is specified as a POSIX style path where the data
to read is located.
There are various options that can be used to influence how the data
is read. They can be passed as an already constructed ``Options``
into `options` or as additional keywords that will be used to make
one by ``options = Options(**keywords)``.
Paths are POSIX style and can either be given directly as ``str`` or
``bytes``, or the separated path can be given as an iterable of
``str`` and ``bytes``. Each part of a separated path is escaped
using ``utilities.escape_path``. Otherwise, the path is assumed to
be already escaped. Escaping is done so that targets with a part
that starts with one or more periods, contain slashes, and/or
contain nulls can be used without causing the wrong Group to be
looked in or the wrong target to be looked at. It essentially allows
one to make a Dataset named ``'..'`` or ``'a/a'`` instead of moving
around in the Dataset hierarchy.
Parameters
----------
paths : iterable of paths
An iterable of paths to read data from. Each must be a POSIX
style path where the directory name is the Group to put it in
and the basename is the name to write it to. The format of
paths is described in the paragraph above.
filename : str, optional
The name of the HDF5 file to read data from.
options : Options, optional
The options to use when reading. Is mutually exclusive with any
additional keyword arguments given (set to ``None`` or don't
provide to use them).
**keywords :
If `options` was not provided or was ``None``, these are used as
arguments to make a ``Options``.
Returns
-------
datas : iterable
An iterable holding the piece of data for each path in `paths`
in the same order.
Raises
------
exceptions.CantReadError
If reading the data can't be done.
See Also
--------
utilities.process_path
utilities.escape_path
read : Reads just a single piece of data
writes
write
Options
utilities.read_data : Low level version. | ['Reads', 'data', 'from', 'an', 'HDF5', 'file', '(', 'high', 'level', ')', '.'] | train | https://github.com/frejanordsiek/hdf5storage/blob/539275141dd3a4efbbbfd9bdb978f3ed59e3f05d/hdf5storage/__init__.py#L1727-L1836 |
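A round-trip sketch for reads() above. It assumes hdf5storage.writes (the counterpart in the same module) accepts a dict mapping POSIX-style paths to values; the file name and values are placeholders.

import hdf5storage

hdf5storage.writes({'/a': 1.5, '/group/b': [1, 2, 3]}, filename='data.h5')
a, b = hdf5storage.reads(['/a', '/group/b'], filename='data.h5')  # values come back in path order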
7,236 | rdussurget/py-altimetry | altimetry/tools/nctools.py | load_ncVar | def load_ncVar(varName, nc=None, **kwargs):
'''
Loads a variable from the NetCDF file and saves it as a data structure.
:parameter varName: variable name
:keywords kwargs: additional keyword arguments for slicing the dataset. Keywords should be named the name of the dimensions to subsample along and associated value should be a length 2 or 3 tuple (min,max,<step>).
.. note: slices are provided in this interval : [min,max] (ie. including both extremities)
'''
if (nc is None) : raise Exception('No Netcdf file passed')
var = nc.variables[varName]
var.set_auto_maskandscale(False)
#Load dimensions
varDim = [str(dim) for dim in var.dimensions]
missDim=len(varDim) == 0
if (missDim):
warn('No dimension found - creating it')
sh=var[:].shape
varDimval = sh
varDim = ['dim_%02i' % (i+1) for i in xrange(len(varDimval))]
else : varDimval = [len(nc.dimensions[dimname]) for dimname in varDim]
#Load Attributes
attrStr=var.__dict__
ind_list = [] #Init index list
dims = OrderedDict({'_ndims':0}) #Init dimensions
dstr=[]
shape=()
#Construct index list
#looping on variable dimension list
for vid,vn in enumerate(varDim) :
#No indexation on current dimension
if not kwargs.has_key(vn) :
dstr=np.append(dstr,':')
sz=np.long(varDimval[vid])
# ind_list.append(range(varDimval[vid])) # if no restriction in kargs then equivalent to [:]
# dims.update({vn:varDimval[vid]})
#Data is indexed along current dimension
else :
drange=kwargs[vn]
if len(drange) == 2 : drange = drange + (1,)
if nc.variables.has_key(vn) : #Check if current dimension exists
dumvar = nc.variables[vn][:]
else :
dumvar = np.arange(len(nc.dimensions[vn]))
if vn.startswith('lon') : dumvar=recale(dumvar,degrees=True)
fg=(dumvar >= drange[0]) & (dumvar <= drange[1])
if fg.sum() == 0 :
#retry switrhcing lon/lat
dumvar=recale(dumvar,degrees=True)
drange=tuple(recale(drange,degrees=True))#.astype(np.long))
fg=(dumvar >= drange[0]) & (dumvar <= drange[1])
if fg.sum() == 0 :
raise IndexError('{0} {1} is not matching given dimensions {2}'.format(vn,(np.nanmin(nc.variables[vn][:]),np.nanmax(nc.variables[vn][:])),drange))
if len(fg) == 1 :
dstr=np.append(dstr,':')
sz=1L
elif len(fg) == 0:
sz=0L
else :
dumind=np.arange(varDimval[vid],dtype=long).compress(fg)
bg=dumind[0]
en=dumind[-1]+1
st=long(drange[2])
dstr=np.append(dstr,'{0}:{1}:{2}'.format(bg,en,st))
sz = np.long(np.mod(np.float(en-bg-1)/st,np.float(en-bg)) + 1.)
dims.update({vn:sz})
shape = shape + (sz,)
# if isinstance(dumind, np.ndarray) : dumind = dumind.tolist() #Rq: tolist() can take a very long time to run on large arrays
# if type(dumind) is not list : dumind = [dumind]
# ind_list.append(dumind)
# dims.update({vn:len(dumind)})
# #check index list
# sz = [np.size(i) for i in ind_list]
dstr=','.join(dstr) #invert dimension list for numpy
# dstr=','.join(dstr[::-1]) #invert dimension list for numpy
if missDim : cmd = 'varOut = var[:]'
else : cmd = 'varOut = var[{0}]'.format(dstr)
exec(cmd)
#find empty variables
# if not (atools.where_list([0], shape)[0] == -1) : varOut = var[[0]][[]]
# else : varOut = var[ind_list]
#Mask it!
if var.__dict__.has_key('_FillValue') :
fill_value=var._FillValue
mask = varOut == var._FillValue
elif var.__dict__.has_key('missing_value') :
fill_value=var._FillValue
mask = varOut == var._FillValue
else :
fill_value = None
mask = np.zeros(varOut.shape, dtype='bool')
#Scale it
#note : we do not use the *= or += operators to force casting to scaling attribute types
if var.__dict__.has_key('scale') : varOut = varOut * var.scale
elif var.__dict__.has_key('scale_factor') : varOut = varOut * var.scale_factor
if var.__dict__.has_key('add_offset') : varOut = varOut + var.add_offset
#Set masks properly
if isinstance(varOut, np.ndarray) : varOut = np.ma.masked_array(varOut, mask=mask,dtype=varOut.dtype,fill_value=fill_value)
elif isinstance(varOut, np.ma.masked_array) : var.mask = mask
elif np.isscalar(varOut) : varOut = np.ma.masked_array([varOut], mask=mask,dtype=varOut.dtype,fill_value=fill_value) #Case of a scalar: cast to array and force having a shape
else :
try: varOut = np.ma.masked_array(np.array(varOut), mask=np.array(mask),dtype=varOut.dtype,fill_value=fill_value)
except: raise Exception('This data type (%s) has not been defined - code it!' % type(varOut))
#Update masked data properly
varOut.data[varOut.mask]=varOut.fill_value
#Switch dimensions
if not missDim : varOut=np.transpose(varOut,tuple(range(len(dims.keys()[1:]))[::-1]))
#Build up output structure
dims.update({'_ndims':len(dims.keys()[1:])})
outStr = {'_dimensions':dims, 'data':varOut}
#Add variable attributes
for A in var.__dict__.keys():
outStr[A]=var.getncattr(A)
return outStr | python | def load_ncVar(varName, nc=None, **kwargs):
'''
Loads a variable from the NetCDF file and saves it as a data structure.
:parameter varName: variable name
:keywords kwargs: additional keyword arguments for slicing the dataset. Keywords should be named the name of the dimensions to subsample along and associated value should be a length 2 or 3 tuple (min,max,<step>).
.. note: slices are provided in this interval : [min,max] (ie. including both extremities)
'''
if (nc is None) : raise Exception('No Netcdf file passed')
var = nc.variables[varName]
var.set_auto_maskandscale(False)
#Load dimensions
varDim = [str(dim) for dim in var.dimensions]
missDim=len(varDim) == 0
if (missDim):
warn('No dimension found - creating it')
sh=var[:].shape
varDimval = sh
varDim = ['dim_%02i' % (i+1) for i in xrange(len(varDimval))]
else : varDimval = [len(nc.dimensions[dimname]) for dimname in varDim]
#Load Attributes
attrStr=var.__dict__
ind_list = [] #Init index list
dims = OrderedDict({'_ndims':0}) #Init dimensions
dstr=[]
shape=()
#Construct index list
#looping on variable dimension list
for vid,vn in enumerate(varDim) :
#No indexation on current dimension
if not kwargs.has_key(vn) :
dstr=np.append(dstr,':')
sz=np.long(varDimval[vid])
# ind_list.append(range(varDimval[vid])) # if no restriction in kargs then equivalent to [:]
# dims.update({vn:varDimval[vid]})
#Data is indexed along current dimension
else :
drange=kwargs[vn]
if len(drange) == 2 : drange = drange + (1,)
if nc.variables.has_key(vn) : #Check if current dimension exists
dumvar = nc.variables[vn][:]
else :
dumvar = np.arange(len(nc.dimensions[vn]))
if vn.startswith('lon') : dumvar=recale(dumvar,degrees=True)
fg=(dumvar >= drange[0]) & (dumvar <= drange[1])
if fg.sum() == 0 :
#retry switrhcing lon/lat
dumvar=recale(dumvar,degrees=True)
drange=tuple(recale(drange,degrees=True))#.astype(np.long))
fg=(dumvar >= drange[0]) & (dumvar <= drange[1])
if fg.sum() == 0 :
raise IndexError('{0} {1} is not matching given dimensions {2}'.format(vn,(np.nanmin(nc.variables[vn][:]),np.nanmax(nc.variables[vn][:])),drange))
if len(fg) == 1 :
dstr=np.append(dstr,':')
sz=1L
elif len(fg) == 0:
sz=0L
else :
dumind=np.arange(varDimval[vid],dtype=long).compress(fg)
bg=dumind[0]
en=dumind[-1]+1
st=long(drange[2])
dstr=np.append(dstr,'{0}:{1}:{2}'.format(bg,en,st))
sz = np.long(np.mod(np.float(en-bg-1)/st,np.float(en-bg)) + 1.)
dims.update({vn:sz})
shape = shape + (sz,)
# if isinstance(dumind, np.ndarray) : dumind = dumind.tolist() #Rq: tolist() can take a very long time to run on large arrays
# if type(dumind) is not list : dumind = [dumind]
# ind_list.append(dumind)
# dims.update({vn:len(dumind)})
# #check index list
# sz = [np.size(i) for i in ind_list]
dstr=','.join(dstr) #invert dimension list for numpy
# dstr=','.join(dstr[::-1]) #invert dimension list for numpy
if missDim : cmd = 'varOut = var[:]'
else : cmd = 'varOut = var[{0}]'.format(dstr)
exec(cmd)
#find empty variables
# if not (atools.where_list([0], shape)[0] == -1) : varOut = var[[0]][[]]
# else : varOut = var[ind_list]
#Mask it!
if var.__dict__.has_key('_FillValue') :
fill_value=var._FillValue
mask = varOut == var._FillValue
elif var.__dict__.has_key('missing_value') :
fill_value=var._FillValue
mask = varOut == var._FillValue
else :
fill_value = None
mask = np.zeros(varOut.shape, dtype='bool')
#Scale it
#note : we do not use the *= or += operators to force casting to scaling attribute types
if var.__dict__.has_key('scale') : varOut = varOut * var.scale
elif var.__dict__.has_key('scale_factor') : varOut = varOut * var.scale_factor
if var.__dict__.has_key('add_offset') : varOut = varOut + var.add_offset
#Set masks properly
if isinstance(varOut, np.ndarray) : varOut = np.ma.masked_array(varOut, mask=mask,dtype=varOut.dtype,fill_value=fill_value)
elif isinstance(varOut, np.ma.masked_array) : var.mask = mask
elif np.isscalar(varOut) : varOut = np.ma.masked_array([varOut], mask=mask,dtype=varOut.dtype,fill_value=fill_value) #Case of a scalar: cast to array and force having a shape
else :
try: varOut = np.ma.masked_array(np.array(varOut), mask=np.array(mask),dtype=varOut.dtype,fill_value=fill_value)
except: raise Exception('This data type (%s) has not been defined - code it!' % type(varOut))
#Update masked data properly
varOut.data[varOut.mask]=varOut.fill_value
#Switch dimensions
if not missDim : varOut=np.transpose(varOut,tuple(range(len(dims.keys()[1:]))[::-1]))
#Build up output structure
dims.update({'_ndims':len(dims.keys()[1:])})
outStr = {'_dimensions':dims, 'data':varOut}
#Add variable attributes
for A in var.__dict__.keys():
outStr[A]=var.getncattr(A)
return outStr | ['def', 'load_ncVar', '(', 'varName', ',', 'nc', '=', 'None', ',', '*', '*', 'kwargs', ')', ':', 'if', '(', 'nc', 'is', 'None', ')', ':', 'raise', 'Exception', '(', "'No Netcdf file passed'", ')', 'var', '=', 'nc', '.', 'variables', '[', 'varName', ']', 'var', '.', 'set_auto_maskandscale', '(', 'False', ')', '#Load dimensions\r', 'varDim', '=', '[', 'str', '(', 'dim', ')', 'for', 'dim', 'in', 'var', '.', 'dimensions', ']', 'missDim', '=', 'len', '(', 'varDim', ')', '==', '0', 'if', '(', 'missDim', ')', ':', 'warn', '(', "'No dimension found - creating it'", ')', 'sh', '=', 'var', '[', ':', ']', '.', 'shape', 'varDimval', '=', 'sh', 'varDim', '=', '[', "'dim_%02i'", '%', '(', 'i', '+', '1', ')', 'for', 'i', 'in', 'xrange', '(', 'len', '(', 'varDimval', ')', ')', ']', 'else', ':', 'varDimval', '=', '[', 'len', '(', 'nc', '.', 'dimensions', '[', 'dimname', ']', ')', 'for', 'dimname', 'in', 'varDim', ']', '#Load Attributes\r', 'attrStr', '=', 'var', '.', '__dict__', 'ind_list', '=', '[', ']', '#Init index list\r', 'dims', '=', 'OrderedDict', '(', '{', "'_ndims'", ':', '0', '}', ')', '#Init dimensions\r', 'dstr', '=', '[', ']', 'shape', '=', '(', ')', '#Construct index list\r', '#looping on variable dimension list\r', 'for', 'vid', ',', 'vn', 'in', 'enumerate', '(', 'varDim', ')', ':', '#No indexation on current dimension\r', 'if', 'not', 'kwargs', '.', 'has_key', '(', 'vn', ')', ':', 'dstr', '=', 'np', '.', 'append', '(', 'dstr', ',', "':'", ')', 'sz', '=', 'np', '.', 'long', '(', 'varDimval', '[', 'vid', ']', ')', '# ind_list.append(range(varDimval[vid])) # if no restriction in kargs then equivalent to [:]\r', '# dims.update({vn:varDimval[vid]})\r', '#Data is indexed along current dimension\r', 'else', ':', 'drange', '=', 'kwargs', '[', 'vn', ']', 'if', 'len', '(', 'drange', ')', '==', '2', ':', 'drange', '=', 'drange', '+', '(', '1', ',', ')', 'if', 'nc', '.', 'variables', '.', 'has_key', '(', 'vn', ')', ':', '#Check if current dimension exists\r', 'dumvar', '=', 'nc', '.', 'variables', '[', 'vn', ']', '[', ':', ']', 'else', ':', 'dumvar', '=', 'np', '.', 'arange', '(', 'len', '(', 'nc', '.', 'dimensions', '[', 'vn', ']', ')', ')', 'if', 'vn', '.', 'startswith', '(', "'lon'", ')', ':', 'dumvar', '=', 'recale', '(', 'dumvar', ',', 'degrees', '=', 'True', ')', 'fg', '=', '(', 'dumvar', '>=', 'drange', '[', '0', ']', ')', '&', '(', 'dumvar', '<=', 'drange', '[', '1', ']', ')', 'if', 'fg', '.', 'sum', '(', ')', '==', '0', ':', '#retry switrhcing lon/lat\r', 'dumvar', '=', 'recale', '(', 'dumvar', ',', 'degrees', '=', 'True', ')', 'drange', '=', 'tuple', '(', 'recale', '(', 'drange', ',', 'degrees', '=', 'True', ')', ')', '#.astype(np.long))\r', 'fg', '=', '(', 'dumvar', '>=', 'drange', '[', '0', ']', ')', '&', '(', 'dumvar', '<=', 'drange', '[', '1', ']', ')', 'if', 'fg', '.', 'sum', '(', ')', '==', '0', ':', 'raise', 'IndexError', '(', "'{0} {1} is not matching given dimensions {2}'", '.', 'format', '(', 'vn', ',', '(', 'np', '.', 'nanmin', '(', 'nc', '.', 'variables', '[', 'vn', ']', '[', ':', ']', ')', ',', 'np', '.', 'nanmax', '(', 'nc', '.', 'variables', '[', 'vn', ']', '[', ':', ']', ')', ')', ',', 'drange', ')', ')', 'if', 'len', '(', 'fg', ')', '==', '1', ':', 'dstr', '=', 'np', '.', 'append', '(', 'dstr', ',', "':'", ')', 'sz', '=', '1L', 'elif', 'len', '(', 'fg', ')', '==', '0', ':', 'sz', '=', '0L', 'else', ':', 'dumind', '=', 'np', '.', 'arange', '(', 'varDimval', '[', 'vid', ']', ',', 'dtype', '=', 'long', ')', '.', 'compress', '(', 'fg', ')', 'bg', '=', 'dumind', 
'[', '0', ']', 'en', '=', 'dumind', '[', '-', '1', ']', '+', '1', 'st', '=', 'long', '(', 'drange', '[', '2', ']', ')', 'dstr', '=', 'np', '.', 'append', '(', 'dstr', ',', "'{0}:{1}:{2}'", '.', 'format', '(', 'bg', ',', 'en', ',', 'st', ')', ')', 'sz', '=', 'np', '.', 'long', '(', 'np', '.', 'mod', '(', 'np', '.', 'float', '(', 'en', '-', 'bg', '-', '1', ')', '/', 'st', ',', 'np', '.', 'float', '(', 'en', '-', 'bg', ')', ')', '+', '1.', ')', 'dims', '.', 'update', '(', '{', 'vn', ':', 'sz', '}', ')', 'shape', '=', 'shape', '+', '(', 'sz', ',', ')', '# if isinstance(dumind, np.ndarray) : dumind = dumind.tolist() #Rq: tolist() can take a very long time to run on large arrays\r', '# if type(dumind) is not list : dumind = [dumind] \r', '# ind_list.append(dumind)\r', '# dims.update({vn:len(dumind)})\r', '# #check index list\r', '# sz = [np.size(i) for i in ind_list]\r', 'dstr', '=', "','", '.', 'join', '(', 'dstr', ')', '#invert dimension list for numpy\r', "# dstr=','.join(dstr[::-1]) #invert dimension list for numpy\r", 'if', 'missDim', ':', 'cmd', '=', "'varOut = var[:]'", 'else', ':', 'cmd', '=', "'varOut = var[{0}]'", '.', 'format', '(', 'dstr', ')', 'exec', '(', 'cmd', ')', '#find empty variables\r', '# if not (atools.where_list([0], shape)[0] == -1) : varOut = var[[0]][[]]\r', '# else : varOut = var[ind_list]\r', '#Mask it!\r', 'if', 'var', '.', '__dict__', '.', 'has_key', '(', "'_FillValue'", ')', ':', 'fill_value', '=', 'var', '.', '_FillValue', 'mask', '=', 'varOut', '==', 'var', '.', '_FillValue', 'elif', 'var', '.', '__dict__', '.', 'has_key', '(', "'missing_value'", ')', ':', 'fill_value', '=', 'var', '.', '_FillValue', 'mask', '=', 'varOut', '==', 'var', '.', '_FillValue', 'else', ':', 'fill_value', '=', 'None', 'mask', '=', 'np', '.', 'zeros', '(', 'varOut', '.', 'shape', ',', 'dtype', '=', "'bool'", ')', '#Scale it\r', '#note : we do not use the *= or += operators to force casting to scaling attribute types\r', 'if', 'var', '.', '__dict__', '.', 'has_key', '(', "'scale'", ')', ':', 'varOut', '=', 'varOut', '*', 'var', '.', 'scale', 'elif', 'var', '.', '__dict__', '.', 'has_key', '(', "'scale_factor'", ')', ':', 'varOut', '=', 'varOut', '*', 'var', '.', 'scale_factor', 'if', 'var', '.', '__dict__', '.', 'has_key', '(', "'add_offset'", ')', ':', 'varOut', '=', 'varOut', '+', 'var', '.', 'add_offset', '#Set masks properly\r', 'if', 'isinstance', '(', 'varOut', ',', 'np', '.', 'ndarray', ')', ':', 'varOut', '=', 'np', '.', 'ma', '.', 'masked_array', '(', 'varOut', ',', 'mask', '=', 'mask', ',', 'dtype', '=', 'varOut', '.', 'dtype', ',', 'fill_value', '=', 'fill_value', ')', 'elif', 'isinstance', '(', 'varOut', ',', 'np', '.', 'ma', '.', 'masked_array', ')', ':', 'var', '.', 'mask', '=', 'mask', 'elif', 'np', '.', 'isscalar', '(', 'varOut', ')', ':', 'varOut', '=', 'np', '.', 'ma', '.', 'masked_array', '(', '[', 'varOut', ']', ',', 'mask', '=', 'mask', ',', 'dtype', '=', 'varOut', '.', 'dtype', ',', 'fill_value', '=', 'fill_value', ')', '#Case of a scalar: cast to array and force having a shape\r', 'else', ':', 'try', ':', 'varOut', '=', 'np', '.', 'ma', '.', 'masked_array', '(', 'np', '.', 'array', '(', 'varOut', ')', ',', 'mask', '=', 'np', '.', 'array', '(', 'mask', ')', ',', 'dtype', '=', 'varOut', '.', 'dtype', ',', 'fill_value', '=', 'fill_value', ')', 'except', ':', 'raise', 'Exception', '(', "'This data type (%s) has not been defined - code it!'", '%', 'type', '(', 'varOut', ')', ')', '#Update masked data properly\r', 'varOut', '.', 'data', '[', 'varOut', '.', 'mask', ']', 
'=', 'varOut', '.', 'fill_value', '#Switch dimensions \r', 'if', 'not', 'missDim', ':', 'varOut', '=', 'np', '.', 'transpose', '(', 'varOut', ',', 'tuple', '(', 'range', '(', 'len', '(', 'dims', '.', 'keys', '(', ')', '[', '1', ':', ']', ')', ')', '[', ':', ':', '-', '1', ']', ')', ')', '#Build up output structure\r', 'dims', '.', 'update', '(', '{', "'_ndims'", ':', 'len', '(', 'dims', '.', 'keys', '(', ')', '[', '1', ':', ']', ')', '}', ')', 'outStr', '=', '{', "'_dimensions'", ':', 'dims', ',', "'data'", ':', 'varOut', '}', '#Add variable attributes\r', 'for', 'A', 'in', 'var', '.', '__dict__', '.', 'keys', '(', ')', ':', 'outStr', '[', 'A', ']', '=', 'var', '.', 'getncattr', '(', 'A', ')', 'return', 'outStr'] | Loads a variable from the NetCDF file and saves it as a data structure.
:parameter varName: variable name
:keywords kwargs: additional keyword arguments for slicing the dataset. Keywords should be named the name of the dimensions to subsample along and associated value should be a length 2 or 3 tuple (min,max,<step>).
.. note: slices are provided in this interval : [min,max] (ie. including both extremities) | ['Loads', 'a', 'variable', 'from', 'the', 'NetCDF', 'file', 'and', 'saves', 'it', 'as', 'a', 'data', 'structure', '.', ':', 'parameter', 'varName', ':', 'variable', 'name', ':', 'keywords', 'kwargs', ':', 'additional', 'keyword', 'arguments', 'for', 'slicing', 'the', 'dataset', '.', 'Keywords', 'should', 'be', 'named', 'the', 'name', 'of', 'the', 'dimensions', 'to', 'subsample', 'along', 'and', 'associated', 'value', 'should', 'be', 'a', 'length', '2', 'or', '3', 'tuple', '(', 'min', 'max', '<step', '>', ')', '.', '..', 'note', ':', 'slices', 'are', 'provided', 'in', 'this', 'interval', ':', '[', 'min', 'max', ']', '(', 'ie', '.', 'including', 'both', 'extremities', ')'] | train | https://github.com/rdussurget/py-altimetry/blob/57ce7f2d63c6bbc4993821af0bbe46929e3a2d98/altimetry/tools/nctools.py#L1134-L1270 |
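Usage sketch for load_ncVar above. The routine is Python 2 code (xrange, dict.has_key, long literals), so a Python 2 interpreter is assumed, as are the file name and the 'lon'/'lat'/'sla' variables; the slicing keywords are the (min, max[, step]) tuples described in the docstring.

import netCDF4

nc = netCDF4.Dataset('altimetry.nc', 'r')                            # hypothetical NetCDF file
sla = load_ncVar('sla', nc=nc, lon=(5.0, 10.0), lat=(40.0, 45.0, 2))
print(sla['_dimensions'], sla['data'].shape)                         # kept dimension sizes, masked array of values
nc.close()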
7,237 | astroswego/plotypus | src/plotypus/lightcurve.py | make_predictor | def make_predictor(regressor=LassoLarsIC(fit_intercept=False),
Selector=GridSearchCV, fourier_degree=(2, 25),
selector_processes=1,
use_baart=False, scoring='r2', scoring_cv=3,
**kwargs):
"""make_predictor(regressor=LassoLarsIC(fit_intercept=False), Selector=GridSearchCV, fourier_degree=(2, 25), selector_processes=1, use_baart=False, scoring='r2', scoring_cv=3, **kwargs)
Makes a predictor object for use in :func:`get_lightcurve`.
**Parameters**
regressor : object with "fit" and "transform" methods, optional
Regression object used for solving Fourier matrix
(default ``sklearn.linear_model.LassoLarsIC(fit_intercept=False)``).
Selector : class with "fit" and "predict" methods, optional
Model selection class used for finding the best fit
(default :class:`sklearn.grid_search.GridSearchCV`).
selector_processes : positive integer, optional
Number of processes to use for *Selector* (default 1).
use_baart : boolean, optional
If True, ignores *Selector* and uses Baart's Criteria to find
the Fourier degree, within the boundaries (default False).
fourier_degree : 2-tuple, optional
Tuple containing lower and upper bounds on Fourier degree, in that
order (default (2, 25)).
scoring : str, optional
Scoring method to use for *Selector*. This parameter can be:
* "r2", in which case use :math:`R^2` (the default)
* "mse", in which case use mean square error
scoring_cv : positive integer, optional
Number of cross validation folds used in scoring (default 3).
**Returns**
out : object with "fit" and "predict" methods
The created predictor object.
"""
fourier = Fourier(degree_range=fourier_degree, regressor=regressor) \
if use_baart else Fourier()
pipeline = Pipeline([('Fourier', fourier), ('Regressor', regressor)])
if use_baart:
return pipeline
else:
params = {'Fourier__degree': list(range(fourier_degree[0],
fourier_degree[1]+1))}
return Selector(pipeline, params, scoring=scoring, cv=scoring_cv,
n_jobs=selector_processes) | python | def make_predictor(regressor=LassoLarsIC(fit_intercept=False),
Selector=GridSearchCV, fourier_degree=(2, 25),
selector_processes=1,
use_baart=False, scoring='r2', scoring_cv=3,
**kwargs):
"""make_predictor(regressor=LassoLarsIC(fit_intercept=False), Selector=GridSearchCV, fourier_degree=(2, 25), selector_processes=1, use_baart=False, scoring='r2', scoring_cv=3, **kwargs)
Makes a predictor object for use in :func:`get_lightcurve`.
**Parameters**
regressor : object with "fit" and "transform" methods, optional
Regression object used for solving Fourier matrix
(default ``sklearn.linear_model.LassoLarsIC(fit_intercept=False)``).
Selector : class with "fit" and "predict" methods, optional
Model selection class used for finding the best fit
(default :class:`sklearn.grid_search.GridSearchCV`).
selector_processes : positive integer, optional
Number of processes to use for *Selector* (default 1).
use_baart : boolean, optional
If True, ignores *Selector* and uses Baart's Criteria to find
the Fourier degree, within the boundaries (default False).
fourier_degree : 2-tuple, optional
Tuple containing lower and upper bounds on Fourier degree, in that
order (default (2, 25)).
scoring : str, optional
Scoring method to use for *Selector*. This parameter can be:
* "r2", in which case use :math:`R^2` (the default)
* "mse", in which case use mean square error
scoring_cv : positive integer, optional
Number of cross validation folds used in scoring (default 3).
**Returns**
out : object with "fit" and "predict" methods
The created predictor object.
"""
fourier = Fourier(degree_range=fourier_degree, regressor=regressor) \
if use_baart else Fourier()
pipeline = Pipeline([('Fourier', fourier), ('Regressor', regressor)])
if use_baart:
return pipeline
else:
params = {'Fourier__degree': list(range(fourier_degree[0],
fourier_degree[1]+1))}
return Selector(pipeline, params, scoring=scoring, cv=scoring_cv,
n_jobs=selector_processes) | ['def', 'make_predictor', '(', 'regressor', '=', 'LassoLarsIC', '(', 'fit_intercept', '=', 'False', ')', ',', 'Selector', '=', 'GridSearchCV', ',', 'fourier_degree', '=', '(', '2', ',', '25', ')', ',', 'selector_processes', '=', '1', ',', 'use_baart', '=', 'False', ',', 'scoring', '=', "'r2'", ',', 'scoring_cv', '=', '3', ',', '*', '*', 'kwargs', ')', ':', 'fourier', '=', 'Fourier', '(', 'degree_range', '=', 'fourier_degree', ',', 'regressor', '=', 'regressor', ')', 'if', 'use_baart', 'else', 'Fourier', '(', ')', 'pipeline', '=', 'Pipeline', '(', '[', '(', "'Fourier'", ',', 'fourier', ')', ',', '(', "'Regressor'", ',', 'regressor', ')', ']', ')', 'if', 'use_baart', ':', 'return', 'pipeline', 'else', ':', 'params', '=', '{', "'Fourier__degree'", ':', 'list', '(', 'range', '(', 'fourier_degree', '[', '0', ']', ',', 'fourier_degree', '[', '1', ']', '+', '1', ')', ')', '}', 'return', 'Selector', '(', 'pipeline', ',', 'params', ',', 'scoring', '=', 'scoring', ',', 'cv', '=', 'scoring_cv', ',', 'n_jobs', '=', 'selector_processes', ')'] | make_predictor(regressor=LassoLarsIC(fit_intercept=False), Selector=GridSearchCV, fourier_degree=(2, 25), selector_processes=1, use_baart=False, scoring='r2', scoring_cv=3, **kwargs)
Makes a predictor object for use in :func:`get_lightcurve`.
**Parameters**
regressor : object with "fit" and "transform" methods, optional
Regression object used for solving Fourier matrix
(default ``sklearn.linear_model.LassoLarsIC(fit_intercept=False)``).
Selector : class with "fit" and "predict" methods, optional
Model selection class used for finding the best fit
(default :class:`sklearn.grid_search.GridSearchCV`).
selector_processes : positive integer, optional
Number of processes to use for *Selector* (default 1).
use_baart : boolean, optional
If True, ignores *Selector* and uses Baart's Criteria to find
the Fourier degree, within the boundaries (default False).
fourier_degree : 2-tuple, optional
Tuple containing lower and upper bounds on Fourier degree, in that
order (default (2, 25)).
scoring : str, optional
Scoring method to use for *Selector*. This parameter can be:
* "r2", in which case use :math:`R^2` (the default)
* "mse", in which case use mean square error
scoring_cv : positive integer, optional
Number of cross validation folds used in scoring (default 3).
**Returns**
out : object with "fit" and "predict" methods
The created predictor object. | ['make_predictor', '(', 'regressor', '=', 'LassoLarsIC', '(', 'fit_intercept', '=', 'False', ')', 'Selector', '=', 'GridSearchCV', 'fourier_degree', '=', '(', '2', '25', ')', 'selector_processes', '=', '1', 'use_baart', '=', 'False', 'scoring', '=', 'r2', 'scoring_cv', '=', '3', '**', 'kwargs', ')'] | train | https://github.com/astroswego/plotypus/blob/b1162194ca1d4f6c00e79afe3e6fb40f0eaffcb9/src/plotypus/lightcurve.py#L35-L81 |
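A fitting sketch for make_predictor above; the synthetic phase/magnitude arrays and the chosen degree range are placeholders. With the defaults shown in the row, the returned object is a scikit-learn GridSearchCV over the Fourier degree, so fit/predict behave as usual.

import numpy as np

predictor = make_predictor(fourier_degree=(2, 10), scoring='r2', scoring_cv=3)
phase = np.random.uniform(0.0, 1.0, size=(200, 1))                      # phased observation times
mag = np.sin(2 * np.pi * phase).ravel() + 0.05 * np.random.randn(200)   # synthetic light curve
predictor.fit(phase, mag)
model_curve = predictor.predict(np.linspace(0.0, 1.0, 100).reshape(-1, 1))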
7,238 | pywbem/pywbem | pywbem/cim_obj.py | CIMInstance.property_list | def property_list(self, property_list):
"""Setter method; for a description see the getter method."""
if property_list is not None:
msg = "The 'property_list' init parameter and attribute of " \
"CIMInstance is deprecated; Set only the desired properties " \
"instead."
if DEBUG_WARNING_ORIGIN:
msg += "\nTraceback:\n" + ''.join(traceback.format_stack())
warnings.warn(msg, DeprecationWarning,
stacklevel=_stacklevel_above_module(__name__))
property_list = [_ensure_unicode(x).lower()
for x in property_list]
# pylint: disable=attribute-defined-outside-init
self._property_list = property_list | python | def property_list(self, property_list):
"""Setter method; for a description see the getter method."""
if property_list is not None:
msg = "The 'property_list' init parameter and attribute of " \
"CIMInstance is deprecated; Set only the desired properties " \
"instead."
if DEBUG_WARNING_ORIGIN:
msg += "\nTraceback:\n" + ''.join(traceback.format_stack())
warnings.warn(msg, DeprecationWarning,
stacklevel=_stacklevel_above_module(__name__))
property_list = [_ensure_unicode(x).lower()
for x in property_list]
# pylint: disable=attribute-defined-outside-init
self._property_list = property_list | ['def', 'property_list', '(', 'self', ',', 'property_list', ')', ':', 'if', 'property_list', 'is', 'not', 'None', ':', 'msg', '=', '"The \'property_list\' init parameter and attribute of "', '"CIMInstance is deprecated; Set only the desired properties "', '"instead."', 'if', 'DEBUG_WARNING_ORIGIN', ':', 'msg', '+=', '"\\nTraceback:\\n"', '+', "''", '.', 'join', '(', 'traceback', '.', 'format_stack', '(', ')', ')', 'warnings', '.', 'warn', '(', 'msg', ',', 'DeprecationWarning', ',', 'stacklevel', '=', '_stacklevel_above_module', '(', '__name__', ')', ')', 'property_list', '=', '[', '_ensure_unicode', '(', 'x', ')', '.', 'lower', '(', ')', 'for', 'x', 'in', 'property_list', ']', '# pylint: disable=attribute-defined-outside-init', 'self', '.', '_property_list', '=', 'property_list'] | Setter method; for a description see the getter method. | ['Setter', 'method', ';', 'for', 'a', 'description', 'see', 'the', 'getter', 'method', '.'] | train | https://github.com/pywbem/pywbem/blob/e54ecb82c2211e289a268567443d60fdd489f1e4/pywbem/cim_obj.py#L2540-L2554 |
7,239 | internetarchive/brozzler | brozzler/frontier.py | RethinkDbFrontier.enforce_time_limit | def enforce_time_limit(self, site):
'''
Raises `brozzler.ReachedTimeLimit` if appropriate.
'''
if (site.time_limit and site.time_limit > 0
and site.elapsed() > site.time_limit):
self.logger.debug(
"site FINISHED_TIME_LIMIT! time_limit=%s "
"elapsed=%s %s", site.time_limit, site.elapsed(), site)
raise brozzler.ReachedTimeLimit | python | def enforce_time_limit(self, site):
'''
Raises `brozzler.ReachedTimeLimit` if appropriate.
'''
if (site.time_limit and site.time_limit > 0
and site.elapsed() > site.time_limit):
self.logger.debug(
"site FINISHED_TIME_LIMIT! time_limit=%s "
"elapsed=%s %s", site.time_limit, site.elapsed(), site)
raise brozzler.ReachedTimeLimit | ['def', 'enforce_time_limit', '(', 'self', ',', 'site', ')', ':', 'if', '(', 'site', '.', 'time_limit', 'and', 'site', '.', 'time_limit', '>', '0', 'and', 'site', '.', 'elapsed', '(', ')', '>', 'site', '.', 'time_limit', ')', ':', 'self', '.', 'logger', '.', 'debug', '(', '"site FINISHED_TIME_LIMIT! time_limit=%s "', '"elapsed=%s %s"', ',', 'site', '.', 'time_limit', ',', 'site', '.', 'elapsed', '(', ')', ',', 'site', ')', 'raise', 'brozzler', '.', 'ReachedTimeLimit'] | Raises `brozzler.ReachedTimeLimit` if appropriate. | ['Raises', 'brozzler', '.', 'ReachedTimeLimit', 'if', 'appropriate', '.'] | train | https://github.com/internetarchive/brozzler/blob/411b3f266a38b9bb942021c0121ebd8e5ca66447/brozzler/frontier.py#L155-L164 |
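Sketch of the control flow implied by enforce_time_limit above; the frontier and site objects are placeholders, and only the elapsed-vs-time_limit check and the ReachedTimeLimit exception come from the row.

try:
    frontier.enforce_time_limit(site)     # placeholders for a RethinkDbFrontier and a brozzler Site
except brozzler.ReachedTimeLimit:
    pass  # site.elapsed() exceeded site.time_limit: stop scheduling pages and wrap up this site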
7,240 | ubc/ubcpi | ubcpi/answer_pool.py | offer_answer | def offer_answer(pool, answer, rationale, student_id, algo, options):
"""
submit a student answer to the answer pool
The answer maybe selected to stay in the pool depending on the selection algorithm
Args:
pool (dict): answer pool
Answer pool format:
{
option1_index: {
'student_id': { can store algorithm specific info here },
...
}
option2_index: ...
}
answer (int): the option student selected
rationale (str): the rationale text
student_id (str): student identifier
algo (str): the selection algorithm
options (dict): the options available in the question
Raises:
UnknownChooseAnswerAlgorithm: when we don't know the algorithm
"""
if algo['name'] == 'simple':
offer_simple(pool, answer, rationale, student_id, options)
elif algo['name'] == 'random':
offer_random(pool, answer, rationale, student_id, options)
else:
raise UnknownChooseAnswerAlgorithm() | python | def offer_answer(pool, answer, rationale, student_id, algo, options):
"""
submit a student answer to the answer pool
The answer maybe selected to stay in the pool depending on the selection algorithm
Args:
pool (dict): answer pool
Answer pool format:
{
option1_index: {
'student_id': { can store algorithm specific info here },
...
}
option2_index: ...
}
answer (int): the option student selected
rationale (str): the rationale text
student_id (str): student identifier
algo (str): the selection algorithm
options (dict): the options available in the question
Raises:
UnknownChooseAnswerAlgorithm: when we don't know the algorithm
"""
if algo['name'] == 'simple':
offer_simple(pool, answer, rationale, student_id, options)
elif algo['name'] == 'random':
offer_random(pool, answer, rationale, student_id, options)
else:
raise UnknownChooseAnswerAlgorithm() | ['def', 'offer_answer', '(', 'pool', ',', 'answer', ',', 'rationale', ',', 'student_id', ',', 'algo', ',', 'options', ')', ':', 'if', 'algo', '[', "'name'", ']', '==', "'simple'", ':', 'offer_simple', '(', 'pool', ',', 'answer', ',', 'rationale', ',', 'student_id', ',', 'options', ')', 'elif', 'algo', '[', "'name'", ']', '==', "'random'", ':', 'offer_random', '(', 'pool', ',', 'answer', ',', 'rationale', ',', 'student_id', ',', 'options', ')', 'else', ':', 'raise', 'UnknownChooseAnswerAlgorithm', '(', ')'] | submit a student answer to the answer pool
The answer maybe selected to stay in the pool depending on the selection algorithm
Args:
pool (dict): answer pool
Answer pool format:
{
option1_index: {
'student_id': { can store algorithm specific info here },
...
}
option2_index: ...
}
answer (int): the option student selected
rationale (str): the rationale text
student_id (str): student identifier
algo (str): the selection algorithm
options (dict): the options available in the question
Raises:
UnknownChooseAnswerAlgorithm: when we don't know the algorithm | ['submit', 'a', 'student', 'answer', 'to', 'the', 'answer', 'pool'] | train | https://github.com/ubc/ubcpi/blob/7b6de03f93f3a4a8af4b92dfde7c69eeaf21f46e/ubcpi/answer_pool.py#L43-L73 |
7,241 | tensorflow/cleverhans | cleverhans/utils_tf.py | jacobian_augmentation | def jacobian_augmentation(sess,
x,
X_sub_prev,
Y_sub,
grads,
lmbda,
aug_batch_size=512,
feed=None):
"""
Augment an adversary's substitute training set using the Jacobian
of a substitute model to generate new synthetic inputs.
See https://arxiv.org/abs/1602.02697 for more details.
See cleverhans_tutorials/mnist_blackbox.py for example use case
:param sess: TF session in which the substitute model is defined
:param x: input TF placeholder for the substitute model
:param X_sub_prev: substitute training data available to the adversary
at the previous iteration
:param Y_sub: substitute training labels available to the adversary
at the previous iteration
:param grads: Jacobian symbolic graph for the substitute
(should be generated using utils_tf.jacobian_graph)
:return: augmented substitute data (will need to be labeled by oracle)
"""
assert len(x.get_shape()) == len(np.shape(X_sub_prev))
assert len(grads) >= np.max(Y_sub) + 1
assert len(X_sub_prev) == len(Y_sub)
aug_batch_size = min(aug_batch_size, X_sub_prev.shape[0])
# Prepare input_shape (outside loop) for feeding dictionary below
input_shape = list(x.get_shape())
input_shape[0] = 1
# Create new numpy array for adversary training data
# with twice as many components on the first dimension.
X_sub = np.vstack([X_sub_prev, X_sub_prev])
num_samples = X_sub_prev.shape[0]
# Creating and processing as batch
for p_idxs in range(0, num_samples, aug_batch_size):
X_batch = X_sub_prev[p_idxs:p_idxs + aug_batch_size, ...]
feed_dict = {x: X_batch}
if feed is not None:
feed_dict.update(feed)
# Compute sign matrix
grad_val = sess.run([tf.sign(grads)], feed_dict=feed_dict)[0]
# Create new synthetic point in adversary substitute training set
for (indx, ind) in zip(range(p_idxs, p_idxs + X_batch.shape[0]),
range(X_batch.shape[0])):
X_sub[num_samples + indx] = (
X_batch[ind] + lmbda * grad_val[Y_sub[indx], ind, ...])
# Return augmented training data (needs to be labeled afterwards)
return X_sub | python | def jacobian_augmentation(sess,
x,
X_sub_prev,
Y_sub,
grads,
lmbda,
aug_batch_size=512,
feed=None):
"""
Augment an adversary's substitute training set using the Jacobian
of a substitute model to generate new synthetic inputs.
See https://arxiv.org/abs/1602.02697 for more details.
See cleverhans_tutorials/mnist_blackbox.py for example use case
:param sess: TF session in which the substitute model is defined
:param x: input TF placeholder for the substitute model
:param X_sub_prev: substitute training data available to the adversary
at the previous iteration
:param Y_sub: substitute training labels available to the adversary
at the previous iteration
:param grads: Jacobian symbolic graph for the substitute
(should be generated using utils_tf.jacobian_graph)
:return: augmented substitute data (will need to be labeled by oracle)
"""
assert len(x.get_shape()) == len(np.shape(X_sub_prev))
assert len(grads) >= np.max(Y_sub) + 1
assert len(X_sub_prev) == len(Y_sub)
aug_batch_size = min(aug_batch_size, X_sub_prev.shape[0])
# Prepare input_shape (outside loop) for feeding dictionary below
input_shape = list(x.get_shape())
input_shape[0] = 1
# Create new numpy array for adversary training data
# with twice as many components on the first dimension.
X_sub = np.vstack([X_sub_prev, X_sub_prev])
num_samples = X_sub_prev.shape[0]
# Creating and processing as batch
for p_idxs in range(0, num_samples, aug_batch_size):
X_batch = X_sub_prev[p_idxs:p_idxs + aug_batch_size, ...]
feed_dict = {x: X_batch}
if feed is not None:
feed_dict.update(feed)
# Compute sign matrix
grad_val = sess.run([tf.sign(grads)], feed_dict=feed_dict)[0]
# Create new synthetic point in adversary substitute training set
for (indx, ind) in zip(range(p_idxs, p_idxs + X_batch.shape[0]),
range(X_batch.shape[0])):
X_sub[num_samples + indx] = (
X_batch[ind] + lmbda * grad_val[Y_sub[indx], ind, ...])
# Return augmented training data (needs to be labeled afterwards)
return X_sub | ['def', 'jacobian_augmentation', '(', 'sess', ',', 'x', ',', 'X_sub_prev', ',', 'Y_sub', ',', 'grads', ',', 'lmbda', ',', 'aug_batch_size', '=', '512', ',', 'feed', '=', 'None', ')', ':', 'assert', 'len', '(', 'x', '.', 'get_shape', '(', ')', ')', '==', 'len', '(', 'np', '.', 'shape', '(', 'X_sub_prev', ')', ')', 'assert', 'len', '(', 'grads', ')', '>=', 'np', '.', 'max', '(', 'Y_sub', ')', '+', '1', 'assert', 'len', '(', 'X_sub_prev', ')', '==', 'len', '(', 'Y_sub', ')', 'aug_batch_size', '=', 'min', '(', 'aug_batch_size', ',', 'X_sub_prev', '.', 'shape', '[', '0', ']', ')', '# Prepare input_shape (outside loop) for feeding dictionary below', 'input_shape', '=', 'list', '(', 'x', '.', 'get_shape', '(', ')', ')', 'input_shape', '[', '0', ']', '=', '1', '# Create new numpy array for adversary training data', '# with twice as many components on the first dimension.', 'X_sub', '=', 'np', '.', 'vstack', '(', '[', 'X_sub_prev', ',', 'X_sub_prev', ']', ')', 'num_samples', '=', 'X_sub_prev', '.', 'shape', '[', '0', ']', '# Creating and processing as batch', 'for', 'p_idxs', 'in', 'range', '(', '0', ',', 'num_samples', ',', 'aug_batch_size', ')', ':', 'X_batch', '=', 'X_sub_prev', '[', 'p_idxs', ':', 'p_idxs', '+', 'aug_batch_size', ',', '...', ']', 'feed_dict', '=', '{', 'x', ':', 'X_batch', '}', 'if', 'feed', 'is', 'not', 'None', ':', 'feed_dict', '.', 'update', '(', 'feed', ')', '# Compute sign matrix', 'grad_val', '=', 'sess', '.', 'run', '(', '[', 'tf', '.', 'sign', '(', 'grads', ')', ']', ',', 'feed_dict', '=', 'feed_dict', ')', '[', '0', ']', '# Create new synthetic point in adversary substitute training set', 'for', '(', 'indx', ',', 'ind', ')', 'in', 'zip', '(', 'range', '(', 'p_idxs', ',', 'p_idxs', '+', 'X_batch', '.', 'shape', '[', '0', ']', ')', ',', 'range', '(', 'X_batch', '.', 'shape', '[', '0', ']', ')', ')', ':', 'X_sub', '[', 'num_samples', '+', 'indx', ']', '=', '(', 'X_batch', '[', 'ind', ']', '+', 'lmbda', '*', 'grad_val', '[', 'Y_sub', '[', 'indx', ']', ',', 'ind', ',', '...', ']', ')', '# Return augmented training data (needs to be labeled afterwards)', 'return', 'X_sub'] | Augment an adversary's substitute training set using the Jacobian
of a substitute model to generate new synthetic inputs.
See https://arxiv.org/abs/1602.02697 for more details.
See cleverhans_tutorials/mnist_blackbox.py for example use case
:param sess: TF session in which the substitute model is defined
:param x: input TF placeholder for the substitute model
:param X_sub_prev: substitute training data available to the adversary
at the previous iteration
:param Y_sub: substitute training labels available to the adversary
at the previous iteration
:param grads: Jacobian symbolic graph for the substitute
(should be generated using utils_tf.jacobian_graph)
:return: augmented substitute data (will need to be labeled by oracle) | ['Augment', 'an', 'adversary', 's', 'substitute', 'training', 'set', 'using', 'the', 'Jacobian', 'of', 'a', 'substitute', 'model', 'to', 'generate', 'new', 'synthetic', 'inputs', '.', 'See', 'https', ':', '//', 'arxiv', '.', 'org', '/', 'abs', '/', '1602', '.', '02697', 'for', 'more', 'details', '.', 'See', 'cleverhans_tutorials', '/', 'mnist_blackbox', '.', 'py', 'for', 'example', 'use', 'case', ':', 'param', 'sess', ':', 'TF', 'session', 'in', 'which', 'the', 'substitute', 'model', 'is', 'defined', ':', 'param', 'x', ':', 'input', 'TF', 'placeholder', 'for', 'the', 'substitute', 'model', ':', 'param', 'X_sub_prev', ':', 'substitute', 'training', 'data', 'available', 'to', 'the', 'adversary', 'at', 'the', 'previous', 'iteration', ':', 'param', 'Y_sub', ':', 'substitute', 'training', 'labels', 'available', 'to', 'the', 'adversary', 'at', 'the', 'previous', 'iteration', ':', 'param', 'grads', ':', 'Jacobian', 'symbolic', 'graph', 'for', 'the', 'substitute', '(', 'should', 'be', 'generated', 'using', 'utils_tf', '.', 'jacobian_graph', ')', ':', 'return', ':', 'augmented', 'substitute', 'data', '(', 'will', 'need', 'to', 'be', 'labeled', 'by', 'oracle', ')'] | train | https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/utils_tf.py#L667-L722 |
7,242 | xolox/python-vcs-repo-mgr | vcs_repo_mgr/backends/bzr.py | BzrRepo.update_context | def update_context(self):
"""
Make sure Bazaar respects the configured author.
This method first calls :func:`.Repository.update_context()` and then
it sets the ``$BZR_EMAIL`` environment variable based on the value of
:attr:`~Repository.author` (but only if :attr:`~Repository.author` was
set by the caller).
This is a workaround for a weird behavior of Bazaar that I've observed
when running under Python 2.6: The ``bzr commit --author`` command line
option is documented but it doesn't prevent Bazaar from nevertheless
reporting the following error::
bzr: ERROR: Unable to determine your name.
Please, set your name with the 'whoami' command.
E.g. bzr whoami "Your Name <[email protected]>"
"""
# Call our superclass.
super(BzrRepo, self).update_context()
# Try to ensure that $BZR_EMAIL is set (see above for the reason)
# but only if the `author' property was set by the caller (more
# specifically there's no point in setting $BZR_EMAIL to the
# output of `bzr whoami').
if self.__dict__.get('author'):
environment = self.context.options.setdefault('environment', {})
environment.setdefault('BZR_EMAIL', self.author.combined) | python | def update_context(self):
"""
Make sure Bazaar respects the configured author.
This method first calls :func:`.Repository.update_context()` and then
it sets the ``$BZR_EMAIL`` environment variable based on the value of
:attr:`~Repository.author` (but only if :attr:`~Repository.author` was
set by the caller).
This is a workaround for a weird behavior of Bazaar that I've observed
when running under Python 2.6: The ``bzr commit --author`` command line
option is documented but it doesn't prevent Bazaar from nevertheless
reporting the following error::
bzr: ERROR: Unable to determine your name.
Please, set your name with the 'whoami' command.
E.g. bzr whoami "Your Name <[email protected]>"
"""
# Call our superclass.
super(BzrRepo, self).update_context()
# Try to ensure that $BZR_EMAIL is set (see above for the reason)
# but only if the `author' property was set by the caller (more
# specifically there's no point in setting $BZR_EMAIL to the
# output of `bzr whoami').
if self.__dict__.get('author'):
environment = self.context.options.setdefault('environment', {})
environment.setdefault('BZR_EMAIL', self.author.combined) | ['def', 'update_context', '(', 'self', ')', ':', '# Call our superclass.', 'super', '(', 'BzrRepo', ',', 'self', ')', '.', 'update_context', '(', ')', '# Try to ensure that $BZR_EMAIL is set (see above for the reason)', "# but only if the `author' property was set by the caller (more", "# specifically there's no point in setting $BZR_EMAIL to the", "# output of `bzr whoami').", 'if', 'self', '.', '__dict__', '.', 'get', '(', "'author'", ')', ':', 'environment', '=', 'self', '.', 'context', '.', 'options', '.', 'setdefault', '(', "'environment'", ',', '{', '}', ')', 'environment', '.', 'setdefault', '(', "'BZR_EMAIL'", ',', 'self', '.', 'author', '.', 'combined', ')'] | Make sure Bazaar respects the configured author.
This method first calls :func:`.Repository.update_context()` and then
it sets the ``$BZR_EMAIL`` environment variable based on the value of
:attr:`~Repository.author` (but only if :attr:`~Repository.author` was
set by the caller).
This is a workaround for a weird behavior of Bazaar that I've observed
when running under Python 2.6: The ``bzr commit --author`` command line
option is documented but it doesn't prevent Bazaar from nevertheless
reporting the following error::
bzr: ERROR: Unable to determine your name.
Please, set your name with the 'whoami' command.
E.g. bzr whoami "Your Name <[email protected]>" | ['Make', 'sure', 'Bazaar', 'respects', 'the', 'configured', 'author', '.'] | train | https://github.com/xolox/python-vcs-repo-mgr/blob/fdad2441a3e7ba5deeeddfa1c2f5ebc00c393aed/vcs_repo_mgr/backends/bzr.py#L272-L298 |
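The same workaround can be reproduced outside the class: exporting BZR_EMAIL for the child process keeps `bzr commit --author` from failing with the "Unable to determine your name" error; the name and e-mail below are placeholders:

import os
import subprocess

env = dict(os.environ)
env.setdefault('BZR_EMAIL', 'Your Name <[email protected]>')   # mirrors what update_context() injects
subprocess.check_call(['bzr', 'commit', '-m', 'some change',
                       '--author', 'Your Name <[email protected]>'], env=env)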
7,243 | skyfielders/python-skyfield | skyfield/positionlib.py | ICRF.galactic_xyz | def galactic_xyz(self):
"""Compute galactic coordinates (x, y, z)"""
vector = _GALACTIC.dot(self.position.au)
return Distance(vector) | python | def galactic_xyz(self):
"""Compute galactic coordinates (x, y, z)"""
vector = _GALACTIC.dot(self.position.au)
return Distance(vector) | ['def', 'galactic_xyz', '(', 'self', ')', ':', 'vector', '=', '_GALACTIC', '.', 'dot', '(', 'self', '.', 'position', '.', 'au', ')', 'return', 'Distance', '(', 'vector', ')'] | Compute galactic coordinates (x, y, z) | ['Compute', 'galactic', 'coordinates', '(', 'x', 'y', 'z', ')'] | train | https://github.com/skyfielders/python-skyfield/blob/51d9e042e06457f6b1f2415296d50a38cb3a300f/skyfield/positionlib.py#L245-L248 |
7,244 | novopl/peltak | src/peltak/core/conf.py | load_py_config | def load_py_config(conf_file):
# type: (str) -> None
""" Import configuration from a python file.
This will just import the file into python. Sky is the limit. The file
has to deal with the configuration all by itself (i.e. call conf.init()).
You will also need to add your src directory to sys.paths if it's not the
current working directory. This is done automatically if you use yaml
config as well.
Args:
conf_file (str):
Path to the py module config. This function will not check the file
name or extension and will just crash if the given file does not
exist or is not a valid python file.
"""
if sys.version_info >= (3, 5):
from importlib import util
spec = util.spec_from_file_location('pelconf', conf_file)
mod = util.module_from_spec(spec)
spec.loader.exec_module(mod)
elif sys.version_info >= (3, 3):
from importlib import machinery
loader = machinery.SourceFileLoader('pelconf', conf_file)
_ = loader.load_module()
elif sys.version_info <= (3, 0):
import imp
imp.load_source('pelconf', conf_file) | python | def load_py_config(conf_file):
# type: (str) -> None
""" Import configuration from a python file.
This will just import the file into python. Sky is the limit. The file
has to deal with the configuration all by itself (i.e. call conf.init()).
You will also need to add your src directory to sys.paths if it's not the
current working directory. This is done automatically if you use yaml
config as well.
Args:
conf_file (str):
Path to the py module config. This function will not check the file
name or extension and will just crash if the given file does not
exist or is not a valid python file.
"""
if sys.version_info >= (3, 5):
from importlib import util
spec = util.spec_from_file_location('pelconf', conf_file)
mod = util.module_from_spec(spec)
spec.loader.exec_module(mod)
elif sys.version_info >= (3, 3):
from importlib import machinery
loader = machinery.SourceFileLoader('pelconf', conf_file)
_ = loader.load_module()
elif sys.version_info <= (3, 0):
import imp
imp.load_source('pelconf', conf_file) | ['def', 'load_py_config', '(', 'conf_file', ')', ':', '# type: (str) -> None', 'if', 'sys', '.', 'version_info', '>=', '(', '3', ',', '5', ')', ':', 'from', 'importlib', 'import', 'util', 'spec', '=', 'util', '.', 'spec_from_file_location', '(', "'pelconf'", ',', 'conf_file', ')', 'mod', '=', 'util', '.', 'module_from_spec', '(', 'spec', ')', 'spec', '.', 'loader', '.', 'exec_module', '(', 'mod', ')', 'elif', 'sys', '.', 'version_info', '>=', '(', '3', ',', '3', ')', ':', 'from', 'importlib', 'import', 'machinery', 'loader', '=', 'machinery', '.', 'SourceFileLoader', '(', "'pelconf'", ',', 'conf_file', ')', '_', '=', 'loader', '.', 'load_module', '(', ')', 'elif', 'sys', '.', 'version_info', '<=', '(', '3', ',', '0', ')', ':', 'import', 'imp', 'imp', '.', 'load_source', '(', "'pelconf'", ',', 'conf_file', ')'] | Import configuration from a python file.
This will just import the file into python. Sky is the limit. The file
has to deal with the configuration all by itself (i.e. call conf.init()).
You will also need to add your src directory to sys.paths if it's not the
current working directory. This is done automatically if you use yaml
config as well.
Args:
conf_file (str):
Path to the py module config. This function will not check the file
name or extension and will just crash if the given file does not
exist or is not a valid python file. | ['Import', 'configuration', 'from', 'a', 'python', 'file', '.'] | train | https://github.com/novopl/peltak/blob/b627acc019e3665875fe76cdca0a14773b69beaa/src/peltak/core/conf.py#L140-L171 |
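For reference, the Python 3.5+ branch is the one most installs hit today; the same three importlib calls can be used directly to execute a project-local pelconf.py (the path below is illustrative), which is then expected to configure itself:

from importlib import util

spec = util.spec_from_file_location('pelconf', './pelconf.py')
mod = util.module_from_spec(spec)
spec.loader.exec_module(mod)   # runs the config module; it is expected to call conf.init() itself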
7,245 | csparpa/pyowm | pyowm/agroapi10/enums.py | PaletteEnum.items | def items(cls):
"""
All values for this enum
:return: list of str
"""
return [
cls.GREEN,
cls.BLACK_AND_WHITE,
cls.CONTRAST_SHIFTED,
cls.CONTRAST_CONTINUOUS
] | python | def items(cls):
"""
All values for this enum
:return: list of str
"""
return [
cls.GREEN,
cls.BLACK_AND_WHITE,
cls.CONTRAST_SHIFTED,
cls.CONTRAST_CONTINUOUS
] | ['def', 'items', '(', 'cls', ')', ':', 'return', '[', 'cls', '.', 'GREEN', ',', 'cls', '.', 'BLACK_AND_WHITE', ',', 'cls', '.', 'CONTRAST_SHIFTED', ',', 'cls', '.', 'CONTRAST_CONTINUOUS', ']'] | All values for this enum
:return: list of str | ['All', 'values', 'for', 'this', 'enum', ':', 'return', ':', 'list', 'of', 'str'] | train | https://github.com/csparpa/pyowm/blob/cdd59eb72f32f7238624ceef9b2e2329a5ebd472/pyowm/agroapi10/enums.py#L61-L72 |
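Because the enum is just a bag of string constants, items() is handy for validating a requested palette before building an API call; items() is presumably exposed as a classmethod (the decorator is not captured in the row above):

from pyowm.agroapi10.enums import PaletteEnum

requested = PaletteEnum.GREEN
assert requested in PaletteEnum.items()   # membership check against every supported palette code
print(PaletteEnum.items())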
7,246 | adamzap/landslide | landslide/rst.py | html_parts | def html_parts(input_string, source_path=None, destination_path=None,
input_encoding='unicode', doctitle=1, initial_header_level=1):
"""
Given an input string, returns a dictionary of HTML document parts.
Dictionary keys are the names of parts, and values are Unicode strings;
encoding is up to the client.
Parameters:
- `input_string`: A multi-line text string; required.
- `source_path`: Path to the source file or object. Optional, but useful
for diagnostic output (system messages).
- `destination_path`: Path to the file or object which will receive the
output; optional. Used for determining relative paths (stylesheets,
source links, etc.).
- `input_encoding`: The encoding of `input_string`. If it is an encoded
8-bit string, provide the correct encoding. If it is a Unicode string,
use "unicode", the default.
- `doctitle`: Disable the promotion of a lone top-level section title to
document title (and subsequent section title to document subtitle
promotion); enabled by default.
- `initial_header_level`: The initial level for header elements (e.g. 1
for "<h1>").
"""
overrides = {
'input_encoding': input_encoding,
'doctitle_xform': doctitle,
'initial_header_level': initial_header_level,
'report_level': 5
}
parts = core.publish_parts(
source=input_string, source_path=source_path,
destination_path=destination_path,
writer_name='html', settings_overrides=overrides)
return parts | python | def html_parts(input_string, source_path=None, destination_path=None,
input_encoding='unicode', doctitle=1, initial_header_level=1):
"""
Given an input string, returns a dictionary of HTML document parts.
Dictionary keys are the names of parts, and values are Unicode strings;
encoding is up to the client.
Parameters:
- `input_string`: A multi-line text string; required.
- `source_path`: Path to the source file or object. Optional, but useful
for diagnostic output (system messages).
- `destination_path`: Path to the file or object which will receive the
output; optional. Used for determining relative paths (stylesheets,
source links, etc.).
- `input_encoding`: The encoding of `input_string`. If it is an encoded
8-bit string, provide the correct encoding. If it is a Unicode string,
use "unicode", the default.
- `doctitle`: Disable the promotion of a lone top-level section title to
document title (and subsequent section title to document subtitle
promotion); enabled by default.
- `initial_header_level`: The initial level for header elements (e.g. 1
for "<h1>").
"""
overrides = {
'input_encoding': input_encoding,
'doctitle_xform': doctitle,
'initial_header_level': initial_header_level,
'report_level': 5
}
parts = core.publish_parts(
source=input_string, source_path=source_path,
destination_path=destination_path,
writer_name='html', settings_overrides=overrides)
return parts | ['def', 'html_parts', '(', 'input_string', ',', 'source_path', '=', 'None', ',', 'destination_path', '=', 'None', ',', 'input_encoding', '=', "'unicode'", ',', 'doctitle', '=', '1', ',', 'initial_header_level', '=', '1', ')', ':', 'overrides', '=', '{', "'input_encoding'", ':', 'input_encoding', ',', "'doctitle_xform'", ':', 'doctitle', ',', "'initial_header_level'", ':', 'initial_header_level', ',', "'report_level'", ':', '5', '}', 'parts', '=', 'core', '.', 'publish_parts', '(', 'source', '=', 'input_string', ',', 'source_path', '=', 'source_path', ',', 'destination_path', '=', 'destination_path', ',', 'writer_name', '=', "'html'", ',', 'settings_overrides', '=', 'overrides', ')', 'return', 'parts'] | Given an input string, returns a dictionary of HTML document parts.
Dictionary keys are the names of parts, and values are Unicode strings;
encoding is up to the client.
Parameters:
- `input_string`: A multi-line text string; required.
- `source_path`: Path to the source file or object. Optional, but useful
for diagnostic output (system messages).
- `destination_path`: Path to the file or object which will receive the
output; optional. Used for determining relative paths (stylesheets,
source links, etc.).
- `input_encoding`: The encoding of `input_string`. If it is an encoded
8-bit string, provide the correct encoding. If it is a Unicode string,
use "unicode", the default.
- `doctitle`: Disable the promotion of a lone top-level section title to
document title (and subsequent section title to document subtitle
promotion); enabled by default.
- `initial_header_level`: The initial level for header elements (e.g. 1
for "<h1>"). | ['Given', 'an', 'input', 'string', 'returns', 'a', 'dictionary', 'of', 'HTML', 'document', 'parts', '.'] | train | https://github.com/adamzap/landslide/blob/59b0403d7a7cca4b8ff6ba7fb76efb9748b3f832/landslide/rst.py#L43-L79 |
7,247 | jalanb/pysyte | pysyte/bash/git.py | branches | def branches(remotes=False):
"""Return a list of all local branches in the repo
If remotes is true then also include remote branches
Note: the normal '*' indicator for current branch is removed
this method just gives a list of branch names
Use branch() method to determine the current branch
"""
stdout = branch('--list %s' % (remotes and '-a' or ''), quiet=True)
return [_.lstrip('*').strip() for _ in stdout.splitlines()] | python | def branches(remotes=False):
"""Return a list of all local branches in the repo
If remotes is true then also include remote branches
Note: the normal '*' indicator for current branch is removed
this method just gives a list of branch names
Use branch() method to determine the current branch
"""
stdout = branch('--list %s' % (remotes and '-a' or ''), quiet=True)
return [_.lstrip('*').strip() for _ in stdout.splitlines()] | ['def', 'branches', '(', 'remotes', '=', 'False', ')', ':', 'stdout', '=', 'branch', '(', "'--list %s'", '%', '(', 'remotes', 'and', "'-a'", 'or', "''", ')', ',', 'quiet', '=', 'True', ')', 'return', '[', '_', '.', 'lstrip', '(', "'*'", ')', '.', 'strip', '(', ')', 'for', '_', 'in', 'stdout', '.', 'splitlines', '(', ')', ']'] | Return a list of all local branches in the repo
If remotes is true then also include remote branches
Note: the normal '*' indicator for current branch is removed
this method just gives a list of branch names
Use branch() method to determine the current branch | ['Return', 'a', 'list', 'of', 'all', 'local', 'branches', 'in', 'the', 'repo'] | train | https://github.com/jalanb/pysyte/blob/4e278101943d1ceb1a6bcaf6ddc72052ecf13114/pysyte/bash/git.py#L258-L268 |
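A quick sketch of the difference between the two calls, assuming the module is importable as pysyte.bash.git; both must run inside a git working copy, and since the '*' marker is stripped from every name, branch() (defined elsewhere in the module) remains the way to find the checked-out branch:

from pysyte.bash.git import branches

local_names = branches()             # e.g. ['master', 'feature/login']
all_names = branches(remotes=True)   # additionally lists remote-tracking branches
print(set(all_names) - set(local_names))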
7,248 | rtfd/sphinx-autoapi | autoapi/mappers/javascript.py | JavaScriptSphinxMapper.create_class | def create_class(self, data, options=None, **kwargs):
"""Return instance of class based on Javascript data
Data keys handled here:
type
Set the object class
consts, types, vars, funcs
Recurse into :py:meth:`create_class` to create child object
instances
:param data: dictionary data from godocjson output
"""
obj_map = dict((cls.type, cls) for cls in ALL_CLASSES)
try:
cls = obj_map[data["kind"]]
except (KeyError, TypeError):
LOGGER.warning("Unknown Type: %s" % data)
else:
# Recurse for children
obj = cls(data, jinja_env=self.jinja_env)
if "children" in data:
for child_data in data["children"]:
for child_obj in self.create_class(child_data, options=options):
obj.children.append(child_obj)
yield obj | python | def create_class(self, data, options=None, **kwargs):
"""Return instance of class based on Javascript data
Data keys handled here:
type
Set the object class
consts, types, vars, funcs
Recurse into :py:meth:`create_class` to create child object
instances
:param data: dictionary data from godocjson output
"""
obj_map = dict((cls.type, cls) for cls in ALL_CLASSES)
try:
cls = obj_map[data["kind"]]
except (KeyError, TypeError):
LOGGER.warning("Unknown Type: %s" % data)
else:
# Recurse for children
obj = cls(data, jinja_env=self.jinja_env)
if "children" in data:
for child_data in data["children"]:
for child_obj in self.create_class(child_data, options=options):
obj.children.append(child_obj)
yield obj | ['def', 'create_class', '(', 'self', ',', 'data', ',', 'options', '=', 'None', ',', '*', '*', 'kwargs', ')', ':', 'obj_map', '=', 'dict', '(', '(', 'cls', '.', 'type', ',', 'cls', ')', 'for', 'cls', 'in', 'ALL_CLASSES', ')', 'try', ':', 'cls', '=', 'obj_map', '[', 'data', '[', '"kind"', ']', ']', 'except', '(', 'KeyError', ',', 'TypeError', ')', ':', 'LOGGER', '.', 'warning', '(', '"Unknown Type: %s"', '%', 'data', ')', 'else', ':', '# Recurse for children', 'obj', '=', 'cls', '(', 'data', ',', 'jinja_env', '=', 'self', '.', 'jinja_env', ')', 'if', '"children"', 'in', 'data', ':', 'for', 'child_data', 'in', 'data', '[', '"children"', ']', ':', 'for', 'child_obj', 'in', 'self', '.', 'create_class', '(', 'child_data', ',', 'options', '=', 'options', ')', ':', 'obj', '.', 'children', '.', 'append', '(', 'child_obj', ')', 'yield', 'obj'] | Return instance of class based on Javascript data
Data keys handled here:
type
Set the object class
consts, types, vars, funcs
Recurse into :py:meth:`create_class` to create child object
instances
:param data: dictionary data from godocjson output | ['Return', 'instance', 'of', 'class', 'based', 'on', 'Javascript', 'data'] | train | https://github.com/rtfd/sphinx-autoapi/blob/9735f43a8d9ff4620c7bcbd177fd1bb7608052e9/autoapi/mappers/javascript.py#L50-L76 |
7,249 | glue-viz/glue-vispy-viewers | glue_vispy_viewers/extern/vispy/visuals/shaders/parsing.py | find_functions | def find_functions(code):
"""
Return a list of (name, arguments, return type) for all function
definition2 found in *code*. Arguments are returned as [(type, name), ...].
"""
regex = "^\s*" + re_func_decl + "\s*{"
funcs = []
while True:
m = re.search(regex, code, re.M)
if m is None:
return funcs
rtype, name, args = m.groups()[:3]
if args == 'void' or args.strip() == '':
args = []
else:
args = [tuple(arg.strip().split(' ')) for arg in args.split(',')]
funcs.append((name, args, rtype))
code = code[m.end():] | python | def find_functions(code):
"""
Return a list of (name, arguments, return type) for all function
definitions found in *code*. Arguments are returned as [(type, name), ...].
"""
regex = "^\s*" + re_func_decl + "\s*{"
funcs = []
while True:
m = re.search(regex, code, re.M)
if m is None:
return funcs
rtype, name, args = m.groups()[:3]
if args == 'void' or args.strip() == '':
args = []
else:
args = [tuple(arg.strip().split(' ')) for arg in args.split(',')]
funcs.append((name, args, rtype))
code = code[m.end():] | ['def', 'find_functions', '(', 'code', ')', ':', 'regex', '=', '"^\\s*"', '+', 're_func_decl', '+', '"\\s*{"', 'funcs', '=', '[', ']', 'while', 'True', ':', 'm', '=', 're', '.', 'search', '(', 'regex', ',', 'code', ',', 're', '.', 'M', ')', 'if', 'm', 'is', 'None', ':', 'return', 'funcs', 'rtype', ',', 'name', ',', 'args', '=', 'm', '.', 'groups', '(', ')', '[', ':', '3', ']', 'if', 'args', '==', "'void'", 'or', 'args', '.', 'strip', '(', ')', '==', "''", ':', 'args', '=', '[', ']', 'else', ':', 'args', '=', '[', 'tuple', '(', 'arg', '.', 'strip', '(', ')', '.', 'split', '(', "' '", ')', ')', 'for', 'arg', 'in', 'args', '.', 'split', '(', "','", ')', ']', 'funcs', '.', 'append', '(', '(', 'name', ',', 'args', ',', 'rtype', ')', ')', 'code', '=', 'code', '[', 'm', '.', 'end', '(', ')', ':', ']'] | Return a list of (name, arguments, return type) for all function
definitions found in *code*. Arguments are returned as [(type, name), ...]. | ['Return', 'a', 'list', 'of', '(', 'name', 'arguments', 'return', 'type', ')', 'for', 'all', 'function', 'definitions', 'found', 'in', '*', 'code', '*', '.', 'Arguments', 'are', 'returned', 'as', '[', '(', 'type', 'name', ')', '...', ']', '.'] | train | https://github.com/glue-viz/glue-vispy-viewers/blob/54a4351d98c1f90dfb1a557d1b447c1f57470eea/glue_vispy_viewers/extern/vispy/visuals/shaders/parsing.py#L73-L93 |
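A small GLSL snippet pushed through find_functions, assuming the upstream vispy module layout rather than the vendored copy in this repository; the declarations it accepts are governed by re_func_decl in the same file, so the expected output is indicative rather than guaranteed:

from vispy.visuals.shaders.parsing import find_functions

glsl = """
vec4 shade(vec3 normal, vec3 light_dir) {
    return vec4(normal * light_dir, 1.0);
}

void main() {
    gl_FragColor = vec4(1.0);
}
"""

for name, args, rtype in find_functions(glsl):
    print(rtype, name, args)
# roughly: vec4 shade [('vec3', 'normal'), ('vec3', 'light_dir')]
#          void main []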
7,250 | aws/sagemaker-containers | src/sagemaker_containers/_env.py | _create_training_directories | def _create_training_directories():
"""Creates the directory structure and files necessary for training under the base path
"""
logger.info('Creating a new training folder under %s .' % base_dir)
os.makedirs(model_dir)
os.makedirs(input_config_dir)
os.makedirs(output_data_dir)
_write_json({}, hyperparameters_file_dir)
_write_json({}, input_data_config_file_dir)
host_name = socket.gethostname()
resources_dict = {
"current_host": host_name,
"hosts": [host_name]
}
_write_json(resources_dict, resource_config_file_dir) | python | def _create_training_directories():
"""Creates the directory structure and files necessary for training under the base path
"""
logger.info('Creating a new training folder under %s .' % base_dir)
os.makedirs(model_dir)
os.makedirs(input_config_dir)
os.makedirs(output_data_dir)
_write_json({}, hyperparameters_file_dir)
_write_json({}, input_data_config_file_dir)
host_name = socket.gethostname()
resources_dict = {
"current_host": host_name,
"hosts": [host_name]
}
_write_json(resources_dict, resource_config_file_dir) | ['def', '_create_training_directories', '(', ')', ':', 'logger', '.', 'info', '(', "'Creating a new training folder under %s .'", '%', 'base_dir', ')', 'os', '.', 'makedirs', '(', 'model_dir', ')', 'os', '.', 'makedirs', '(', 'input_config_dir', ')', 'os', '.', 'makedirs', '(', 'output_data_dir', ')', '_write_json', '(', '{', '}', ',', 'hyperparameters_file_dir', ')', '_write_json', '(', '{', '}', ',', 'input_data_config_file_dir', ')', 'host_name', '=', 'socket', '.', 'gethostname', '(', ')', 'resources_dict', '=', '{', '"current_host"', ':', 'host_name', ',', '"hosts"', ':', '[', 'host_name', ']', '}', '_write_json', '(', 'resources_dict', ',', 'resource_config_file_dir', ')'] | Creates the directory structure and files necessary for training under the base path | ['Creates', 'the', 'directory', 'structure', 'and', 'files', 'necessary', 'for', 'training', 'under', 'the', 'base', 'path'] | train | https://github.com/aws/sagemaker-containers/blob/0030f07abbaf22a55d986d97274d7a8d1aa1f10c/src/sagemaker_containers/_env.py#L150-L168 |
7,251 | ejeschke/ginga | ginga/canvas/render.py | RendererBase.reorder | def reorder(self, dst_order, arr, src_order=None):
"""Reorder the output array to match that needed by the viewer."""
if dst_order is None:
dst_order = self.viewer.rgb_order
if src_order is None:
src_order = self.rgb_order
if src_order != dst_order:
arr = trcalc.reorder_image(dst_order, arr, src_order)
return arr | python | def reorder(self, dst_order, arr, src_order=None):
"""Reorder the output array to match that needed by the viewer."""
if dst_order is None:
dst_order = self.viewer.rgb_order
if src_order is None:
src_order = self.rgb_order
if src_order != dst_order:
arr = trcalc.reorder_image(dst_order, arr, src_order)
return arr | ['def', 'reorder', '(', 'self', ',', 'dst_order', ',', 'arr', ',', 'src_order', '=', 'None', ')', ':', 'if', 'dst_order', 'is', 'None', ':', 'dst_order', '=', 'self', '.', 'viewer', '.', 'rgb_order', 'if', 'src_order', 'is', 'None', ':', 'src_order', '=', 'self', '.', 'rgb_order', 'if', 'src_order', '!=', 'dst_order', ':', 'arr', '=', 'trcalc', '.', 'reorder_image', '(', 'dst_order', ',', 'arr', ',', 'src_order', ')', 'return', 'arr'] | Reorder the output array to match that needed by the viewer. | ['Reorder', 'the', 'output', 'array', 'to', 'match', 'that', 'needed', 'by', 'the', 'viewer', '.'] | train | https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/canvas/render.py#L107-L116 |
7,252 | kivy/python-for-android | pythonforandroid/bootstraps/pygame/build/buildlib/jinja2.egg/jinja2/sandbox.py | SandboxedEnvironment.call | def call(__self, __context, __obj, *args, **kwargs):
"""Call an object from sandboxed code."""
# the double prefixes are to avoid double keyword argument
# errors when proxying the call.
if not __self.is_safe_callable(__obj):
raise SecurityError('%r is not safely callable' % (__obj,))
return __context.call(__obj, *args, **kwargs) | python | def call(__self, __context, __obj, *args, **kwargs):
"""Call an object from sandboxed code."""
# the double prefixes are to avoid double keyword argument
# errors when proxying the call.
if not __self.is_safe_callable(__obj):
raise SecurityError('%r is not safely callable' % (__obj,))
return __context.call(__obj, *args, **kwargs) | ['def', 'call', '(', '__self', ',', '__context', ',', '__obj', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', '# the double prefixes are to avoid double keyword argument', '# errors when proxying the call.', 'if', 'not', '__self', '.', 'is_safe_callable', '(', '__obj', ')', ':', 'raise', 'SecurityError', '(', "'%r is not safely callable'", '%', '(', '__obj', ',', ')', ')', 'return', '__context', '.', 'call', '(', '__obj', ',', '*', 'args', ',', '*', '*', 'kwargs', ')'] | Call an object from sandboxed code. | ['Call', 'an', 'object', 'from', 'sandboxed', 'code', '.'] | train | https://github.com/kivy/python-for-android/blob/8e0e8056bc22e4d5bd3398a6b0301f38ff167933/pythonforandroid/bootstraps/pygame/build/buildlib/jinja2.egg/jinja2/sandbox.py#L253-L259 |
7,253 | Erotemic/utool | utool/util_cache.py | to_json | def to_json(val, allow_pickle=False, pretty=False):
r"""
Converts a python object to a JSON string using the utool convention
Args:
val (object):
Returns:
str: json_str
References:
http://stackoverflow.com/questions/11561932/why-does-json-dumpslistnp
CommandLine:
python -m utool.util_cache --test-to_json
python3 -m utool.util_cache --test-to_json
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_cache import * # NOQA
>>> import utool as ut
>>> import numpy as np
>>> import uuid
>>> val = [
>>> '{"foo": "not a dict"}',
>>> 1.3,
>>> [1],
>>> # {1: 1, 2: 2, 3: 3}, cant use integer keys
>>> {1, 2, 3},
>>> slice(1, None, 1),
>>> b'an ascii string',
>>> np.array([1, 2, 3]),
>>> ut.get_zero_uuid(),
>>> ut.LazyDict(x='fo'),
>>> ut.LazyDict,
>>> {'x': {'a', 'b', 'cde'}, 'y': [1]}
>>> ]
>>> #val = ut.LazyDict(x='fo')
>>> allow_pickle = True
>>> if not allow_pickle:
>>> val = val[:-2]
>>> json_str = ut.to_json(val, allow_pickle=allow_pickle)
>>> result = ut.repr3(json_str)
>>> reload_val = ut.from_json(json_str, allow_pickle=allow_pickle)
>>> # Make sure pickle doesnt happen by default
>>> try:
>>> json_str = ut.to_json(val)
>>> assert False or not allow_pickle, 'expected a type error'
>>> except TypeError:
>>> print('Correctly got type error')
>>> try:
>>> json_str = ut.from_json(val)
>>> assert False, 'expected a type error'
>>> except TypeError:
>>> print('Correctly got type error')
>>> print(result)
>>> print('original = ' + ut.repr3(val, nl=1))
>>> print('reconstructed = ' + ut.repr3(reload_val, nl=1))
>>> assert reload_val[6] == val[6].tolist()
>>> assert reload_val[6] is not val[6]
"""
UtoolJSONEncoder = make_utool_json_encoder(allow_pickle)
json_kw = {}
json_kw['cls'] = UtoolJSONEncoder
if pretty:
json_kw['indent'] = 4
json_kw['separators'] = (',', ': ')
json_str = json.dumps(val, **json_kw)
return json_str | python | def to_json(val, allow_pickle=False, pretty=False):
r"""
Converts a python object to a JSON string using the utool convention
Args:
val (object):
Returns:
str: json_str
References:
http://stackoverflow.com/questions/11561932/why-does-json-dumpslistnp
CommandLine:
python -m utool.util_cache --test-to_json
python3 -m utool.util_cache --test-to_json
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_cache import * # NOQA
>>> import utool as ut
>>> import numpy as np
>>> import uuid
>>> val = [
>>> '{"foo": "not a dict"}',
>>> 1.3,
>>> [1],
>>> # {1: 1, 2: 2, 3: 3}, cant use integer keys
>>> {1, 2, 3},
>>> slice(1, None, 1),
>>> b'an ascii string',
>>> np.array([1, 2, 3]),
>>> ut.get_zero_uuid(),
>>> ut.LazyDict(x='fo'),
>>> ut.LazyDict,
>>> {'x': {'a', 'b', 'cde'}, 'y': [1]}
>>> ]
>>> #val = ut.LazyDict(x='fo')
>>> allow_pickle = True
>>> if not allow_pickle:
>>> val = val[:-2]
>>> json_str = ut.to_json(val, allow_pickle=allow_pickle)
>>> result = ut.repr3(json_str)
>>> reload_val = ut.from_json(json_str, allow_pickle=allow_pickle)
>>> # Make sure pickle doesnt happen by default
>>> try:
>>> json_str = ut.to_json(val)
>>> assert False or not allow_pickle, 'expected a type error'
>>> except TypeError:
>>> print('Correctly got type error')
>>> try:
>>> json_str = ut.from_json(val)
>>> assert False, 'expected a type error'
>>> except TypeError:
>>> print('Correctly got type error')
>>> print(result)
>>> print('original = ' + ut.repr3(val, nl=1))
>>> print('reconstructed = ' + ut.repr3(reload_val, nl=1))
>>> assert reload_val[6] == val[6].tolist()
>>> assert reload_val[6] is not val[6]
"""
UtoolJSONEncoder = make_utool_json_encoder(allow_pickle)
json_kw = {}
json_kw['cls'] = UtoolJSONEncoder
if pretty:
json_kw['indent'] = 4
json_kw['separators'] = (',', ': ')
json_str = json.dumps(val, **json_kw)
return json_str | ['def', 'to_json', '(', 'val', ',', 'allow_pickle', '=', 'False', ',', 'pretty', '=', 'False', ')', ':', 'UtoolJSONEncoder', '=', 'make_utool_json_encoder', '(', 'allow_pickle', ')', 'json_kw', '=', '{', '}', 'json_kw', '[', "'cls'", ']', '=', 'UtoolJSONEncoder', 'if', 'pretty', ':', 'json_kw', '[', "'indent'", ']', '=', '4', 'json_kw', '[', "'separators'", ']', '=', '(', "','", ',', "': '", ')', 'json_str', '=', 'json', '.', 'dumps', '(', 'val', ',', '*', '*', 'json_kw', ')', 'return', 'json_str'] | r"""
Converts a python object to a JSON string using the utool convention
Args:
val (object):
Returns:
str: json_str
References:
http://stackoverflow.com/questions/11561932/why-does-json-dumpslistnp
CommandLine:
python -m utool.util_cache --test-to_json
python3 -m utool.util_cache --test-to_json
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_cache import * # NOQA
>>> import utool as ut
>>> import numpy as np
>>> import uuid
>>> val = [
>>> '{"foo": "not a dict"}',
>>> 1.3,
>>> [1],
>>> # {1: 1, 2: 2, 3: 3}, cant use integer keys
>>> {1, 2, 3},
>>> slice(1, None, 1),
>>> b'an ascii string',
>>> np.array([1, 2, 3]),
>>> ut.get_zero_uuid(),
>>> ut.LazyDict(x='fo'),
>>> ut.LazyDict,
>>> {'x': {'a', 'b', 'cde'}, 'y': [1]}
>>> ]
>>> #val = ut.LazyDict(x='fo')
>>> allow_pickle = True
>>> if not allow_pickle:
>>> val = val[:-2]
>>> json_str = ut.to_json(val, allow_pickle=allow_pickle)
>>> result = ut.repr3(json_str)
>>> reload_val = ut.from_json(json_str, allow_pickle=allow_pickle)
>>> # Make sure pickle doesnt happen by default
>>> try:
>>> json_str = ut.to_json(val)
>>> assert False or not allow_pickle, 'expected a type error'
>>> except TypeError:
>>> print('Correctly got type error')
>>> try:
>>> json_str = ut.from_json(val)
>>> assert False, 'expected a type error'
>>> except TypeError:
>>> print('Correctly got type error')
>>> print(result)
>>> print('original = ' + ut.repr3(val, nl=1))
>>> print('reconstructed = ' + ut.repr3(reload_val, nl=1))
>>> assert reload_val[6] == val[6].tolist()
>>> assert reload_val[6] is not val[6] | ['r', 'Converts', 'a', 'python', 'object', 'to', 'a', 'JSON', 'string', 'using', 'the', 'utool', 'convention'] | train | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_cache.py#L532-L600 |
7,254 | joferkington/mplstereonet | mplstereonet/contouring.py | _schmidt_count | def _schmidt_count(cos_dist, sigma=None):
"""Schmidt (a.k.a. 1%) counting kernel function."""
radius = 0.01
count = ((1 - cos_dist) <= radius).astype(float)
# To offset the count.sum() - 0.5 required for the kamb methods...
count = 0.5 / count.size + count
return count, (cos_dist.size * radius) | python | def _schmidt_count(cos_dist, sigma=None):
"""Schmidt (a.k.a. 1%) counting kernel function."""
radius = 0.01
count = ((1 - cos_dist) <= radius).astype(float)
# To offset the count.sum() - 0.5 required for the kamb methods...
count = 0.5 / count.size + count
return count, (cos_dist.size * radius) | ['def', '_schmidt_count', '(', 'cos_dist', ',', 'sigma', '=', 'None', ')', ':', 'radius', '=', '0.01', 'count', '=', '(', '(', '1', '-', 'cos_dist', ')', '<=', 'radius', ')', '.', 'astype', '(', 'float', ')', '# To offset the count.sum() - 0.5 required for the kamb methods...', 'count', '=', '0.5', '/', 'count', '.', 'size', '+', 'count', 'return', 'count', ',', '(', 'cos_dist', '.', 'size', '*', 'radius', ')'] | Schmidt (a.k.a. 1%) counting kernel function. | ['Schmidt', '(', 'a', '.', 'k', '.', 'a', '.', '1%', ')', 'counting', 'kernel', 'function', '.'] | train | https://github.com/joferkington/mplstereonet/blob/f6d78ca49807915d4223e864e12bb24d497cc2d6/mplstereonet/contouring.py#L216-L222 |
7,255 | StackStorm/pybind | pybind/slxos/v17s_1_02/qos_mpls/map_/inexp_outexp/__init__.py | inexp_outexp._set_in_exp | def _set_in_exp(self, v, load=False):
"""
Setter method for in_exp, mapped from YANG variable /qos_mpls/map/inexp_outexp/in_exp (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_in_exp is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_in_exp() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("in_exp_in_values",in_exp.in_exp, yang_name="in-exp", rest_name="in-exp", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='in-exp-in-values', extensions={u'tailf-common': {u'info': u'Map Inexp value to Outexp value', u'cli-suppress-mode': None, u'cli-incomplete-no': None, u'cli-suppress-list-no': None, u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-suppress-key-abbreviation': None, u'cli-incomplete-command': None, u'callpoint': u'QosMplsInexpOutexpCallpoint'}}), is_container='list', yang_name="in-exp", rest_name="in-exp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Map Inexp value to Outexp value', u'cli-suppress-mode': None, u'cli-incomplete-no': None, u'cli-suppress-list-no': None, u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-suppress-key-abbreviation': None, u'cli-incomplete-command': None, u'callpoint': u'QosMplsInexpOutexpCallpoint'}}, namespace='urn:brocade.com:mgmt:brocade-qos-mpls', defining_module='brocade-qos-mpls', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """in_exp must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("in_exp_in_values",in_exp.in_exp, yang_name="in-exp", rest_name="in-exp", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='in-exp-in-values', extensions={u'tailf-common': {u'info': u'Map Inexp value to Outexp value', u'cli-suppress-mode': None, u'cli-incomplete-no': None, u'cli-suppress-list-no': None, u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-suppress-key-abbreviation': None, u'cli-incomplete-command': None, u'callpoint': u'QosMplsInexpOutexpCallpoint'}}), is_container='list', yang_name="in-exp", rest_name="in-exp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Map Inexp value to Outexp value', u'cli-suppress-mode': None, u'cli-incomplete-no': None, u'cli-suppress-list-no': None, u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-suppress-key-abbreviation': None, u'cli-incomplete-command': None, u'callpoint': u'QosMplsInexpOutexpCallpoint'}}, namespace='urn:brocade.com:mgmt:brocade-qos-mpls', defining_module='brocade-qos-mpls', yang_type='list', is_config=True)""",
})
self.__in_exp = t
if hasattr(self, '_set'):
self._set() | python | def _set_in_exp(self, v, load=False):
"""
Setter method for in_exp, mapped from YANG variable /qos_mpls/map/inexp_outexp/in_exp (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_in_exp is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_in_exp() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("in_exp_in_values",in_exp.in_exp, yang_name="in-exp", rest_name="in-exp", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='in-exp-in-values', extensions={u'tailf-common': {u'info': u'Map Inexp value to Outexp value', u'cli-suppress-mode': None, u'cli-incomplete-no': None, u'cli-suppress-list-no': None, u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-suppress-key-abbreviation': None, u'cli-incomplete-command': None, u'callpoint': u'QosMplsInexpOutexpCallpoint'}}), is_container='list', yang_name="in-exp", rest_name="in-exp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Map Inexp value to Outexp value', u'cli-suppress-mode': None, u'cli-incomplete-no': None, u'cli-suppress-list-no': None, u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-suppress-key-abbreviation': None, u'cli-incomplete-command': None, u'callpoint': u'QosMplsInexpOutexpCallpoint'}}, namespace='urn:brocade.com:mgmt:brocade-qos-mpls', defining_module='brocade-qos-mpls', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """in_exp must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("in_exp_in_values",in_exp.in_exp, yang_name="in-exp", rest_name="in-exp", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='in-exp-in-values', extensions={u'tailf-common': {u'info': u'Map Inexp value to Outexp value', u'cli-suppress-mode': None, u'cli-incomplete-no': None, u'cli-suppress-list-no': None, u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-suppress-key-abbreviation': None, u'cli-incomplete-command': None, u'callpoint': u'QosMplsInexpOutexpCallpoint'}}), is_container='list', yang_name="in-exp", rest_name="in-exp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Map Inexp value to Outexp value', u'cli-suppress-mode': None, u'cli-incomplete-no': None, u'cli-suppress-list-no': None, u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-suppress-key-abbreviation': None, u'cli-incomplete-command': None, u'callpoint': u'QosMplsInexpOutexpCallpoint'}}, namespace='urn:brocade.com:mgmt:brocade-qos-mpls', defining_module='brocade-qos-mpls', yang_type='list', is_config=True)""",
})
self.__in_exp = t
if hasattr(self, '_set'):
self._set() | ['def', '_set_in_exp', '(', 'self', ',', 'v', ',', 'load', '=', 'False', ')', ':', 'if', 'hasattr', '(', 'v', ',', '"_utype"', ')', ':', 'v', '=', 'v', '.', '_utype', '(', 'v', ')', 'try', ':', 't', '=', 'YANGDynClass', '(', 'v', ',', 'base', '=', 'YANGListType', '(', '"in_exp_in_values"', ',', 'in_exp', '.', 'in_exp', ',', 'yang_name', '=', '"in-exp"', ',', 'rest_name', '=', '"in-exp"', ',', 'parent', '=', 'self', ',', 'is_container', '=', "'list'", ',', 'user_ordered', '=', 'False', ',', 'path_helper', '=', 'self', '.', '_path_helper', ',', 'yang_keys', '=', "'in-exp-in-values'", ',', 'extensions', '=', '{', "u'tailf-common'", ':', '{', "u'info'", ':', "u'Map Inexp value to Outexp value'", ',', "u'cli-suppress-mode'", ':', 'None', ',', "u'cli-incomplete-no'", ':', 'None', ',', "u'cli-suppress-list-no'", ':', 'None', ',', "u'cli-compact-syntax'", ':', 'None', ',', "u'cli-sequence-commands'", ':', 'None', ',', "u'cli-suppress-key-abbreviation'", ':', 'None', ',', "u'cli-incomplete-command'", ':', 'None', ',', "u'callpoint'", ':', "u'QosMplsInexpOutexpCallpoint'", '}', '}', ')', ',', 'is_container', '=', "'list'", ',', 'yang_name', '=', '"in-exp"', ',', 'rest_name', '=', '"in-exp"', ',', 'parent', '=', 'self', ',', 'path_helper', '=', 'self', '.', '_path_helper', ',', 'extmethods', '=', 'self', '.', '_extmethods', ',', 'register_paths', '=', 'True', ',', 'extensions', '=', '{', "u'tailf-common'", ':', '{', "u'info'", ':', "u'Map Inexp value to Outexp value'", ',', "u'cli-suppress-mode'", ':', 'None', ',', "u'cli-incomplete-no'", ':', 'None', ',', "u'cli-suppress-list-no'", ':', 'None', ',', "u'cli-compact-syntax'", ':', 'None', ',', "u'cli-sequence-commands'", ':', 'None', ',', "u'cli-suppress-key-abbreviation'", ':', 'None', ',', "u'cli-incomplete-command'", ':', 'None', ',', "u'callpoint'", ':', "u'QosMplsInexpOutexpCallpoint'", '}', '}', ',', 'namespace', '=', "'urn:brocade.com:mgmt:brocade-qos-mpls'", ',', 'defining_module', '=', "'brocade-qos-mpls'", ',', 'yang_type', '=', "'list'", ',', 'is_config', '=', 'True', ')', 'except', '(', 'TypeError', ',', 'ValueError', ')', ':', 'raise', 'ValueError', '(', '{', "'error-string'", ':', '"""in_exp must be of a type compatible with list"""', ',', "'defined-type'", ':', '"list"', ',', "'generated-type'", ':', '"""YANGDynClass(base=YANGListType("in_exp_in_values",in_exp.in_exp, yang_name="in-exp", rest_name="in-exp", parent=self, is_container=\'list\', user_ordered=False, path_helper=self._path_helper, yang_keys=\'in-exp-in-values\', extensions={u\'tailf-common\': {u\'info\': u\'Map Inexp value to Outexp value\', u\'cli-suppress-mode\': None, u\'cli-incomplete-no\': None, u\'cli-suppress-list-no\': None, u\'cli-compact-syntax\': None, u\'cli-sequence-commands\': None, u\'cli-suppress-key-abbreviation\': None, u\'cli-incomplete-command\': None, u\'callpoint\': u\'QosMplsInexpOutexpCallpoint\'}}), is_container=\'list\', yang_name="in-exp", rest_name="in-exp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u\'tailf-common\': {u\'info\': u\'Map Inexp value to Outexp value\', u\'cli-suppress-mode\': None, u\'cli-incomplete-no\': None, u\'cli-suppress-list-no\': None, u\'cli-compact-syntax\': None, u\'cli-sequence-commands\': None, u\'cli-suppress-key-abbreviation\': None, u\'cli-incomplete-command\': None, u\'callpoint\': u\'QosMplsInexpOutexpCallpoint\'}}, namespace=\'urn:brocade.com:mgmt:brocade-qos-mpls\', defining_module=\'brocade-qos-mpls\', yang_type=\'list\', 
is_config=True)"""', ',', '}', ')', 'self', '.', '__in_exp', '=', 't', 'if', 'hasattr', '(', 'self', ',', "'_set'", ')', ':', 'self', '.', '_set', '(', ')'] | Setter method for in_exp, mapped from YANG variable /qos_mpls/map/inexp_outexp/in_exp (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_in_exp is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_in_exp() directly. | ['Setter', 'method', 'for', 'in_exp', 'mapped', 'from', 'YANG', 'variable', '/', 'qos_mpls', '/', 'map', '/', 'inexp_outexp', '/', 'in_exp', '(', 'list', ')', 'If', 'this', 'variable', 'is', 'read', '-', 'only', '(', 'config', ':', 'false', ')', 'in', 'the', 'source', 'YANG', 'file', 'then', '_set_in_exp', 'is', 'considered', 'as', 'a', 'private', 'method', '.', 'Backends', 'looking', 'to', 'populate', 'this', 'variable', 'should', 'do', 'so', 'via', 'calling', 'thisObj', '.', '_set_in_exp', '()', 'directly', '.'] | train | https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/slxos/v17s_1_02/qos_mpls/map_/inexp_outexp/__init__.py#L131-L152 |
7,256 | abarker/pdfCropMargins | src/pdfCropMargins/calculate_bounding_boxes.py | get_bounding_box_list | def get_bounding_box_list(input_doc_fname, input_doc, full_page_box_list,
set_of_page_nums_to_crop, argparse_args, chosen_PdfFileWriter):
"""Calculate a bounding box for each page in the document. The first
argument is the filename of the document's original PDF file, the second is
the PdfFileReader for the document. The argument full_page_box_list is a list
of the full-page-size boxes (which is used to correct for any nonzero origins
in the PDF coordinates). The set_of_page_nums_to_crop argument is the set of page
numbers to crop; it is passed so that unnecessary calculations can be
skipped. The argparse_args argument should be passed the args parsed from
the command line by argparse. The chosen_PdfFileWriter is the PdfFileWriter
class from whichever pyPdf package was chosen by the main program. The
function returns the list of bounding boxes."""
global args, page_nums_to_crop, PdfFileWriter
args = argparse_args # Make args available to all funs in module, as a global.
page_nums_to_crop = set_of_page_nums_to_crop # Make the set of pages global, too.
PdfFileWriter = chosen_PdfFileWriter # Be sure correct PdfFileWriter is set.
if args.gsBbox:
if args.verbose:
print("\nUsing Ghostscript to calculate the bounding boxes.")
bbox_list = ex.get_bounding_box_list_ghostscript(input_doc_fname,
args.resX, args.resY, args.fullPageBox)
else:
if not hasPIL:
print("\nError in pdfCropMargins: No version of the PIL package (or a"
"\nfork like Pillow) was found. Either install that Python"
"\npackage or use the Ghostscript flag '--gsBbox' (or '-gs') if you"
"\nhave Ghostscript installed.", file=sys.stderr)
ex.cleanup_and_exit(1)
bbox_list = get_bounding_box_list_render_image(input_doc_fname, input_doc)
# Now we need to use the full page boxes to translate for non-zero origin.
bbox_list = correct_bounding_box_list_for_nonzero_origin(bbox_list,
full_page_box_list)
return bbox_list | python | def get_bounding_box_list(input_doc_fname, input_doc, full_page_box_list,
set_of_page_nums_to_crop, argparse_args, chosen_PdfFileWriter):
"""Calculate a bounding box for each page in the document. The first
argument is the filename of the document's original PDF file, the second is
the PdfFileReader for the document. The argument full_page_box_list is a list
of the full-page-size boxes (which is used to correct for any nonzero origins
in the PDF coordinates). The set_of_page_nums_to_crop argument is the set of page
numbers to crop; it is passed so that unnecessary calculations can be
skipped. The argparse_args argument should be passed the args parsed from
the command line by argparse. The chosen_PdfFileWriter is the PdfFileWriter
class from whichever pyPdf package was chosen by the main program. The
function returns the list of bounding boxes."""
global args, page_nums_to_crop, PdfFileWriter
args = argparse_args # Make args available to all funs in module, as a global.
page_nums_to_crop = set_of_page_nums_to_crop # Make the set of pages global, too.
PdfFileWriter = chosen_PdfFileWriter # Be sure correct PdfFileWriter is set.
if args.gsBbox:
if args.verbose:
print("\nUsing Ghostscript to calculate the bounding boxes.")
bbox_list = ex.get_bounding_box_list_ghostscript(input_doc_fname,
args.resX, args.resY, args.fullPageBox)
else:
if not hasPIL:
print("\nError in pdfCropMargins: No version of the PIL package (or a"
"\nfork like Pillow) was found. Either install that Python"
"\npackage or use the Ghostscript flag '--gsBbox' (or '-gs') if you"
"\nhave Ghostscript installed.", file=sys.stderr)
ex.cleanup_and_exit(1)
bbox_list = get_bounding_box_list_render_image(input_doc_fname, input_doc)
# Now we need to use the full page boxes to translate for non-zero origin.
bbox_list = correct_bounding_box_list_for_nonzero_origin(bbox_list,
full_page_box_list)
return bbox_list | ['def', 'get_bounding_box_list', '(', 'input_doc_fname', ',', 'input_doc', ',', 'full_page_box_list', ',', 'set_of_page_nums_to_crop', ',', 'argparse_args', ',', 'chosen_PdfFileWriter', ')', ':', 'global', 'args', ',', 'page_nums_to_crop', ',', 'PdfFileWriter', 'args', '=', 'argparse_args', '# Make args available to all funs in module, as a global.', 'page_nums_to_crop', '=', 'set_of_page_nums_to_crop', '# Make the set of pages global, too.', 'PdfFileWriter', '=', 'chosen_PdfFileWriter', '# Be sure correct PdfFileWriter is set.', 'if', 'args', '.', 'gsBbox', ':', 'if', 'args', '.', 'verbose', ':', 'print', '(', '"\\nUsing Ghostscript to calculate the bounding boxes."', ')', 'bbox_list', '=', 'ex', '.', 'get_bounding_box_list_ghostscript', '(', 'input_doc_fname', ',', 'args', '.', 'resX', ',', 'args', '.', 'resY', ',', 'args', '.', 'fullPageBox', ')', 'else', ':', 'if', 'not', 'hasPIL', ':', 'print', '(', '"\\nError in pdfCropMargins: No version of the PIL package (or a"', '"\\nfork like Pillow) was found. Either install that Python"', '"\\npackage or use the Ghostscript flag \'--gsBbox\' (or \'-gs\') if you"', '"\\nhave Ghostscript installed."', ',', 'file', '=', 'sys', '.', 'stderr', ')', 'ex', '.', 'cleanup_and_exit', '(', '1', ')', 'bbox_list', '=', 'get_bounding_box_list_render_image', '(', 'input_doc_fname', ',', 'input_doc', ')', '# Now we need to use the full page boxes to translate for non-zero origin.', 'bbox_list', '=', 'correct_bounding_box_list_for_nonzero_origin', '(', 'bbox_list', ',', 'full_page_box_list', ')', 'return', 'bbox_list'] | Calculate a bounding box for each page in the document. The first
argument is the filename of the document's original PDF file, the second is
the PdfFileReader for the document. The argument full_page_box_list is a list
of the full-page-size boxes (which is used to correct for any nonzero origins
in the PDF coordinates). The set_of_page_nums_to_crop argument is the set of page
numbers to crop; it is passed so that unnecessary calculations can be
skipped. The argparse_args argument should be passed the args parsed from
the command line by argparse. The chosen_PdfFileWriter is the PdfFileWriter
class from whichever pyPdf package was chosen by the main program. The
function returns the list of bounding boxes. | ['Calculate', 'a', 'bounding', 'box', 'for', 'each', 'page', 'in', 'the', 'document', '.', 'The', 'first', 'argument', 'is', 'the', 'filename', 'of', 'the', 'document', 's', 'original', 'PDF', 'file', 'the', 'second', 'is', 'the', 'PdfFileReader', 'for', 'the', 'document', '.', 'The', 'argument', 'full_page_box_list', 'is', 'a', 'list', 'of', 'the', 'full', '-', 'page', '-', 'size', 'boxes', '(', 'which', 'is', 'used', 'to', 'correct', 'for', 'any', 'nonzero', 'origins', 'in', 'the', 'PDF', 'coordinates', ')', '.', 'The', 'set_of_page_nums_to_crop', 'argument', 'is', 'the', 'set', 'of', 'page', 'numbers', 'to', 'crop', ';', 'it', 'is', 'passed', 'so', 'that', 'unnecessary', 'calculations', 'can', 'be', 'skipped', '.', 'The', 'argparse_args', 'argument', 'should', 'be', 'passed', 'the', 'args', 'parsed', 'from', 'the', 'command', 'line', 'by', 'argparse', '.', 'The', 'chosen_PdfFileWriter', 'is', 'the', 'PdfFileWriter', 'class', 'from', 'whichever', 'pyPdf', 'package', 'was', 'chosen', 'by', 'the', 'main', 'program', '.', 'The', 'function', 'returns', 'the', 'list', 'of', 'bounding', 'boxes', '.'] | train | https://github.com/abarker/pdfCropMargins/blob/55aca874613750ebf4ae69fd8851bdbb7696d6ac/src/pdfCropMargins/calculate_bounding_boxes.py#L62-L97 |
7,257 | naphatkrit/easyci | easyci/locking.py | init | def init(vcs):
"""Initialize the locking module for a repository
"""
path = os.path.join(vcs.private_dir(), 'locks')
if not os.path.exists(path):
os.mkdir(path) | python | def init(vcs):
"""Initialize the locking module for a repository
"""
path = os.path.join(vcs.private_dir(), 'locks')
if not os.path.exists(path):
os.mkdir(path) | ['def', 'init', '(', 'vcs', ')', ':', 'path', '=', 'os', '.', 'path', '.', 'join', '(', 'vcs', '.', 'private_dir', '(', ')', ',', "'locks'", ')', 'if', 'not', 'os', '.', 'path', '.', 'exists', '(', 'path', ')', ':', 'os', '.', 'mkdir', '(', 'path', ')'] | Initialize the locking module for a repository | ['Initialize', 'the', 'locking', 'module', 'for', 'a', 'repository'] | train | https://github.com/naphatkrit/easyci/blob/7aee8d7694fe4e2da42ce35b0f700bc840c8b95f/easyci/locking.py#L24-L29 |
7,258 | minio/minio-py | minio/parsers.py | _iso8601_to_localized_time | def _iso8601_to_localized_time(date_string):
"""
Convert iso8601 date string into UTC time.
:param date_string: iso8601 formatted date string.
:return: :class:`datetime.datetime`
"""
parsed_date = datetime.strptime(date_string, '%Y-%m-%dT%H:%M:%S.%fZ')
localized_time = pytz.utc.localize(parsed_date)
return localized_time | python | def _iso8601_to_localized_time(date_string):
"""
Convert iso8601 date string into UTC time.
:param date_string: iso8601 formatted date string.
:return: :class:`datetime.datetime`
"""
parsed_date = datetime.strptime(date_string, '%Y-%m-%dT%H:%M:%S.%fZ')
localized_time = pytz.utc.localize(parsed_date)
return localized_time | ['def', '_iso8601_to_localized_time', '(', 'date_string', ')', ':', 'parsed_date', '=', 'datetime', '.', 'strptime', '(', 'date_string', ',', "'%Y-%m-%dT%H:%M:%S.%fZ'", ')', 'localized_time', '=', 'pytz', '.', 'utc', '.', 'localize', '(', 'parsed_date', ')', 'return', 'localized_time'] | Convert iso8601 date string into UTC time.
:param date_string: iso8601 formatted date string.
:return: :class:`datetime.datetime` | ['Convert', 'iso8601', 'date', 'string', 'into', 'UTC', 'time', '.'] | train | https://github.com/minio/minio-py/blob/7107c84183cf5fb4deff68c0a16ab9f1c0b4c37e/minio/parsers.py#L356-L365 |
7,259 | hydraplatform/hydra-base | hydra_base/lib/units.py | update_dimension | def update_dimension(dimension,**kwargs):
"""
Update a dimension in the DB.
Raises an exception if the dimension does not exist.
The key is ALWAYS the name and the name itself is not modifiable
"""
db_dimension = None
dimension = JSONObject(dimension)
try:
db_dimension = db.DBSession.query(Dimension).filter(Dimension.id==dimension.id).filter().one()
if "description" in dimension and dimension["description"] is not None:
db_dimension.description = dimension["description"]
if "project_id" in dimension and dimension["project_id"] is not None and dimension["project_id"] != "" and dimension["project_id"].isdigit():
db_dimension.project_id = dimension["project_id"]
except NoResultFound:
raise ResourceNotFoundError("Dimension (ID=%s) does not exist"%(dimension.id))
db.DBSession.flush()
return JSONObject(db_dimension) | python | def update_dimension(dimension,**kwargs):
"""
Update a dimension in the DB.
Raises an exception if the dimension does not exist.
The key is ALWAYS the name and the name itself is not modifiable
"""
db_dimension = None
dimension = JSONObject(dimension)
try:
db_dimension = db.DBSession.query(Dimension).filter(Dimension.id==dimension.id).filter().one()
if "description" in dimension and dimension["description"] is not None:
db_dimension.description = dimension["description"]
if "project_id" in dimension and dimension["project_id"] is not None and dimension["project_id"] != "" and dimension["project_id"].isdigit():
db_dimension.project_id = dimension["project_id"]
except NoResultFound:
raise ResourceNotFoundError("Dimension (ID=%s) does not exist"%(dimension.id))
db.DBSession.flush()
return JSONObject(db_dimension) | ['def', 'update_dimension', '(', 'dimension', ',', '*', '*', 'kwargs', ')', ':', 'db_dimension', '=', 'None', 'dimension', '=', 'JSONObject', '(', 'dimension', ')', 'try', ':', 'db_dimension', '=', 'db', '.', 'DBSession', '.', 'query', '(', 'Dimension', ')', '.', 'filter', '(', 'Dimension', '.', 'id', '==', 'dimension', '.', 'id', ')', '.', 'filter', '(', ')', '.', 'one', '(', ')', 'if', '"description"', 'in', 'dimension', 'and', 'dimension', '[', '"description"', ']', 'is', 'not', 'None', ':', 'db_dimension', '.', 'description', '=', 'dimension', '[', '"description"', ']', 'if', '"project_id"', 'in', 'dimension', 'and', 'dimension', '[', '"project_id"', ']', 'is', 'not', 'None', 'and', 'dimension', '[', '"project_id"', ']', '!=', '""', 'and', 'dimension', '[', '"project_id"', ']', '.', 'isdigit', '(', ')', ':', 'db_dimension', '.', 'project_id', '=', 'dimension', '[', '"project_id"', ']', 'except', 'NoResultFound', ':', 'raise', 'ResourceNotFoundError', '(', '"Dimension (ID=%s) does not exist"', '%', '(', 'dimension', '.', 'id', ')', ')', 'db', '.', 'DBSession', '.', 'flush', '(', ')', 'return', 'JSONObject', '(', 'db_dimension', ')'] | Update a dimension in the DB.
Raises an exception if the dimension does not exist.
The key is ALWAYS the name and the name itself is not modifiable | ['Update', 'a', 'dimension', 'in', 'the', 'DB', '.', 'Raises', 'an', 'exception', 'if', 'the', 'dimension', 'does', 'not', 'exist', '.', 'The', 'key', 'is', 'ALWAYS', 'the', 'name', 'and', 'the', 'name', 'itself', 'is', 'not', 'modifiable'] | train | https://github.com/hydraplatform/hydra-base/blob/9251ff7946505f7a272c87837390acd1c435bc6e/hydra_base/lib/units.py#L340-L360
7,260 | brennv/namedtupled | namedtupled/integrations.py | load_lists | def load_lists(keys=[], values=[], name='NT'):
""" Map namedtuples given a pair of key, value lists. """
mapping = dict(zip(keys, values))
return mapper(mapping, _nt_name=name) | python | def load_lists(keys=[], values=[], name='NT'):
""" Map namedtuples given a pair of key, value lists. """
mapping = dict(zip(keys, values))
return mapper(mapping, _nt_name=name) | ['def', 'load_lists', '(', 'keys', '=', '[', ']', ',', 'values', '=', '[', ']', ',', 'name', '=', "'NT'", ')', ':', 'mapping', '=', 'dict', '(', 'zip', '(', 'keys', ',', 'values', ')', ')', 'return', 'mapper', '(', 'mapping', ',', '_nt_name', '=', 'name', ')'] | Map namedtuples given a pair of key, value lists. | ['Map', 'namedtuples', 'given', 'a', 'pair', 'of', 'key', 'value', 'lists', '.'] | train | https://github.com/brennv/namedtupled/blob/2b8e3bafd82835ef01549d7a266c34454637ff70/namedtupled/integrations.py#L8-L11 |
7,261 | ArduPilot/MAVProxy | MAVProxy/modules/lib/wxhorizon_ui.py | HorizonFrame.updateAARLocations | def updateAARLocations(self):
'''Update the locations of airspeed, altitude and Climb rate.'''
# Locations
self.airspeedText.set_position((self.rightPos-(self.vertSize/10.0),-0.97+(2*self.vertSize)-(self.vertSize/10.0)))
self.altitudeText.set_position((self.rightPos-(self.vertSize/10.0),-0.97+self.vertSize-(0.5*self.vertSize/10.0)))
self.climbRateText.set_position((self.rightPos-(self.vertSize/10.0),-0.97))
# Font Size
self.airspeedText.set_size(self.fontSize)
self.altitudeText.set_size(self.fontSize)
self.climbRateText.set_size(self.fontSize) | python | def updateAARLocations(self):
'''Update the locations of airspeed, altitude and Climb rate.'''
# Locations
self.airspeedText.set_position((self.rightPos-(self.vertSize/10.0),-0.97+(2*self.vertSize)-(self.vertSize/10.0)))
self.altitudeText.set_position((self.rightPos-(self.vertSize/10.0),-0.97+self.vertSize-(0.5*self.vertSize/10.0)))
self.climbRateText.set_position((self.rightPos-(self.vertSize/10.0),-0.97))
# Font Size
self.airspeedText.set_size(self.fontSize)
self.altitudeText.set_size(self.fontSize)
self.climbRateText.set_size(self.fontSize) | ['def', 'updateAARLocations', '(', 'self', ')', ':', '# Locations', 'self', '.', 'airspeedText', '.', 'set_position', '(', '(', 'self', '.', 'rightPos', '-', '(', 'self', '.', 'vertSize', '/', '10.0', ')', ',', '-', '0.97', '+', '(', '2', '*', 'self', '.', 'vertSize', ')', '-', '(', 'self', '.', 'vertSize', '/', '10.0', ')', ')', ')', 'self', '.', 'altitudeText', '.', 'set_position', '(', '(', 'self', '.', 'rightPos', '-', '(', 'self', '.', 'vertSize', '/', '10.0', ')', ',', '-', '0.97', '+', 'self', '.', 'vertSize', '-', '(', '0.5', '*', 'self', '.', 'vertSize', '/', '10.0', ')', ')', ')', 'self', '.', 'climbRateText', '.', 'set_position', '(', '(', 'self', '.', 'rightPos', '-', '(', 'self', '.', 'vertSize', '/', '10.0', ')', ',', '-', '0.97', ')', ')', '# Font Size', 'self', '.', 'airspeedText', '.', 'set_size', '(', 'self', '.', 'fontSize', ')', 'self', '.', 'altitudeText', '.', 'set_size', '(', 'self', '.', 'fontSize', ')', 'self', '.', 'climbRateText', '.', 'set_size', '(', 'self', '.', 'fontSize', ')'] | Update the locations of airspeed, altitude and Climb rate. | ['Update', 'the', 'locations', 'of', 'airspeed', 'altitude', 'and', 'Climb', 'rate', '.'] | train | https://github.com/ArduPilot/MAVProxy/blob/f50bdeff33064876f7dc8dc4683d278ff47f75d5/MAVProxy/modules/lib/wxhorizon_ui.py#L336-L345 |
7,262 | flowersteam/explauto | explauto/interest_model/tree.py | Tree.sample_random | def sample_random(self):
"""
Sample a point in a random leaf.
"""
if self.sampling_mode['volume']:
# Choose a leaf weighted by volume, randomly
if self.leafnode:
return self.sample_bounds()
else:
split_ratio = ((self.split_value - self.bounds_x[0,self.split_dim]) /
(self.bounds_x[1,self.split_dim] - self.bounds_x[0,self.split_dim]))
if split_ratio > np.random.random():
return self.lower.sample(sampling_mode=['random'])
else:
return self.greater.sample(sampling_mode=['random'])
else:
# Choose a leaf randomly
return np.random.choice(self.get_leaves()).sample_bounds() | python | def sample_random(self):
"""
Sample a point in a random leaf.
"""
if self.sampling_mode['volume']:
# Choose a leaf weighted by volume, randomly
if self.leafnode:
return self.sample_bounds()
else:
split_ratio = ((self.split_value - self.bounds_x[0,self.split_dim]) /
(self.bounds_x[1,self.split_dim] - self.bounds_x[0,self.split_dim]))
if split_ratio > np.random.random():
return self.lower.sample(sampling_mode=['random'])
else:
return self.greater.sample(sampling_mode=['random'])
else:
# Choose a leaf randomly
return np.random.choice(self.get_leaves()).sample_bounds() | ['def', 'sample_random', '(', 'self', ')', ':', 'if', 'self', '.', 'sampling_mode', '[', "'volume'", ']', ':', '# Choose a leaf weighted by volume, randomly', 'if', 'self', '.', 'leafnode', ':', 'return', 'self', '.', 'sample_bounds', '(', ')', 'else', ':', 'split_ratio', '=', '(', '(', 'self', '.', 'split_value', '-', 'self', '.', 'bounds_x', '[', '0', ',', 'self', '.', 'split_dim', ']', ')', '/', '(', 'self', '.', 'bounds_x', '[', '1', ',', 'self', '.', 'split_dim', ']', '-', 'self', '.', 'bounds_x', '[', '0', ',', 'self', '.', 'split_dim', ']', ')', ')', 'if', 'split_ratio', '>', 'np', '.', 'random', '.', 'random', '(', ')', ':', 'return', 'self', '.', 'lower', '.', 'sample', '(', 'sampling_mode', '=', '[', "'random'", ']', ')', 'else', ':', 'return', 'self', '.', 'greater', '.', 'sample', '(', 'sampling_mode', '=', '[', "'random'", ']', ')', 'else', ':', '# Choose a leaf randomly', 'return', 'np', '.', 'random', '.', 'choice', '(', 'self', '.', 'get_leaves', '(', ')', ')', '.', 'sample_bounds', '(', ')'] | Sample a point in a random leaf. | ['Sample', 'a', 'point', 'in', 'a', 'random', 'leaf', '.'] | train | https://github.com/flowersteam/explauto/blob/cf0f81ecb9f6412f7276a95bd27359000e1e26b6/explauto/interest_model/tree.py#L244-L262 |
7,263 | ktbyers/netmiko | netmiko/_textfsm/_texttable.py | TextTable.RowWith | def RowWith(self, column, value):
"""Retrieves the first non header row with the column of the given value.
Args:
column: str, the name of the column to check.
value: str, The value of the column to check.
Returns:
A Row() of the first row found, None otherwise.
Raises:
IndexError: The specified column does not exist.
"""
for row in self._table[1:]:
if row[column] == value:
return row
return None | python | def RowWith(self, column, value):
"""Retrieves the first non header row with the column of the given value.
Args:
column: str, the name of the column to check.
value: str, The value of the column to check.
Returns:
A Row() of the first row found, None otherwise.
Raises:
IndexError: The specified column does not exist.
"""
for row in self._table[1:]:
if row[column] == value:
return row
return None | ['def', 'RowWith', '(', 'self', ',', 'column', ',', 'value', ')', ':', 'for', 'row', 'in', 'self', '.', '_table', '[', '1', ':', ']', ':', 'if', 'row', '[', 'column', ']', '==', 'value', ':', 'return', 'row', 'return', 'None'] | Retrieves the first non header row with the column of the given value.
Args:
column: str, the name of the column to check.
value: str, The value of the column to check.
Returns:
A Row() of the first row found, None otherwise.
Raises:
IndexError: The specified column does not exist. | ['Retrieves', 'the', 'first', 'non', 'header', 'row', 'with', 'the', 'column', 'of', 'the', 'given', 'value', '.'] | train | https://github.com/ktbyers/netmiko/blob/54e6116c0b4664de2123081937e0a9a27bdfdfea/netmiko/_textfsm/_texttable.py#L965-L981 |
7,264 | igvteam/igv-jupyter | igv/browser.py | Browser.on | def on(self, eventName, cb):
"""
Subscribe to an igv.js event.
:param eventName: Name of the event. Currently only "locuschange" is supported.
:type str
:param cb: callback function taking a single argument. For the locuschange event this argument will contain
a dictionary of the form {chr, start, end}
:type function
"""
self.eventHandlers[eventName] = cb
return self._send({
"id": self.igv_id,
"command": "on",
"eventName": eventName
}) | python | def on(self, eventName, cb):
"""
Subscribe to an igv.js event.
:param eventName: Name of the event. Currently only "locuschange" is supported.
:type str
:param cb: callback function taking a single argument. For the locuschange event this argument will contain
a dictionary of the form {chr, start, end}
:type function
"""
self.eventHandlers[eventName] = cb
return self._send({
"id": self.igv_id,
"command": "on",
"eventName": eventName
}) | ['def', 'on', '(', 'self', ',', 'eventName', ',', 'cb', ')', ':', 'self', '.', 'eventHandlers', '[', 'eventName', ']', '=', 'cb', 'return', 'self', '.', '_send', '(', '{', '"id"', ':', 'self', '.', 'igv_id', ',', '"command"', ':', '"on"', ',', '"eventName"', ':', 'eventName', '}', ')'] | Subscribe to an igv.js event.
:param eventName: Name of the event. Currently only "locuschange" is supported.
:type str
:param cb: callback function taking a single argument. For the locuschange event this argument will contain
a dictionary of the form {chr, start, end}
:type function | ['Subscribe', 'to', 'an', 'igv', '.', 'js', 'event', '.'] | train | https://github.com/igvteam/igv-jupyter/blob/f93752ce507eae893c203325764551647e28a3dc/igv/browser.py#L151-L166 |
7,265 | osrg/ryu | ryu/controller/controller.py | _split_addr | def _split_addr(addr):
"""
Splits a str of IP address and port pair into (host, port).
Example::
>>> _split_addr('127.0.0.1:6653')
('127.0.0.1', 6653)
>>> _split_addr('[::1]:6653')
('::1', 6653)
Raises ValueError if invalid format.
:param addr: A pair of IP address and port.
:return: IP address and port
"""
e = ValueError('Invalid IP address and port pair: "%s"' % addr)
pair = addr.rsplit(':', 1)
if len(pair) != 2:
raise e
addr, port = pair
if addr.startswith('[') and addr.endswith(']'):
addr = addr.lstrip('[').rstrip(']')
if not ip.valid_ipv6(addr):
raise e
elif not ip.valid_ipv4(addr):
raise e
return addr, int(port, 0) | python | def _split_addr(addr):
"""
Splits a str of IP address and port pair into (host, port).
Example::
>>> _split_addr('127.0.0.1:6653')
('127.0.0.1', 6653)
>>> _split_addr('[::1]:6653')
('::1', 6653)
Raises ValueError if invalid format.
:param addr: A pair of IP address and port.
:return: IP address and port
"""
e = ValueError('Invalid IP address and port pair: "%s"' % addr)
pair = addr.rsplit(':', 1)
if len(pair) != 2:
raise e
addr, port = pair
if addr.startswith('[') and addr.endswith(']'):
addr = addr.lstrip('[').rstrip(']')
if not ip.valid_ipv6(addr):
raise e
elif not ip.valid_ipv4(addr):
raise e
return addr, int(port, 0) | ['def', '_split_addr', '(', 'addr', ')', ':', 'e', '=', 'ValueError', '(', '\'Invalid IP address and port pair: "%s"\'', '%', 'addr', ')', 'pair', '=', 'addr', '.', 'rsplit', '(', "':'", ',', '1', ')', 'if', 'len', '(', 'pair', ')', '!=', '2', ':', 'raise', 'e', 'addr', ',', 'port', '=', 'pair', 'if', 'addr', '.', 'startswith', '(', "'['", ')', 'and', 'addr', '.', 'endswith', '(', "']'", ')', ':', 'addr', '=', 'addr', '.', 'lstrip', '(', "'['", ')', '.', 'rstrip', '(', "']'", ')', 'if', 'not', 'ip', '.', 'valid_ipv6', '(', 'addr', ')', ':', 'raise', 'e', 'elif', 'not', 'ip', '.', 'valid_ipv4', '(', 'addr', ')', ':', 'raise', 'e', 'return', 'addr', ',', 'int', '(', 'port', ',', '0', ')'] | Splits a str of IP address and port pair into (host, port).
Example::
>>> _split_addr('127.0.0.1:6653')
('127.0.0.1', 6653)
>>> _split_addr('[::1]:6653')
('::1', 6653)
Raises ValueError if invalid format.
:param addr: A pair of IP address and port.
:return: IP address and port | ['Splits', 'a', 'str', 'of', 'IP', 'address', 'and', 'port', 'pair', 'into', '(', 'host', 'port', ')', '.'] | train | https://github.com/osrg/ryu/blob/6f906e72c92e10bd0264c9b91a2f7bb85b97780c/ryu/controller/controller.py#L92-L121 |
7,266 | opencobra/memote | memote/utils.py | jsonify | def jsonify(obj, pretty=False):
"""
Turn a nested object into a (compressed) JSON string.
Parameters
----------
obj : dict
Any kind of dictionary structure.
pretty : bool, optional
Whether to format the resulting JSON in a more legible way (
default False).
"""
if pretty:
params = dict(sort_keys=True, indent=2, allow_nan=False,
separators=(",", ": "), ensure_ascii=False)
else:
params = dict(sort_keys=False, indent=None, allow_nan=False,
separators=(",", ":"), ensure_ascii=False)
try:
return json.dumps(obj, **params)
except (TypeError, ValueError) as error:
LOGGER.critical(
"The memote result structure is incompatible with the JSON "
"standard.")
log_json_incompatible_types(obj)
raise_with_traceback(error) | python | def jsonify(obj, pretty=False):
"""
Turn a nested object into a (compressed) JSON string.
Parameters
----------
obj : dict
Any kind of dictionary structure.
pretty : bool, optional
Whether to format the resulting JSON in a more legible way (
default False).
"""
if pretty:
params = dict(sort_keys=True, indent=2, allow_nan=False,
separators=(",", ": "), ensure_ascii=False)
else:
params = dict(sort_keys=False, indent=None, allow_nan=False,
separators=(",", ":"), ensure_ascii=False)
try:
return json.dumps(obj, **params)
except (TypeError, ValueError) as error:
LOGGER.critical(
"The memote result structure is incompatible with the JSON "
"standard.")
log_json_incompatible_types(obj)
raise_with_traceback(error) | ['def', 'jsonify', '(', 'obj', ',', 'pretty', '=', 'False', ')', ':', 'if', 'pretty', ':', 'params', '=', 'dict', '(', 'sort_keys', '=', 'True', ',', 'indent', '=', '2', ',', 'allow_nan', '=', 'False', ',', 'separators', '=', '(', '","', ',', '": "', ')', ',', 'ensure_ascii', '=', 'False', ')', 'else', ':', 'params', '=', 'dict', '(', 'sort_keys', '=', 'False', ',', 'indent', '=', 'None', ',', 'allow_nan', '=', 'False', ',', 'separators', '=', '(', '","', ',', '":"', ')', ',', 'ensure_ascii', '=', 'False', ')', 'try', ':', 'return', 'json', '.', 'dumps', '(', 'obj', ',', '*', '*', 'params', ')', 'except', '(', 'TypeError', ',', 'ValueError', ')', 'as', 'error', ':', 'LOGGER', '.', 'critical', '(', '"The memote result structure is incompatible with the JSON "', '"standard."', ')', 'log_json_incompatible_types', '(', 'obj', ')', 'raise_with_traceback', '(', 'error', ')'] | Turn a nested object into a (compressed) JSON string.
Parameters
----------
obj : dict
Any kind of dictionary structure.
pretty : bool, optional
Whether to format the resulting JSON in a more legible way (
default False). | ['Turn', 'a', 'nested', 'object', 'into', 'a', '(', 'compressed', ')', 'JSON', 'string', '.'] | train | https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/utils.py#L225-L251 |
7,267 | saltstack/salt | salt/cloud/clouds/msazure.py | avail_images | def avail_images(conn=None, call=None):
'''
List available images for Azure
'''
if call == 'action':
raise SaltCloudSystemExit(
'The avail_images function must be called with '
'-f or --function, or with the --list-images option'
)
if not conn:
conn = get_conn()
ret = {}
for item in conn.list_os_images():
ret[item.name] = object_to_dict(item)
for item in conn.list_vm_images():
ret[item.name] = object_to_dict(item)
return ret | python | def avail_images(conn=None, call=None):
'''
List available images for Azure
'''
if call == 'action':
raise SaltCloudSystemExit(
'The avail_images function must be called with '
'-f or --function, or with the --list-images option'
)
if not conn:
conn = get_conn()
ret = {}
for item in conn.list_os_images():
ret[item.name] = object_to_dict(item)
for item in conn.list_vm_images():
ret[item.name] = object_to_dict(item)
return ret | ['def', 'avail_images', '(', 'conn', '=', 'None', ',', 'call', '=', 'None', ')', ':', 'if', 'call', '==', "'action'", ':', 'raise', 'SaltCloudSystemExit', '(', "'The avail_images function must be called with '", "'-f or --function, or with the --list-images option'", ')', 'if', 'not', 'conn', ':', 'conn', '=', 'get_conn', '(', ')', 'ret', '=', '{', '}', 'for', 'item', 'in', 'conn', '.', 'list_os_images', '(', ')', ':', 'ret', '[', 'item', '.', 'name', ']', '=', 'object_to_dict', '(', 'item', ')', 'for', 'item', 'in', 'conn', '.', 'list_vm_images', '(', ')', ':', 'ret', '[', 'item', '.', 'name', ']', '=', 'object_to_dict', '(', 'item', ')', 'return', 'ret'] | List available images for Azure | ['List', 'available', 'images', 'for', 'Azure'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/msazure.py#L177-L195 |
7,268 | casastorta/python-sar | sar/parser.py | Parser.__split_info | def __split_info(self, info_part, patternsname, patterns):
"""
Splits info from SAR parts into logical stuff :-)
:param info_part: Part of SAR output we want to split into usable data
:param patternsname: ???
:param patterns: ???
:return: ``List``-style info from SAR files, now finally \
completely parsed into meaningful data for further processing
"""
pattern = patterns['PATTERN']
if pattern == '':
return False
return_dict = {}
pattern_re = re.compile(pattern)
for part_line in info_part.split('\n'):
if part_line.strip() != '' and not pattern_re.search(part_line):
# Take care of AM/PM timestamps in SAR file
is_24hr = True
is_AM = False
if part_line[9:11] == 'AM':
is_24hr = False
is_AM = True
elif part_line[9:11] == 'PM':
is_24hr = False
is_AM = False
if is_24hr is False:
part_line = ('%s_%s XX %s' % (part_line[:8], part_line[9:11], part_line[12:]))
# Line is not empty, nor it's header.
# let's hit the road Jack!
elems = part_line.split()
full_time = elems[0].strip()
if full_time != "Average:":
# Convert time to 24hr format if needed
if is_24hr is False:
full_time = full_time[:-3]
# 12 is a bitch in AM/PM notation
if full_time[:2] == '12':
if is_AM is True:
full_time = ('%s:%s' % ('00', full_time[3:]))
is_AM = not is_AM
if is_AM is False and full_time[0:2] != '00':
hours = int(full_time[:2]) + 12
hours = ('%02d' % (hours,))
full_time = ('%s:%s' % (hours, full_time[3:]))
try:
blah = return_dict[full_time]
del blah
except KeyError:
return_dict[full_time] = {}
fields = self.__fields[patternsname]
pairs = patterns["PAIRS"]
for sectionname in pairs.iterkeys():
value = elems[fields[pairs[sectionname]]]
if sectionname == 'membuffer' or \
sectionname == 'memcache' or \
sectionname == 'memfree' or \
sectionname == 'memused' or \
sectionname == 'swapfree' or \
sectionname == 'swapused':
value = int(value)
else:
value = float(value)
if patternsname == 'CPU':
cpuid = elems[(1 if is_24hr is True else 2)]
try:
blah = return_dict[full_time][cpuid]
del blah
except KeyError:
return_dict[full_time][cpuid] = {}
return_dict[full_time][cpuid][sectionname] = \
value
else:
return_dict[full_time][sectionname] = value
return return_dict | python | def __split_info(self, info_part, patternsname, patterns):
"""
Splits info from SAR parts into logical stuff :-)
:param info_part: Part of SAR output we want to split into usable data
:param patternsname: ???
:param patterns: ???
:return: ``List``-style info from SAR files, now finally \
completely parsed into meaningful data for further processing
"""
pattern = patterns['PATTERN']
if pattern == '':
return False
return_dict = {}
pattern_re = re.compile(pattern)
for part_line in info_part.split('\n'):
if part_line.strip() != '' and not pattern_re.search(part_line):
# Take care of AM/PM timestamps in SAR file
is_24hr = True
is_AM = False
if part_line[9:11] == 'AM':
is_24hr = False
is_AM = True
elif part_line[9:11] == 'PM':
is_24hr = False
is_AM = False
if is_24hr is False:
part_line = ('%s_%s XX %s' % (part_line[:8], part_line[9:11], part_line[12:]))
# Line is not empty, nor it's header.
# let's hit the road Jack!
elems = part_line.split()
full_time = elems[0].strip()
if full_time != "Average:":
# Convert time to 24hr format if needed
if is_24hr is False:
full_time = full_time[:-3]
# 12 is a bitch in AM/PM notation
if full_time[:2] == '12':
if is_AM is True:
full_time = ('%s:%s' % ('00', full_time[3:]))
is_AM = not is_AM
if is_AM is False and full_time[0:2] != '00':
hours = int(full_time[:2]) + 12
hours = ('%02d' % (hours,))
full_time = ('%s:%s' % (hours, full_time[3:]))
try:
blah = return_dict[full_time]
del blah
except KeyError:
return_dict[full_time] = {}
fields = self.__fields[patternsname]
pairs = patterns["PAIRS"]
for sectionname in pairs.iterkeys():
value = elems[fields[pairs[sectionname]]]
if sectionname == 'membuffer' or \
sectionname == 'memcache' or \
sectionname == 'memfree' or \
sectionname == 'memused' or \
sectionname == 'swapfree' or \
sectionname == 'swapused':
value = int(value)
else:
value = float(value)
if patternsname == 'CPU':
cpuid = elems[(1 if is_24hr is True else 2)]
try:
blah = return_dict[full_time][cpuid]
del blah
except KeyError:
return_dict[full_time][cpuid] = {}
return_dict[full_time][cpuid][sectionname] = \
value
else:
return_dict[full_time][sectionname] = value
return return_dict | ['def', '__split_info', '(', 'self', ',', 'info_part', ',', 'patternsname', ',', 'patterns', ')', ':', 'pattern', '=', 'patterns', '[', "'PATTERN'", ']', 'if', 'pattern', '==', "''", ':', 'return', 'False', 'return_dict', '=', '{', '}', 'pattern_re', '=', 're', '.', 'compile', '(', 'pattern', ')', 'for', 'part_line', 'in', 'info_part', '.', 'split', '(', "'\\n'", ')', ':', 'if', 'part_line', '.', 'strip', '(', ')', '!=', "''", 'and', 'not', 'pattern_re', '.', 'search', '(', 'part_line', ')', ':', '# Take care of AM/PM timestamps in SAR file', 'is_24hr', '=', 'True', 'is_AM', '=', 'False', 'if', 'part_line', '[', '9', ':', '11', ']', '==', "'AM'", ':', 'is_24hr', '=', 'False', 'is_AM', '=', 'True', 'elif', 'part_line', '[', '9', ':', '11', ']', '==', "'PM'", ':', 'is_24hr', '=', 'False', 'is_AM', '=', 'False', 'if', 'is_24hr', 'is', 'False', ':', 'part_line', '=', '(', "'%s_%s XX %s'", '%', '(', 'part_line', '[', ':', '8', ']', ',', 'part_line', '[', '9', ':', '11', ']', ',', 'part_line', '[', '12', ':', ']', ')', ')', "# Line is not empty, nor it's header.", "# let's hit the road Jack!", 'elems', '=', 'part_line', '.', 'split', '(', ')', 'full_time', '=', 'elems', '[', '0', ']', '.', 'strip', '(', ')', 'if', 'full_time', '!=', '"Average:"', ':', '# Convert time to 24hr format if needed', 'if', 'is_24hr', 'is', 'False', ':', 'full_time', '=', 'full_time', '[', ':', '-', '3', ']', '# 12 is a bitch in AM/PM notation', 'if', 'full_time', '[', ':', '2', ']', '==', "'12'", ':', 'if', 'is_AM', 'is', 'True', ':', 'full_time', '=', '(', "'%s:%s'", '%', '(', "'00'", ',', 'full_time', '[', '3', ':', ']', ')', ')', 'is_AM', '=', 'not', 'is_AM', 'if', 'is_AM', 'is', 'False', 'and', 'full_time', '[', '0', ':', '2', ']', '!=', "'00'", ':', 'hours', '=', 'int', '(', 'full_time', '[', ':', '2', ']', ')', '+', '12', 'hours', '=', '(', "'%02d'", '%', '(', 'hours', ',', ')', ')', 'full_time', '=', '(', "'%s:%s'", '%', '(', 'hours', ',', 'full_time', '[', '3', ':', ']', ')', ')', 'try', ':', 'blah', '=', 'return_dict', '[', 'full_time', ']', 'del', 'blah', 'except', 'KeyError', ':', 'return_dict', '[', 'full_time', ']', '=', '{', '}', 'fields', '=', 'self', '.', '__fields', '[', 'patternsname', ']', 'pairs', '=', 'patterns', '[', '"PAIRS"', ']', 'for', 'sectionname', 'in', 'pairs', '.', 'iterkeys', '(', ')', ':', 'value', '=', 'elems', '[', 'fields', '[', 'pairs', '[', 'sectionname', ']', ']', ']', 'if', 'sectionname', '==', "'membuffer'", 'or', 'sectionname', '==', "'memcache'", 'or', 'sectionname', '==', "'memfree'", 'or', 'sectionname', '==', "'memused'", 'or', 'sectionname', '==', "'swapfree'", 'or', 'sectionname', '==', "'swapused'", ':', 'value', '=', 'int', '(', 'value', ')', 'else', ':', 'value', '=', 'float', '(', 'value', ')', 'if', 'patternsname', '==', "'CPU'", ':', 'cpuid', '=', 'elems', '[', '(', '1', 'if', 'is_24hr', 'is', 'True', 'else', '2', ')', ']', 'try', ':', 'blah', '=', 'return_dict', '[', 'full_time', ']', '[', 'cpuid', ']', 'del', 'blah', 'except', 'KeyError', ':', 'return_dict', '[', 'full_time', ']', '[', 'cpuid', ']', '=', '{', '}', 'return_dict', '[', 'full_time', ']', '[', 'cpuid', ']', '[', 'sectionname', ']', '=', 'value', 'else', ':', 'return_dict', '[', 'full_time', ']', '[', 'sectionname', ']', '=', 'value', 'return', 'return_dict'] | Splits info from SAR parts into logical stuff :-)
:param info_part: Part of SAR output we want to split into usable data
:param patternsname: ???
:param patterns: ???
:return: ``List``-style info from SAR files, now finally \
completely parsed into meaningful data for further processing | ['Splits', 'info', 'from', 'SAR', 'parts', 'into', 'logical', 'stuff', ':', '-', ')', ':', 'param', 'info_part', ':', 'Part', 'of', 'SAR', 'output', 'we', 'want', 'to', 'split', 'into', 'usable', 'data', ':', 'param', 'patternsname', ':', '???', ':', 'param', 'patterns', ':', '???', ':', 'return', ':', 'List', '-', 'style', 'info', 'from', 'SAR', 'files', 'now', 'finally', '\\', 'completely', 'parsed', 'into', 'meaningful', 'data', 'for', 'further', 'processing'] | train | https://github.com/casastorta/python-sar/blob/e6d8bb86524102d677f37e985302fad34e3297c1/sar/parser.py#L310-L403 |
7,269 | bitesofcode/projexui | projexui/widgets/xcommentedit.py | XCommentEdit.resizeToContents | def resizeToContents(self):
"""
Resizes this toolbar based on the contents of its text.
"""
if self._toolbar.isVisible():
doc = self.document()
h = doc.documentLayout().documentSize().height()
offset = 34
# update the attachments edit
edit = self._attachmentsEdit
if self._attachments:
edit.move(2, self.height() - edit.height() - 31)
edit.setTags(sorted(self._attachments.keys()))
edit.show()
offset = 34 + edit.height()
else:
edit.hide()
offset = 34
self.setFixedHeight(h + offset)
self._toolbar.move(2, self.height() - 32)
else:
super(XCommentEdit, self).resizeToContents() | python | def resizeToContents(self):
"""
Resizes this toolbar based on the contents of its text.
"""
if self._toolbar.isVisible():
doc = self.document()
h = doc.documentLayout().documentSize().height()
offset = 34
# update the attachments edit
edit = self._attachmentsEdit
if self._attachments:
edit.move(2, self.height() - edit.height() - 31)
edit.setTags(sorted(self._attachments.keys()))
edit.show()
offset = 34 + edit.height()
else:
edit.hide()
offset = 34
self.setFixedHeight(h + offset)
self._toolbar.move(2, self.height() - 32)
else:
super(XCommentEdit, self).resizeToContents() | ['def', 'resizeToContents', '(', 'self', ')', ':', 'if', 'self', '.', '_toolbar', '.', 'isVisible', '(', ')', ':', 'doc', '=', 'self', '.', 'document', '(', ')', 'h', '=', 'doc', '.', 'documentLayout', '(', ')', '.', 'documentSize', '(', ')', '.', 'height', '(', ')', 'offset', '=', '34', '# update the attachments edit\r', 'edit', '=', 'self', '.', '_attachmentsEdit', 'if', 'self', '.', '_attachments', ':', 'edit', '.', 'move', '(', '2', ',', 'self', '.', 'height', '(', ')', '-', 'edit', '.', 'height', '(', ')', '-', '31', ')', 'edit', '.', 'setTags', '(', 'sorted', '(', 'self', '.', '_attachments', '.', 'keys', '(', ')', ')', ')', 'edit', '.', 'show', '(', ')', 'offset', '=', '34', '+', 'edit', '.', 'height', '(', ')', 'else', ':', 'edit', '.', 'hide', '(', ')', 'offset', '=', '34', 'self', '.', 'setFixedHeight', '(', 'h', '+', 'offset', ')', 'self', '.', '_toolbar', '.', 'move', '(', '2', ',', 'self', '.', 'height', '(', ')', '-', '32', ')', 'else', ':', 'super', '(', 'XCommentEdit', ',', 'self', ')', '.', 'resizeToContents', '(', ')'] | Resizes this toolbar based on the contents of its text. | ['Resizes', 'this', 'toolbar', 'based', 'on', 'the', 'contents', 'of', 'its', 'text', '.'] | train | https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xcommentedit.py#L183-L208 |
7,270 | pip-services3-python/pip-services3-commons-python | pip_services3_commons/data/AnyValueMap.py | AnyValueMap.get_as_boolean_with_default | def get_as_boolean_with_default(self, key, default_value):
"""
Converts map element into a boolean or returns default value if conversion is not possible.
:param key: an index of element to get.
:param default_value: the default value
:return: boolean value of the element or default value if conversion is not supported.
"""
value = self.get(key)
return BooleanConverter.to_boolean_with_default(value, default_value) | python | def get_as_boolean_with_default(self, key, default_value):
"""
Converts map element into a boolean or returns default value if conversion is not possible.
:param key: an index of element to get.
:param default_value: the default value
:return: boolean value of the element or default value if conversion is not supported.
"""
value = self.get(key)
return BooleanConverter.to_boolean_with_default(value, default_value) | ['def', 'get_as_boolean_with_default', '(', 'self', ',', 'key', ',', 'default_value', ')', ':', 'value', '=', 'self', '.', 'get', '(', 'key', ')', 'return', 'BooleanConverter', '.', 'to_boolean_with_default', '(', 'value', ',', 'default_value', ')'] | Converts map element into a boolean or returns default value if conversion is not possible.
:param key: an index of element to get.
:param default_value: the default value
:return: boolean value of the element or default value if conversion is not supported. | ['Converts', 'map', 'element', 'into', 'a', 'boolean', 'or', 'returns', 'default', 'value', 'if', 'conversion', 'is', 'not', 'possible', '.'] | train | https://github.com/pip-services3-python/pip-services3-commons-python/blob/22cbbb3e91e49717f65c083d36147fdb07ba9e3b/pip_services3_commons/data/AnyValueMap.py#L221-L232
7,271 | m32/endesive | endesive/pdf/fpdf/fpdf.py | FPDF.set_link | def set_link(self, link,y=0,page=-1):
"Set destination of internal link"
if(y==-1):
y=self.y
if(page==-1):
page=self.page
self.links[link]=[page,y] | python | def set_link(self, link,y=0,page=-1):
"Set destination of internal link"
if(y==-1):
y=self.y
if(page==-1):
page=self.page
self.links[link]=[page,y] | ['def', 'set_link', '(', 'self', ',', 'link', ',', 'y', '=', '0', ',', 'page', '=', '-', '1', ')', ':', 'if', '(', 'y', '==', '-', '1', ')', ':', 'y', '=', 'self', '.', 'y', 'if', '(', 'page', '==', '-', '1', ')', ':', 'page', '=', 'self', '.', 'page', 'self', '.', 'links', '[', 'link', ']', '=', '[', 'page', ',', 'y', ']'] | Set destination of internal link | ['Set', 'destination', 'of', 'internal', 'link'] | train | https://github.com/m32/endesive/blob/973091dc69847fe2df594c80ac9235a8d08460ff/endesive/pdf/fpdf/fpdf.py#L629-L635 |
7,272 | bitesofcode/projexui | projexui/widgets/xloggerwidget/xloggerwidget.py | XLoggerWidget.color | def color(self, key):
"""
Returns the color value for the given key for this console.
:param key | <unicode>
:return <QtGui.QColor>
"""
if type(key) == int:
key = self.LoggingMap.get(key, ('NotSet', ''))[0]
name = nativestring(key).capitalize()
return self._colorSet.color(name) | python | def color(self, key):
"""
Returns the color value for the given key for this console.
:param key | <unicode>
:return <QtGui.QColor>
"""
if type(key) == int:
key = self.LoggingMap.get(key, ('NotSet', ''))[0]
name = nativestring(key).capitalize()
return self._colorSet.color(name) | ['def', 'color', '(', 'self', ',', 'key', ')', ':', 'if', 'type', '(', 'key', ')', '==', 'int', ':', 'key', '=', 'self', '.', 'LoggingMap', '.', 'get', '(', 'key', ',', '(', "'NotSet'", ',', "''", ')', ')', '[', '0', ']', 'name', '=', 'nativestring', '(', 'key', ')', '.', 'capitalize', '(', ')', 'return', 'self', '.', '_colorSet', '.', 'color', '(', 'name', ')'] | Returns the color value for the given key for this console.
:param key | <unicode>
:return <QtGui.QColor> | ['Returns', 'the', 'color', 'value', 'for', 'the', 'given', 'key', 'for', 'this', 'console', '.', ':', 'param', 'key', '|', '<unicode', '>', ':', 'return', '<QtGui', '.', 'QColor', '>'] | train | https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xloggerwidget/xloggerwidget.py#L185-L196 |
7,273 | astooke/gtimer | gtimer/public/io.py | get_times | def get_times():
"""
Produce a deepcopy of the current timing data (no risk of interference
with active timing or other operations).
Returns:
Times: gtimer timing data structure object.
"""
if f.root.stopped:
return copy.deepcopy(f.root.times)
else:
t = timer()
times = collapse.collapse_times()
f.root.self_cut += timer() - t
return times | python | def get_times():
"""
Produce a deepcopy of the current timing data (no risk of interference
with active timing or other operations).
Returns:
Times: gtimer timing data structure object.
"""
if f.root.stopped:
return copy.deepcopy(f.root.times)
else:
t = timer()
times = collapse.collapse_times()
f.root.self_cut += timer() - t
return times | ['def', 'get_times', '(', ')', ':', 'if', 'f', '.', 'root', '.', 'stopped', ':', 'return', 'copy', '.', 'deepcopy', '(', 'f', '.', 'root', '.', 'times', ')', 'else', ':', 't', '=', 'timer', '(', ')', 'times', '=', 'collapse', '.', 'collapse_times', '(', ')', 'f', '.', 'root', '.', 'self_cut', '+=', 'timer', '(', ')', '-', 't', 'return', 'times'] | Produce a deepcopy of the current timing data (no risk of interference
with active timing or other operations).
Returns:
Times: gtimer timing data structure object. | ['Produce', 'a', 'deepcopy', 'of', 'the', 'current', 'timing', 'data', '(', 'no', 'risk', 'of', 'interference', 'with', 'active', 'timing', 'or', 'other', 'operations', ')', '.'] | train | https://github.com/astooke/gtimer/blob/2146dab459e5d959feb291821733d3d3ba7c523c/gtimer/public/io.py#L25-L39
7,274 | BerkeleyAutomation/perception | perception/phoxi_sensor.py | PhoXiSensor.frames | def frames(self):
"""Retrieve a new frame from the PhoXi and convert it to a ColorImage,
a DepthImage, and an IrImage.
Returns
-------
:obj:`tuple` of :obj:`ColorImage`, :obj:`DepthImage`, :obj:`IrImage`, :obj:`numpy.ndarray`
The ColorImage, DepthImage, and IrImage of the current frame.
"""
# Run a software trigger
times = []
rospy.ServiceProxy('phoxi_camera/start_acquisition', Empty)()
rospy.ServiceProxy('phoxi_camera/trigger_image', TriggerImage)()
self._cur_color_im = None
self._cur_depth_im = None
self._cur_normal_map = None
rospy.ServiceProxy('phoxi_camera/get_frame', GetFrame)(-1)
max_time = 5.0
time_waiting = 0.0
while self._cur_color_im is None or self._cur_depth_im is None or self._cur_normal_map is None:
time.sleep(0.05)
time_waiting += 0.05
if time_waiting > max_time:
raise SensorUnresponsiveException('PhoXi sensor seems to be non-responsive')
return self._cur_color_im, self._cur_depth_im, None | python | def frames(self):
"""Retrieve a new frame from the PhoXi and convert it to a ColorImage,
a DepthImage, and an IrImage.
Returns
-------
:obj:`tuple` of :obj:`ColorImage`, :obj:`DepthImage`, :obj:`IrImage`, :obj:`numpy.ndarray`
The ColorImage, DepthImage, and IrImage of the current frame.
"""
# Run a software trigger
times = []
rospy.ServiceProxy('phoxi_camera/start_acquisition', Empty)()
rospy.ServiceProxy('phoxi_camera/trigger_image', TriggerImage)()
self._cur_color_im = None
self._cur_depth_im = None
self._cur_normal_map = None
rospy.ServiceProxy('phoxi_camera/get_frame', GetFrame)(-1)
max_time = 5.0
time_waiting = 0.0
while self._cur_color_im is None or self._cur_depth_im is None or self._cur_normal_map is None:
time.sleep(0.05)
time_waiting += 0.05
if time_waiting > max_time:
raise SensorUnresponsiveException('PhoXi sensor seems to be non-responsive')
return self._cur_color_im, self._cur_depth_im, None | ['def', 'frames', '(', 'self', ')', ':', '# Run a software trigger', 'times', '=', '[', ']', 'rospy', '.', 'ServiceProxy', '(', "'phoxi_camera/start_acquisition'", ',', 'Empty', ')', '(', ')', 'rospy', '.', 'ServiceProxy', '(', "'phoxi_camera/trigger_image'", ',', 'TriggerImage', ')', '(', ')', 'self', '.', '_cur_color_im', '=', 'None', 'self', '.', '_cur_depth_im', '=', 'None', 'self', '.', '_cur_normal_map', '=', 'None', 'rospy', '.', 'ServiceProxy', '(', "'phoxi_camera/get_frame'", ',', 'GetFrame', ')', '(', '-', '1', ')', 'max_time', '=', '5.0', 'time_waiting', '=', '0.0', 'while', 'self', '.', '_cur_color_im', 'is', 'None', 'or', 'self', '.', '_cur_depth_im', 'is', 'None', 'or', 'self', '.', '_cur_normal_map', 'is', 'None', ':', 'time', '.', 'sleep', '(', '0.05', ')', 'time_waiting', '+=', '0.05', 'if', 'time_waiting', '>', 'max_time', ':', 'raise', 'SensorUnresponsiveException', '(', "'PhoXi sensor seems to be non-responsive'", ')', 'return', 'self', '.', '_cur_color_im', ',', 'self', '.', '_cur_depth_im', ',', 'None'] | Retrieve a new frame from the PhoXi and convert it to a ColorImage,
a DepthImage, and an IrImage.
Returns
-------
:obj:`tuple` of :obj:`ColorImage`, :obj:`DepthImage`, :obj:`IrImage`, :obj:`numpy.ndarray`
The ColorImage, DepthImage, and IrImage of the current frame. | ['Retrieve', 'a', 'new', 'frame', 'from', 'the', 'PhoXi', 'and', 'convert', 'it', 'to', 'a', 'ColorImage', 'a', 'DepthImage', 'and', 'an', 'IrImage', '.'] | train | https://github.com/BerkeleyAutomation/perception/blob/03d9b37dd6b66896cdfe173905c9413c8c3c5df6/perception/phoxi_sensor.py#L148-L176 |
7,275 | faucamp/python-gsmmodem | gsmmodem/modem.py | IncomingCall.answer | def answer(self):
""" Answer the phone call.
:return: self (for chaining method calls)
"""
if self.ringing:
self._gsmModem.write('ATA')
self.ringing = False
self.answered = True
return self | python | def answer(self):
""" Answer the phone call.
:return: self (for chaining method calls)
"""
if self.ringing:
self._gsmModem.write('ATA')
self.ringing = False
self.answered = True
return self | ['def', 'answer', '(', 'self', ')', ':', 'if', 'self', '.', 'ringing', ':', 'self', '.', '_gsmModem', '.', 'write', '(', "'ATA'", ')', 'self', '.', 'ringing', '=', 'False', 'self', '.', 'answered', '=', 'True', 'return', 'self'] | Answer the phone call.
:return: self (for chaining method calls) | ['Answer', 'the', 'phone', 'call', '.', ':', 'return', ':', 'self', '(', 'for', 'chaining', 'method', 'calls', ')'] | train | https://github.com/faucamp/python-gsmmodem/blob/834c68b1387ca2c91e2210faa8f75526b39723b5/gsmmodem/modem.py#L1308-L1316 |
7,276 | walkr/nanoservice | nanoservice/core.py | Endpoint.encode | def encode(self, payload):
""" Encode payload """
try:
return self.encoder.encode(payload)
except Exception as exception:
raise EncodeError(str(exception)) | python | def encode(self, payload):
""" Encode payload """
try:
return self.encoder.encode(payload)
except Exception as exception:
raise EncodeError(str(exception)) | ['def', 'encode', '(', 'self', ',', 'payload', ')', ':', 'try', ':', 'return', 'self', '.', 'encoder', '.', 'encode', '(', 'payload', ')', 'except', 'Exception', 'as', 'exception', ':', 'raise', 'EncodeError', '(', 'str', '(', 'exception', ')', ')'] | Encode payload | ['Encode', 'payload'] | train | https://github.com/walkr/nanoservice/blob/e2098986b1baa5f283167ae487d14f3c6c21961a/nanoservice/core.py#L138-L143 |
7,277 | eaton-lab/toytree | toytree/etemini.py | TreeNode.get_farthest_node | def get_farthest_node(self, topology_only=False):
"""
Returns the node's farthest descendant or ancestor node, and the
distance to it.
:argument False topology_only: If set to True, distance
between nodes will be referred to the number of nodes
between them. In other words, topological distance will be
used instead of branch length distances.
:return: A tuple containing the farthest node referred to the
current node and the distance to it.
"""
# Init fasthest node to current farthest leaf
farthest_node, farthest_dist = self.get_farthest_leaf(
topology_only=topology_only)
prev = self
cdist = 0.0 if topology_only else prev.dist
current = prev.up
while current is not None:
for ch in current.children:
if ch != prev:
if not ch.is_leaf():
fnode, fdist = ch.get_farthest_leaf(
topology_only=topology_only)
else:
fnode = ch
fdist = 0
if topology_only:
fdist += 1.0
else:
fdist += ch.dist
if cdist+fdist > farthest_dist:
farthest_dist = cdist + fdist
farthest_node = fnode
prev = current
if topology_only:
cdist += 1
else:
cdist += prev.dist
current = prev.up
return farthest_node, farthest_dist | python | def get_farthest_node(self, topology_only=False):
"""
Returns the node's farthest descendant or ancestor node, and the
distance to it.
:argument False topology_only: If set to True, distance
between nodes will be referred to the number of nodes
between them. In other words, topological distance will be
used instead of branch length distances.
:return: A tuple containing the farthest node referred to the
current node and the distance to it.
"""
# Init fasthest node to current farthest leaf
farthest_node, farthest_dist = self.get_farthest_leaf(
topology_only=topology_only)
prev = self
cdist = 0.0 if topology_only else prev.dist
current = prev.up
while current is not None:
for ch in current.children:
if ch != prev:
if not ch.is_leaf():
fnode, fdist = ch.get_farthest_leaf(
topology_only=topology_only)
else:
fnode = ch
fdist = 0
if topology_only:
fdist += 1.0
else:
fdist += ch.dist
if cdist+fdist > farthest_dist:
farthest_dist = cdist + fdist
farthest_node = fnode
prev = current
if topology_only:
cdist += 1
else:
cdist += prev.dist
current = prev.up
return farthest_node, farthest_dist | ['def', 'get_farthest_node', '(', 'self', ',', 'topology_only', '=', 'False', ')', ':', '# Init fasthest node to current farthest leaf', 'farthest_node', ',', 'farthest_dist', '=', 'self', '.', 'get_farthest_leaf', '(', 'topology_only', '=', 'topology_only', ')', 'prev', '=', 'self', 'cdist', '=', '0.0', 'if', 'topology_only', 'else', 'prev', '.', 'dist', 'current', '=', 'prev', '.', 'up', 'while', 'current', 'is', 'not', 'None', ':', 'for', 'ch', 'in', 'current', '.', 'children', ':', 'if', 'ch', '!=', 'prev', ':', 'if', 'not', 'ch', '.', 'is_leaf', '(', ')', ':', 'fnode', ',', 'fdist', '=', 'ch', '.', 'get_farthest_leaf', '(', 'topology_only', '=', 'topology_only', ')', 'else', ':', 'fnode', '=', 'ch', 'fdist', '=', '0', 'if', 'topology_only', ':', 'fdist', '+=', '1.0', 'else', ':', 'fdist', '+=', 'ch', '.', 'dist', 'if', 'cdist', '+', 'fdist', '>', 'farthest_dist', ':', 'farthest_dist', '=', 'cdist', '+', 'fdist', 'farthest_node', '=', 'fnode', 'prev', '=', 'current', 'if', 'topology_only', ':', 'cdist', '+=', '1', 'else', ':', 'cdist', '+=', 'prev', '.', 'dist', 'current', '=', 'prev', '.', 'up', 'return', 'farthest_node', ',', 'farthest_dist'] | Returns the node's farthest descendant or ancestor node, and the
distance to it.
:argument False topology_only: If set to True, distance
between nodes will be referred to the number of nodes
between them. In other words, topological distance will be
used instead of branch length distances.
:return: A tuple containing the farthest node referred to the
current node and the distance to it. | ['Returns', 'the', 'node', 's', 'farthest', 'descendant', 'or', 'ancestor', 'node', 'and', 'the', 'distance', 'to', 'it', '.'] | train | https://github.com/eaton-lab/toytree/blob/0347ed2098acc5f707fadf52a0ecd411a6d1859c/toytree/etemini.py#L926-L969 |
7,278 | PythonOptimizers/cygenja | cygenja/treemap/location_descriptor.py | LocationDescriptor.get_locations_list | def get_locations_list(self, lower_bound=0, upper_bound=None):
"""
Return the internal location list.
Args:
lower_bound:
upper_bound:
Returns:
"""
real_upper_bound = upper_bound
if upper_bound is None:
real_upper_bound = self.nbr_of_sub_locations()
try:
return self._locations_list[lower_bound:real_upper_bound]
except:
return list() | python | def get_locations_list(self, lower_bound=0, upper_bound=None):
"""
Return the internal location list.
Args:
lower_bound:
upper_bound:
Returns:
"""
real_upper_bound = upper_bound
if upper_bound is None:
real_upper_bound = self.nbr_of_sub_locations()
try:
return self._locations_list[lower_bound:real_upper_bound]
except:
return list() | ['def', 'get_locations_list', '(', 'self', ',', 'lower_bound', '=', '0', ',', 'upper_bound', '=', 'None', ')', ':', 'real_upper_bound', '=', 'upper_bound', 'if', 'upper_bound', 'is', 'None', ':', 'real_upper_bound', '=', 'self', '.', 'nbr_of_sub_locations', '(', ')', 'try', ':', 'return', 'self', '.', '_locations_list', '[', 'lower_bound', ':', 'real_upper_bound', ']', 'except', ':', 'return', 'list', '(', ')'] | Return the internal location list.
Args:
lower_bound:
upper_bound:
Returns: | ['Return', 'the', 'internal', 'location', 'list', '.'] | train | https://github.com/PythonOptimizers/cygenja/blob/a9ef91cdfa8452beeeec4f050f928b830379f91c/cygenja/treemap/location_descriptor.py#L132-L149 |
7,279 | ansible/ansible-container | container/templates/wait_on_host.py | wait_on_hosts | def wait_on_hosts(hosts, max_attempts=3, sleep_time=1):
'''
Wait for a container to have a State.Running value = true.
:param hosts: list of service names taken from container.yml
:param max_attempts: Max number of times to inspect the container and check State.Running
:param sleep_time: Number of seconds to wait between attempts.
:return: dict of host:running pairs
'''
results = {}
for host in hosts:
container = "ansible_{}_1".format(host)
tries = max_attempts
host_ready = False
output = None
results[host] = False
while tries > 0 and not host_ready:
try:
output = subprocess.check_output(["docker", "inspect", "--format", "{{ .State.Running }}",
container], stderr=STDOUT)
except CalledProcessError:
pass
tries -= 1
if output and 'true' in output:
host_ready = True
results[host] = True
else:
sleep(sleep_time)
return results | python | def wait_on_hosts(hosts, max_attempts=3, sleep_time=1):
'''
Wait for a container to have a State.Running value = true.
:param hosts: list of service names taken from container.yml
:param max_attempts: Max number of times to inspect the container and check State.Running
:param sleep_time: Number of seconds to wait between attempts.
:return: dict of host:running pairs
'''
results = {}
for host in hosts:
container = "ansible_{}_1".format(host)
tries = max_attempts
host_ready = False
output = None
results[host] = False
while tries > 0 and not host_ready:
try:
output = subprocess.check_output(["docker", "inspect", "--format", "{{ .State.Running }}",
container], stderr=STDOUT)
except CalledProcessError:
pass
tries -= 1
if output and 'true' in output:
host_ready = True
results[host] = True
else:
sleep(sleep_time)
return results | ['def', 'wait_on_hosts', '(', 'hosts', ',', 'max_attempts', '=', '3', ',', 'sleep_time', '=', '1', ')', ':', 'results', '=', '{', '}', 'for', 'host', 'in', 'hosts', ':', 'container', '=', '"ansible_{}_1"', '.', 'format', '(', 'host', ')', 'tries', '=', 'max_attempts', 'host_ready', '=', 'False', 'output', '=', 'None', 'results', '[', 'host', ']', '=', 'False', 'while', 'tries', '>', '0', 'and', 'not', 'host_ready', ':', 'try', ':', 'output', '=', 'subprocess', '.', 'check_output', '(', '[', '"docker"', ',', '"inspect"', ',', '"--format"', ',', '"{{ .State.Running }}"', ',', 'container', ']', ',', 'stderr', '=', 'STDOUT', ')', 'except', 'CalledProcessError', ':', 'pass', 'tries', '-=', '1', 'if', 'output', 'and', "'true'", 'in', 'output', ':', 'host_ready', '=', 'True', 'results', '[', 'host', ']', '=', 'True', 'else', ':', 'sleep', '(', 'sleep_time', ')', 'return', 'results'] | Wait for a container to have a State.Running value = true.
:param hosts: list of service names taken from container.yml
:param max_attempts: Max number of times to inspect the container and check State.Running
:param sleep_time: Number of seconds to wait between attempts.
:return: dict of host:running pairs | ['Wait', 'for', 'a', 'container', 'to', 'have', 'a', 'State', '.', 'Running', 'value', '=', 'true', '.', ':', 'param', 'hosts', ':', 'list', 'of', 'service', 'names', 'taken', 'from', 'container', '.', 'yml', ':', 'param', 'max_attempts', ':', 'Max', 'number', 'of', 'times', 'to', 'inspect', 'the', 'container', 'and', 'check', 'State', '.', 'Running', ':', 'param', 'sleep_time', ':', 'Number', 'of', 'seconds', 'to', 'wait', 'between', 'attempts', '.', ':', 'return', ':', 'dict', 'of', 'host', ':', 'running', 'pairs'] | train | https://github.com/ansible/ansible-container/blob/d031c1a6133d5482a5d054fcbdbecafb923f8b4b/container/templates/wait_on_host.py#L12-L39 |
7,280 | mbedmicro/pyOCD | pyocd/target/pack/flash_algo.py | PackFlashAlgo._algo_fill_zi_if_missing | def _algo_fill_zi_if_missing(self, ro_rw_zi):
"""! @brief Create an empty zi section if it is missing"""
s_ro, s_rw, s_zi = ro_rw_zi
if s_rw is None:
return ro_rw_zi
if s_zi is not None:
return ro_rw_zi
s_zi = MemoryRange(start=(s_rw.start + s_rw.length), length=0)
return s_ro, s_rw, s_zi | python | def _algo_fill_zi_if_missing(self, ro_rw_zi):
"""! @brief Create an empty zi section if it is missing"""
s_ro, s_rw, s_zi = ro_rw_zi
if s_rw is None:
return ro_rw_zi
if s_zi is not None:
return ro_rw_zi
s_zi = MemoryRange(start=(s_rw.start + s_rw.length), length=0)
return s_ro, s_rw, s_zi | ['def', '_algo_fill_zi_if_missing', '(', 'self', ',', 'ro_rw_zi', ')', ':', 's_ro', ',', 's_rw', ',', 's_zi', '=', 'ro_rw_zi', 'if', 's_rw', 'is', 'None', ':', 'return', 'ro_rw_zi', 'if', 's_zi', 'is', 'not', 'None', ':', 'return', 'ro_rw_zi', 's_zi', '=', 'MemoryRange', '(', 'start', '=', '(', 's_rw', '.', 'start', '+', 's_rw', '.', 'length', ')', ',', 'length', '=', '0', ')', 'return', 's_ro', ',', 's_rw', ',', 's_zi'] | ! @brief Create an empty zi section if it is missing | ['!'] | train | https://github.com/mbedmicro/pyOCD/blob/41a174718a9739f3cbe785c2ba21cb7fd1310c6f/pyocd/target/pack/flash_algo.py#L205-L213 |
7,281 | gem/oq-engine | openquake/baselib/general.py | assert_independent | def assert_independent(package, *packages):
"""
:param package: Python name of a module/package
:param packages: Python names of modules/packages
Make sure the `package` does not depend from the `packages`.
"""
assert packages, 'At least one package must be specified'
import_package = 'from openquake.baselib.general import import_all\n' \
'print(import_all("%s"))' % package
imported_modules = run_in_process(import_package)
for mod in imported_modules:
for pkg in packages:
if mod.startswith(pkg):
raise CodeDependencyError('%s depends on %s' % (package, pkg)) | python | def assert_independent(package, *packages):
"""
:param package: Python name of a module/package
:param packages: Python names of modules/packages
Make sure the `package` does not depend from the `packages`.
"""
assert packages, 'At least one package must be specified'
import_package = 'from openquake.baselib.general import import_all\n' \
'print(import_all("%s"))' % package
imported_modules = run_in_process(import_package)
for mod in imported_modules:
for pkg in packages:
if mod.startswith(pkg):
raise CodeDependencyError('%s depends on %s' % (package, pkg)) | ['def', 'assert_independent', '(', 'package', ',', '*', 'packages', ')', ':', 'assert', 'packages', ',', "'At least one package must be specified'", 'import_package', '=', "'from openquake.baselib.general import import_all\\n'", '\'print(import_all("%s"))\'', '%', 'package', 'imported_modules', '=', 'run_in_process', '(', 'import_package', ')', 'for', 'mod', 'in', 'imported_modules', ':', 'for', 'pkg', 'in', 'packages', ':', 'if', 'mod', '.', 'startswith', '(', 'pkg', ')', ':', 'raise', 'CodeDependencyError', '(', "'%s depends on %s'", '%', '(', 'package', ',', 'pkg', ')', ')'] | :param package: Python name of a module/package
:param packages: Python names of modules/packages
Make sure the `package` does not depend from the `packages`. | [':', 'param', 'package', ':', 'Python', 'name', 'of', 'a', 'module', '/', 'package', ':', 'param', 'packages', ':', 'Python', 'names', 'of', 'modules', '/', 'packages'] | train | https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/baselib/general.py#L475-L489 |
7,282 | MarcMeszaros/envitro | envitro/core.py | int | def int(name, default=None, allow_none=False, fallback=None):
"""Get a string environment value or the default.
Args:
name: The environment variable name
default: The default value to use if no environment variable is found
allow_none: If the return value can be `None` (i.e. optional)
"""
value = read(name, default, allow_none, fallback=fallback)
if isinstance(value, builtins.str):
value = value.strip()
if value is None and allow_none:
return None
else:
return builtins.int(value) | python | def int(name, default=None, allow_none=False, fallback=None):
"""Get a string environment value or the default.
Args:
name: The environment variable name
default: The default value to use if no environment variable is found
allow_none: If the return value can be `None` (i.e. optional)
"""
value = read(name, default, allow_none, fallback=fallback)
if isinstance(value, builtins.str):
value = value.strip()
if value is None and allow_none:
return None
else:
return builtins.int(value) | ['def', 'int', '(', 'name', ',', 'default', '=', 'None', ',', 'allow_none', '=', 'False', ',', 'fallback', '=', 'None', ')', ':', 'value', '=', 'read', '(', 'name', ',', 'default', ',', 'allow_none', ',', 'fallback', '=', 'fallback', ')', 'if', 'isinstance', '(', 'value', ',', 'builtins', '.', 'str', ')', ':', 'value', '=', 'value', '.', 'strip', '(', ')', 'if', 'value', 'is', 'None', 'and', 'allow_none', ':', 'return', 'None', 'else', ':', 'return', 'builtins', '.', 'int', '(', 'value', ')'] | Get a string environment value or the default.
Args:
name: The environment variable name
default: The default value to use if no environment variable is found
allow_none: If the return value can be `None` (i.e. optional) | ['Get', 'a', 'string', 'environment', 'value', 'or', 'the', 'default', '.'] | train | https://github.com/MarcMeszaros/envitro/blob/19e925cd152c08d4db8126542afed35188cafff4/envitro/core.py#L144-L159 |
7,283 | manns/pyspread | pyspread/src/lib/vlc.py | libvlc_media_list_player_set_media_list | def libvlc_media_list_player_set_media_list(p_mlp, p_mlist):
'''Set the media list associated with the player.
@param p_mlp: media list player instance.
@param p_mlist: list of media.
'''
f = _Cfunctions.get('libvlc_media_list_player_set_media_list', None) or \
_Cfunction('libvlc_media_list_player_set_media_list', ((1,), (1,),), None,
None, MediaListPlayer, MediaList)
return f(p_mlp, p_mlist) | python | def libvlc_media_list_player_set_media_list(p_mlp, p_mlist):
'''Set the media list associated with the player.
@param p_mlp: media list player instance.
@param p_mlist: list of media.
'''
f = _Cfunctions.get('libvlc_media_list_player_set_media_list', None) or \
_Cfunction('libvlc_media_list_player_set_media_list', ((1,), (1,),), None,
None, MediaListPlayer, MediaList)
return f(p_mlp, p_mlist) | ['def', 'libvlc_media_list_player_set_media_list', '(', 'p_mlp', ',', 'p_mlist', ')', ':', 'f', '=', '_Cfunctions', '.', 'get', '(', "'libvlc_media_list_player_set_media_list'", ',', 'None', ')', 'or', '_Cfunction', '(', "'libvlc_media_list_player_set_media_list'", ',', '(', '(', '1', ',', ')', ',', '(', '1', ',', ')', ',', ')', ',', 'None', ',', 'None', ',', 'MediaListPlayer', ',', 'MediaList', ')', 'return', 'f', '(', 'p_mlp', ',', 'p_mlist', ')'] | Set the media list associated with the player.
@param p_mlp: media list player instance.
@param p_mlist: list of media. | ['Set', 'the', 'media', 'list', 'associated', 'with', 'the', 'player', '.'] | train | https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/lib/vlc.py#L4820-L4828 |
7,284 | gplepage/gvar | src/gvar/linalg.py | solve | def solve(a, b):
""" Find ``x`` such that ``a @ x = b`` for matrix ``a``.
Args:
a: Two-dimensional, square matrix/array of numbers
and/or :class:`gvar.GVar`\s.
b: One-dimensional vector/array of numbers and/or
:class:`gvar.GVar`\s, or an array of such vectors.
Requires ``b.shape[0] == a.shape[1]``.
Returns:
The solution ``x`` of ``a.dot(x) = b``, which is equivalent
to ``inv(a).dot(b)``.
Raises:
ValueError: If ``a`` is not square and two-dimensional.
ValueError: If shape of ``b`` does not match that of ``a``
(that is ``b.shape[0] != a.shape[1]``).
"""
amean = gvar.mean(a)
if amean.ndim != 2 or amean.shape[0] != amean.shape[1]:
raise ValueError('bad matrix shape: ' + str(a.shape))
bmean = gvar.mean(b)
if bmean.shape[0] != a.shape[1]:
raise ValueError(
'Mismatch between shapes of a and b: {} {}'.format(a.shape, b.shape)
)
# xmean = numpy.linalg.solve(amean, bmean)
ainv = numpy.linalg.inv(amean)
xmean = ainv.dot(bmean)
return xmean + ainv.dot(b-bmean - (a-amean).dot(xmean)) | python | def solve(a, b):
""" Find ``x`` such that ``a @ x = b`` for matrix ``a``.
Args:
a: Two-dimensional, square matrix/array of numbers
and/or :class:`gvar.GVar`\s.
b: One-dimensional vector/array of numbers and/or
:class:`gvar.GVar`\s, or an array of such vectors.
Requires ``b.shape[0] == a.shape[1]``.
Returns:
The solution ``x`` of ``a.dot(x) = b``, which is equivalent
to ``inv(a).dot(b)``.
Raises:
ValueError: If ``a`` is not square and two-dimensional.
ValueError: If shape of ``b`` does not match that of ``a``
(that is ``b.shape[0] != a.shape[1]``).
"""
amean = gvar.mean(a)
if amean.ndim != 2 or amean.shape[0] != amean.shape[1]:
raise ValueError('bad matrix shape: ' + str(a.shape))
bmean = gvar.mean(b)
if bmean.shape[0] != a.shape[1]:
raise ValueError(
'Mismatch between shapes of a and b: {} {}'.format(a.shape, b.shape)
)
# xmean = numpy.linalg.solve(amean, bmean)
ainv = numpy.linalg.inv(amean)
xmean = ainv.dot(bmean)
return xmean + ainv.dot(b-bmean - (a-amean).dot(xmean)) | ['def', 'solve', '(', 'a', ',', 'b', ')', ':', 'amean', '=', 'gvar', '.', 'mean', '(', 'a', ')', 'if', 'amean', '.', 'ndim', '!=', '2', 'or', 'amean', '.', 'shape', '[', '0', ']', '!=', 'amean', '.', 'shape', '[', '1', ']', ':', 'raise', 'ValueError', '(', "'bad matrix shape: '", '+', 'str', '(', 'a', '.', 'shape', ')', ')', 'bmean', '=', 'gvar', '.', 'mean', '(', 'b', ')', 'if', 'bmean', '.', 'shape', '[', '0', ']', '!=', 'a', '.', 'shape', '[', '1', ']', ':', 'raise', 'ValueError', '(', "'Mismatch between shapes of a and b: {} {}'", '.', 'format', '(', 'a', '.', 'shape', ',', 'b', '.', 'shape', ')', ')', '# xmean = numpy.linalg.solve(amean, bmean)', 'ainv', '=', 'numpy', '.', 'linalg', '.', 'inv', '(', 'amean', ')', 'xmean', '=', 'ainv', '.', 'dot', '(', 'bmean', ')', 'return', 'xmean', '+', 'ainv', '.', 'dot', '(', 'b', '-', 'bmean', '-', '(', 'a', '-', 'amean', ')', '.', 'dot', '(', 'xmean', ')', ')'] | Find ``x`` such that ``a @ x = b`` for matrix ``a``.
Args:
a: Two-dimensional, square matrix/array of numbers
and/or :class:`gvar.GVar`\s.
b: One-dimensional vector/array of numbers and/or
:class:`gvar.GVar`\s, or an array of such vectors.
Requires ``b.shape[0] == a.shape[1]``.
Returns:
The solution ``x`` of ``a.dot(x) = b``, which is equivalent
to ``inv(a).dot(b)``.
Raises:
ValueError: If ``a`` is not square and two-dimensional.
ValueError: If shape of ``b`` does not match that of ``a``
(that is ``b.shape[0] != a.shape[1]``). | ['Find', 'x', 'such', 'that', 'a', '@', 'x', '=', 'b', 'for', 'matrix', 'a', '.'] | train | https://github.com/gplepage/gvar/blob/d6671697319eb6280de3793c9a1c2b616c6f2ae0/src/gvar/linalg.py#L316-L346 |
7,285 | wummel/linkchecker | linkcheck/i18n.py | get_translator | def get_translator (domain, directory, languages=None,
translatorklass=Translator, fallback=False,
fallbackklass=NullTranslator):
"""Search the appropriate GNUTranslations class."""
translator = gettext.translation(domain, localedir=directory,
languages=languages, class_=translatorklass, fallback=fallback)
if not isinstance(translator, gettext.GNUTranslations) and fallbackklass:
translator = fallbackklass()
return translator | python | def get_translator (domain, directory, languages=None,
translatorklass=Translator, fallback=False,
fallbackklass=NullTranslator):
"""Search the appropriate GNUTranslations class."""
translator = gettext.translation(domain, localedir=directory,
languages=languages, class_=translatorklass, fallback=fallback)
if not isinstance(translator, gettext.GNUTranslations) and fallbackklass:
translator = fallbackklass()
return translator | ['def', 'get_translator', '(', 'domain', ',', 'directory', ',', 'languages', '=', 'None', ',', 'translatorklass', '=', 'Translator', ',', 'fallback', '=', 'False', ',', 'fallbackklass', '=', 'NullTranslator', ')', ':', 'translator', '=', 'gettext', '.', 'translation', '(', 'domain', ',', 'localedir', '=', 'directory', ',', 'languages', '=', 'languages', ',', 'class_', '=', 'translatorklass', ',', 'fallback', '=', 'fallback', ')', 'if', 'not', 'isinstance', '(', 'translator', ',', 'gettext', '.', 'GNUTranslations', ')', 'and', 'fallbackklass', ':', 'translator', '=', 'fallbackklass', '(', ')', 'return', 'translator'] | Search the appropriate GNUTranslations class. | ['Search', 'the', 'appropriate', 'GNUTranslations', 'class', '.'] | train | https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/linkcheck/i18n.py#L105-L113 |
7,286 | flatangle/flatlib | flatlib/chart.py | Chart.copy | def copy(self):
""" Returns a deep copy of this chart. """
chart = Chart.__new__(Chart)
chart.date = self.date
chart.pos = self.pos
chart.hsys = self.hsys
chart.objects = self.objects.copy()
chart.houses = self.houses.copy()
chart.angles = self.angles.copy()
return chart | python | def copy(self):
""" Returns a deep copy of this chart. """
chart = Chart.__new__(Chart)
chart.date = self.date
chart.pos = self.pos
chart.hsys = self.hsys
chart.objects = self.objects.copy()
chart.houses = self.houses.copy()
chart.angles = self.angles.copy()
return chart | ['def', 'copy', '(', 'self', ')', ':', 'chart', '=', 'Chart', '.', '__new__', '(', 'Chart', ')', 'chart', '.', 'date', '=', 'self', '.', 'date', 'chart', '.', 'pos', '=', 'self', '.', 'pos', 'chart', '.', 'hsys', '=', 'self', '.', 'hsys', 'chart', '.', 'objects', '=', 'self', '.', 'objects', '.', 'copy', '(', ')', 'chart', '.', 'houses', '=', 'self', '.', 'houses', '.', 'copy', '(', ')', 'chart', '.', 'angles', '=', 'self', '.', 'angles', '.', 'copy', '(', ')', 'return', 'chart'] | Returns a deep copy of this chart. | ['Returns', 'a', 'deep', 'copy', 'of', 'this', 'chart', '.'] | train | https://github.com/flatangle/flatlib/blob/44e05b2991a296c678adbc17a1d51b6a21bc867c/flatlib/chart.py#L56-L65 |
7,287 | StackStorm/pybind | pybind/slxos/v17s_1_02/resource_monitor/cpu/__init__.py | cpu._set_offset_cpu | def _set_offset_cpu(self, v, load=False):
"""
Setter method for offset_cpu, mapped from YANG variable /resource_monitor/cpu/offset_cpu (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_offset_cpu is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_offset_cpu() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'0 .. 70']}), is_leaf=True, yang_name="offset-cpu", rest_name="thresh-offset", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Offset to CPU threshold for testing', u'hidden': u'debug', u'alt-name': u'thresh-offset', u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-resource-monitor', defining_module='brocade-resource-monitor', yang_type='uint32', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """offset_cpu must be of a type compatible with uint32""",
'defined-type': "uint32",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'0 .. 70']}), is_leaf=True, yang_name="offset-cpu", rest_name="thresh-offset", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Offset to CPU threshold for testing', u'hidden': u'debug', u'alt-name': u'thresh-offset', u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-resource-monitor', defining_module='brocade-resource-monitor', yang_type='uint32', is_config=True)""",
})
self.__offset_cpu = t
if hasattr(self, '_set'):
self._set() | python | def _set_offset_cpu(self, v, load=False):
"""
Setter method for offset_cpu, mapped from YANG variable /resource_monitor/cpu/offset_cpu (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_offset_cpu is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_offset_cpu() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'0 .. 70']}), is_leaf=True, yang_name="offset-cpu", rest_name="thresh-offset", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Offset to CPU threshold for testing', u'hidden': u'debug', u'alt-name': u'thresh-offset', u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-resource-monitor', defining_module='brocade-resource-monitor', yang_type='uint32', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """offset_cpu must be of a type compatible with uint32""",
'defined-type': "uint32",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'0 .. 70']}), is_leaf=True, yang_name="offset-cpu", rest_name="thresh-offset", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Offset to CPU threshold for testing', u'hidden': u'debug', u'alt-name': u'thresh-offset', u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-resource-monitor', defining_module='brocade-resource-monitor', yang_type='uint32', is_config=True)""",
})
self.__offset_cpu = t
if hasattr(self, '_set'):
self._set() | ['def', '_set_offset_cpu', '(', 'self', ',', 'v', ',', 'load', '=', 'False', ')', ':', 'if', 'hasattr', '(', 'v', ',', '"_utype"', ')', ':', 'v', '=', 'v', '.', '_utype', '(', 'v', ')', 'try', ':', 't', '=', 'YANGDynClass', '(', 'v', ',', 'base', '=', 'RestrictedClassType', '(', 'base_type', '=', 'RestrictedClassType', '(', 'base_type', '=', 'long', ',', 'restriction_dict', '=', '{', "'range'", ':', '[', "'0..4294967295'", ']', '}', ',', 'int_size', '=', '32', ')', ',', 'restriction_dict', '=', '{', "'range'", ':', '[', "u'0 .. 70'", ']', '}', ')', ',', 'is_leaf', '=', 'True', ',', 'yang_name', '=', '"offset-cpu"', ',', 'rest_name', '=', '"thresh-offset"', ',', 'parent', '=', 'self', ',', 'path_helper', '=', 'self', '.', '_path_helper', ',', 'extmethods', '=', 'self', '.', '_extmethods', ',', 'register_paths', '=', 'True', ',', 'extensions', '=', '{', "u'tailf-common'", ':', '{', "u'info'", ':', "u'Offset to CPU threshold for testing'", ',', "u'hidden'", ':', "u'debug'", ',', "u'alt-name'", ':', "u'thresh-offset'", ',', "u'cli-suppress-no'", ':', 'None', '}', '}', ',', 'namespace', '=', "'urn:brocade.com:mgmt:brocade-resource-monitor'", ',', 'defining_module', '=', "'brocade-resource-monitor'", ',', 'yang_type', '=', "'uint32'", ',', 'is_config', '=', 'True', ')', 'except', '(', 'TypeError', ',', 'ValueError', ')', ':', 'raise', 'ValueError', '(', '{', "'error-string'", ':', '"""offset_cpu must be of a type compatible with uint32"""', ',', "'defined-type'", ':', '"uint32"', ',', "'generated-type'", ':', '"""YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={\'range\': [\'0..4294967295\']}, int_size=32), restriction_dict={\'range\': [u\'0 .. 70\']}), is_leaf=True, yang_name="offset-cpu", rest_name="thresh-offset", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u\'tailf-common\': {u\'info\': u\'Offset to CPU threshold for testing\', u\'hidden\': u\'debug\', u\'alt-name\': u\'thresh-offset\', u\'cli-suppress-no\': None}}, namespace=\'urn:brocade.com:mgmt:brocade-resource-monitor\', defining_module=\'brocade-resource-monitor\', yang_type=\'uint32\', is_config=True)"""', ',', '}', ')', 'self', '.', '__offset_cpu', '=', 't', 'if', 'hasattr', '(', 'self', ',', "'_set'", ')', ':', 'self', '.', '_set', '(', ')'] | Setter method for offset_cpu, mapped from YANG variable /resource_monitor/cpu/offset_cpu (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_offset_cpu is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_offset_cpu() directly. | ['Setter', 'method', 'for', 'offset_cpu', 'mapped', 'from', 'YANG', 'variable', '/', 'resource_monitor', '/', 'cpu', '/', 'offset_cpu', '(', 'uint32', ')', 'If', 'this', 'variable', 'is', 'read', '-', 'only', '(', 'config', ':', 'false', ')', 'in', 'the', 'source', 'YANG', 'file', 'then', '_set_offset_cpu', 'is', 'considered', 'as', 'a', 'private', 'method', '.', 'Backends', 'looking', 'to', 'populate', 'this', 'variable', 'should', 'do', 'so', 'via', 'calling', 'thisObj', '.', '_set_offset_cpu', '()', 'directly', '.'] | train | https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/slxos/v17s_1_02/resource_monitor/cpu/__init__.py#L295-L316 |
7,288 | shad7/tvrenamer | tvrenamer/cache/api.py | DatabaseAPI.save | def save(self, instance):
"""Save (create or update) the instance to the database
:param instance: an instance of modeled data object
"""
cond = tinydb.where('original') == instance.original
eid = self.update(instance, cond)
if eid is None:
return self.create(instance)
return eid | python | def save(self, instance):
"""Save (create or update) the instance to the database
:param instance: an instance of modeled data object
"""
cond = tinydb.where('original') == instance.original
eid = self.update(instance, cond)
if eid is None:
return self.create(instance)
return eid | ['def', 'save', '(', 'self', ',', 'instance', ')', ':', 'cond', '=', 'tinydb', '.', 'where', '(', "'original'", ')', '==', 'instance', '.', 'original', 'eid', '=', 'self', '.', 'update', '(', 'instance', ',', 'cond', ')', 'if', 'eid', 'is', 'None', ':', 'return', 'self', '.', 'create', '(', 'instance', ')', 'return', 'eid'] | Save (create or update) the instance to the database
:param instance: an instance of modeled data object | ['Save', '(', 'create', 'or', 'update', ')', 'the', 'instance', 'to', 'the', 'database'] | train | https://github.com/shad7/tvrenamer/blob/7fb59cb02669357e73b7acb92dcb6d74fdff4654/tvrenamer/cache/api.py#L49-L58 |
7,289 | redcap-tools/PyCap | redcap/project.py | Project.export_fem | def export_fem(self, arms=None, format='json', df_kwargs=None):
"""
Export the project's form to event mapping
Parameters
----------
arms : list
Limit exported form event mappings to these arm numbers
format : (``'json'``), ``'csv'``, ``'xml'``
Return the form event mappings in native objects,
csv or xml, ``'df''`` will return a ``pandas.DataFrame``
df_kwargs : dict
Passed to pandas.read_csv to control construction of
returned DataFrame
Returns
-------
fem : list, str, ``pandas.DataFrame``
form-event mapping for the project
"""
ret_format = format
if format == 'df':
from pandas import read_csv
ret_format = 'csv'
pl = self.__basepl('formEventMapping', format=ret_format)
to_add = [arms]
str_add = ['arms']
for key, data in zip(str_add, to_add):
if data:
pl[key] = ','.join(data)
response, _ = self._call_api(pl, 'exp_fem')
if format in ('json', 'csv', 'xml'):
return response
elif format == 'df':
if not df_kwargs:
return read_csv(StringIO(response))
else:
return read_csv(StringIO(response), **df_kwargs) | python | def export_fem(self, arms=None, format='json', df_kwargs=None):
"""
Export the project's form to event mapping
Parameters
----------
arms : list
Limit exported form event mappings to these arm numbers
format : (``'json'``), ``'csv'``, ``'xml'``
Return the form event mappings in native objects,
csv or xml, ``'df''`` will return a ``pandas.DataFrame``
df_kwargs : dict
Passed to pandas.read_csv to control construction of
returned DataFrame
Returns
-------
fem : list, str, ``pandas.DataFrame``
form-event mapping for the project
"""
ret_format = format
if format == 'df':
from pandas import read_csv
ret_format = 'csv'
pl = self.__basepl('formEventMapping', format=ret_format)
to_add = [arms]
str_add = ['arms']
for key, data in zip(str_add, to_add):
if data:
pl[key] = ','.join(data)
response, _ = self._call_api(pl, 'exp_fem')
if format in ('json', 'csv', 'xml'):
return response
elif format == 'df':
if not df_kwargs:
return read_csv(StringIO(response))
else:
return read_csv(StringIO(response), **df_kwargs) | ['def', 'export_fem', '(', 'self', ',', 'arms', '=', 'None', ',', 'format', '=', "'json'", ',', 'df_kwargs', '=', 'None', ')', ':', 'ret_format', '=', 'format', 'if', 'format', '==', "'df'", ':', 'from', 'pandas', 'import', 'read_csv', 'ret_format', '=', "'csv'", 'pl', '=', 'self', '.', '__basepl', '(', "'formEventMapping'", ',', 'format', '=', 'ret_format', ')', 'to_add', '=', '[', 'arms', ']', 'str_add', '=', '[', "'arms'", ']', 'for', 'key', ',', 'data', 'in', 'zip', '(', 'str_add', ',', 'to_add', ')', ':', 'if', 'data', ':', 'pl', '[', 'key', ']', '=', "','", '.', 'join', '(', 'data', ')', 'response', ',', '_', '=', 'self', '.', '_call_api', '(', 'pl', ',', "'exp_fem'", ')', 'if', 'format', 'in', '(', "'json'", ',', "'csv'", ',', "'xml'", ')', ':', 'return', 'response', 'elif', 'format', '==', "'df'", ':', 'if', 'not', 'df_kwargs', ':', 'return', 'read_csv', '(', 'StringIO', '(', 'response', ')', ')', 'else', ':', 'return', 'read_csv', '(', 'StringIO', '(', 'response', ')', ',', '*', '*', 'df_kwargs', ')'] | Export the project's form to event mapping
Parameters
----------
arms : list
Limit exported form event mappings to these arm numbers
format : (``'json'``), ``'csv'``, ``'xml'``
Return the form event mappings in native objects,
csv or xml, ``'df''`` will return a ``pandas.DataFrame``
df_kwargs : dict
Passed to pandas.read_csv to control construction of
returned DataFrame
Returns
-------
fem : list, str, ``pandas.DataFrame``
form-event mapping for the project | ['Export', 'the', 'project', 's', 'form', 'to', 'event', 'mapping'] | train | https://github.com/redcap-tools/PyCap/blob/f44c9b62a4f62675aa609c06608663f37e12097e/redcap/project.py#L157-L194 |
7,290 | fabioz/PyDev.Debugger | pydevd_attach_to_process/winappdbg/textio.py | Color.red | def red(cls):
"Make the text foreground color red."
wAttributes = cls._get_text_attributes()
wAttributes &= ~win32.FOREGROUND_MASK
wAttributes |= win32.FOREGROUND_RED
cls._set_text_attributes(wAttributes) | python | def red(cls):
"Make the text foreground color red."
wAttributes = cls._get_text_attributes()
wAttributes &= ~win32.FOREGROUND_MASK
wAttributes |= win32.FOREGROUND_RED
cls._set_text_attributes(wAttributes) | ['def', 'red', '(', 'cls', ')', ':', 'wAttributes', '=', 'cls', '.', '_get_text_attributes', '(', ')', 'wAttributes', '&=', '~', 'win32', '.', 'FOREGROUND_MASK', 'wAttributes', '|=', 'win32', '.', 'FOREGROUND_RED', 'cls', '.', '_set_text_attributes', '(', 'wAttributes', ')'] | Make the text foreground color red. | ['Make', 'the', 'text', 'foreground', 'color', 'red', '.'] | train | https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/pydevd_attach_to_process/winappdbg/textio.py#L959-L964 |
7,291 | amoffat/sh | sh.py | RunningCommand.handle_command_exit_code | def handle_command_exit_code(self, code):
""" here we determine if we had an exception, or an error code that we
weren't expecting to see. if we did, we create and raise an exception
"""
ca = self.call_args
exc_class = get_exc_exit_code_would_raise(code, ca["ok_code"],
ca["piped"])
if exc_class:
exc = exc_class(self.ran, self.process.stdout, self.process.stderr,
ca["truncate_exc"])
raise exc | python | def handle_command_exit_code(self, code):
""" here we determine if we had an exception, or an error code that we
weren't expecting to see. if we did, we create and raise an exception
"""
ca = self.call_args
exc_class = get_exc_exit_code_would_raise(code, ca["ok_code"],
ca["piped"])
if exc_class:
exc = exc_class(self.ran, self.process.stdout, self.process.stderr,
ca["truncate_exc"])
raise exc | ['def', 'handle_command_exit_code', '(', 'self', ',', 'code', ')', ':', 'ca', '=', 'self', '.', 'call_args', 'exc_class', '=', 'get_exc_exit_code_would_raise', '(', 'code', ',', 'ca', '[', '"ok_code"', ']', ',', 'ca', '[', '"piped"', ']', ')', 'if', 'exc_class', ':', 'exc', '=', 'exc_class', '(', 'self', '.', 'ran', ',', 'self', '.', 'process', '.', 'stdout', ',', 'self', '.', 'process', '.', 'stderr', ',', 'ca', '[', '"truncate_exc"', ']', ')', 'raise', 'exc'] | here we determine if we had an exception, or an error code that we
weren't expecting to see. if we did, we create and raise an exception | ['here', 'we', 'determine', 'if', 'we', 'had', 'an', 'exception', 'or', 'an', 'error', 'code', 'that', 'we', 'weren', 't', 'expecting', 'to', 'see', '.', 'if', 'we', 'did', 'we', 'create', 'and', 'raise', 'an', 'exception'] | train | https://github.com/amoffat/sh/blob/858adf0c682af4c40e41f34d6926696b7a5d3b12/sh.py#L805-L815 |
7,292 | OpenTreeOfLife/peyotl | peyotl/collections_store/collections_shard.py | TreeCollectionsShard.write_configuration | def write_configuration(self, out, secret_attrs=False):
"""Generic configuration, may be overridden by type-specific version"""
key_order = ['name', 'path', 'git_dir', 'doc_dir', 'assumed_doc_version',
'git_ssh', 'pkey', 'has_aliases', 'number of collections']
cd = self.get_configuration_dict(secret_attrs=secret_attrs)
for k in key_order:
if k in cd:
out.write(' {} = {}'.format(k, cd[k]))
out.write(' collections in alias groups:\n')
for o in cd['collections']:
out.write(' {} ==> {}\n'.format(o['keys'], o['relpath'])) | python | def write_configuration(self, out, secret_attrs=False):
"""Generic configuration, may be overridden by type-specific version"""
key_order = ['name', 'path', 'git_dir', 'doc_dir', 'assumed_doc_version',
'git_ssh', 'pkey', 'has_aliases', 'number of collections']
cd = self.get_configuration_dict(secret_attrs=secret_attrs)
for k in key_order:
if k in cd:
out.write(' {} = {}'.format(k, cd[k]))
out.write(' collections in alias groups:\n')
for o in cd['collections']:
out.write(' {} ==> {}\n'.format(o['keys'], o['relpath'])) | ['def', 'write_configuration', '(', 'self', ',', 'out', ',', 'secret_attrs', '=', 'False', ')', ':', 'key_order', '=', '[', "'name'", ',', "'path'", ',', "'git_dir'", ',', "'doc_dir'", ',', "'assumed_doc_version'", ',', "'git_ssh'", ',', "'pkey'", ',', "'has_aliases'", ',', "'number of collections'", ']', 'cd', '=', 'self', '.', 'get_configuration_dict', '(', 'secret_attrs', '=', 'secret_attrs', ')', 'for', 'k', 'in', 'key_order', ':', 'if', 'k', 'in', 'cd', ':', 'out', '.', 'write', '(', "' {} = {}'", '.', 'format', '(', 'k', ',', 'cd', '[', 'k', ']', ')', ')', 'out', '.', 'write', '(', "' collections in alias groups:\\n'", ')', 'for', 'o', 'in', 'cd', '[', "'collections'", ']', ':', 'out', '.', 'write', '(', "' {} ==> {}\\n'", '.', 'format', '(', 'o', '[', "'keys'", ']', ',', 'o', '[', "'relpath'", ']', ')', ')'] | Generic configuration, may be overridden by type-specific version | ['Generic', 'configuration', 'may', 'be', 'overridden', 'by', 'type', '-', 'specific', 'version'] | train | https://github.com/OpenTreeOfLife/peyotl/blob/5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0/peyotl/collections_store/collections_shard.py#L105-L115 |
7,293 | wrongwaycn/ssdb-py | ssdb/client.py | StrictSSDB.zrlist | def zrlist(self, name_start, name_end, limit=10):
"""
Return a list of the top ``limit`` zset's name between ``name_start`` and
``name_end`` in descending order
.. note:: The range is (``name_start``, ``name_end``]. The ``name_start``
isn't in the range, but ``name_end`` is.
:param string name_start: The lower bound(not included) of zset names to
be returned, empty string ``''`` means +inf
:param string name_end: The upper bound(included) of zset names to be
returned, empty string ``''`` means -inf
:param int limit: number of elements will be returned.
:return: a list of zset's name
:rtype: list
>>> ssdb.zlist('zset_ ', 'zset_z', 10)
['zset_2', 'zset_1']
>>> ssdb.zlist('zset_ ', '', 3)
['zset_2', 'zset_1']
>>> ssdb.zlist('', 'aaa_not_exist', 10)
[]
"""
limit = get_positive_integer('limit', limit)
return self.execute_command('zrlist', name_start, name_end, limit) | python | def zrlist(self, name_start, name_end, limit=10):
"""
Return a list of the top ``limit`` zset's name between ``name_start`` and
``name_end`` in descending order
.. note:: The range is (``name_start``, ``name_end``]. The ``name_start``
isn't in the range, but ``name_end`` is.
:param string name_start: The lower bound(not included) of zset names to
be returned, empty string ``''`` means +inf
:param string name_end: The upper bound(included) of zset names to be
returned, empty string ``''`` means -inf
:param int limit: number of elements will be returned.
:return: a list of zset's name
:rtype: list
>>> ssdb.zlist('zset_ ', 'zset_z', 10)
['zset_2', 'zset_1']
>>> ssdb.zlist('zset_ ', '', 3)
['zset_2', 'zset_1']
>>> ssdb.zlist('', 'aaa_not_exist', 10)
[]
"""
limit = get_positive_integer('limit', limit)
return self.execute_command('zrlist', name_start, name_end, limit) | ['def', 'zrlist', '(', 'self', ',', 'name_start', ',', 'name_end', ',', 'limit', '=', '10', ')', ':', 'limit', '=', 'get_positive_integer', '(', "'limit'", ',', 'limit', ')', 'return', 'self', '.', 'execute_command', '(', "'zrlist'", ',', 'name_start', ',', 'name_end', ',', 'limit', ')'] | Return a list of the top ``limit`` zset's name between ``name_start`` and
``name_end`` in descending order
.. note:: The range is (``name_start``, ``name_end``]. The ``name_start``
isn't in the range, but ``name_end`` is.
:param string name_start: The lower bound(not included) of zset names to
be returned, empty string ``''`` means +inf
:param string name_end: The upper bound(included) of zset names to be
returned, empty string ``''`` means -inf
:param int limit: number of elements will be returned.
:return: a list of zset's name
:rtype: list
>>> ssdb.zlist('zset_ ', 'zset_z', 10)
['zset_2', 'zset_1']
>>> ssdb.zlist('zset_ ', '', 3)
['zset_2', 'zset_1']
>>> ssdb.zlist('', 'aaa_not_exist', 10)
[] | ['Return', 'a', 'list', 'of', 'the', 'top', 'limit', 'zset', 's', 'name', 'between', 'name_start', 'and', 'name_end', 'in', 'descending', 'order'] | train | https://github.com/wrongwaycn/ssdb-py/blob/ce7b1542f0faa06fe71a60c667fe15992af0f621/ssdb/client.py#L1427-L1451 |
7,294 | exosite-labs/pyonep | pyonep/onep.py | OnepV1.flush | def flush(self, auth, resource, options=None, defer=False):
""" Empties the specified resource of data per specified constraints.
Args:
auth: <cik>
resource: resource to empty.
options: Time limits.
"""
args = [resource]
if options is not None:
args.append(options)
return self._call('flush', auth, args, defer) | python | def flush(self, auth, resource, options=None, defer=False):
""" Empties the specified resource of data per specified constraints.
Args:
auth: <cik>
resource: resource to empty.
options: Time limits.
"""
args = [resource]
if options is not None:
args.append(options)
return self._call('flush', auth, args, defer) | ['def', 'flush', '(', 'self', ',', 'auth', ',', 'resource', ',', 'options', '=', 'None', ',', 'defer', '=', 'False', ')', ':', 'args', '=', '[', 'resource', ']', 'if', 'options', 'is', 'not', 'None', ':', 'args', '.', 'append', '(', 'options', ')', 'return', 'self', '.', '_call', '(', "'flush'", ',', 'auth', ',', 'args', ',', 'defer', ')'] | Empties the specified resource of data per specified constraints.
Args:
auth: <cik>
resource: resource to empty.
options: Time limits. | ['Empties', 'the', 'specified', 'resource', 'of', 'data', 'per', 'specified', 'constraints', '.'] | train | https://github.com/exosite-labs/pyonep/blob/d27b621b00688a542e0adcc01f3e3354c05238a1/pyonep/onep.py#L319-L330 |
7,295 | ctuning/ck | ck/kernel.py | load_repo_info_from_cache | def load_repo_info_from_cache(i):
"""
Input: {
repo_uoa - repo_uoa
}
Output: {
return - return code = 0, if successful
16, if repo not found (may be warning)
> 0, if error
(error) - error text if return > 0
repo_uoa - repo UOA
repo_uid - repo UID
repo_alias - repo alias
all other info from repo dict
}
"""
ruoa=i['repo_uoa']
ruid=ruoa
if ruoa==cfg['repo_name_default'] or ruoa==cfg['repo_uid_default']:
d={}
d["path_to_repo_desc"]=work['dir_default_repo_path']
d["data_uid"]=cfg['repo_uid_default']
d["data_alias"]=cfg['repo_name_default']
d["data_uoa"]=cfg['repo_name_default']
d["dict"]={"default":"yes"}
elif ruoa==cfg['repo_name_local'] or ruoa==cfg['repo_uid_local']:
d={}
d["path_to_repo_desc"]=work['dir_local_repo_path']
d["data_uid"]=cfg['repo_uid_local']
d["data_alias"]=cfg['repo_name_local']
d["data_uoa"]=cfg['repo_name_local']
d["dict"]={"default":"yes"}
else:
r=reload_repo_cache({}) # Ignore errors
if r['return']>0: return r
if not is_uid(ruoa):
ruid=cache_repo_uoa.get(ruoa,'')
if ruid=='':
return {'return':1, 'error':'repository "'+ruoa+'" is not found in the cache. Check if repository exists or try "ck recache repo"'}
d=cache_repo_info.get(ruid,{})
if len(d)==0:
return {'return':1, 'error':'repository is not found in the cache'}
r={'return':0}
r.update(d)
return r | python | def load_repo_info_from_cache(i):
"""
Input: {
repo_uoa - repo_uoa
}
Output: {
return - return code = 0, if successful
16, if repo not found (may be warning)
> 0, if error
(error) - error text if return > 0
repo_uoa - repo UOA
repo_uid - repo UID
repo_alias - repo alias
all other info from repo dict
}
"""
ruoa=i['repo_uoa']
ruid=ruoa
if ruoa==cfg['repo_name_default'] or ruoa==cfg['repo_uid_default']:
d={}
d["path_to_repo_desc"]=work['dir_default_repo_path']
d["data_uid"]=cfg['repo_uid_default']
d["data_alias"]=cfg['repo_name_default']
d["data_uoa"]=cfg['repo_name_default']
d["dict"]={"default":"yes"}
elif ruoa==cfg['repo_name_local'] or ruoa==cfg['repo_uid_local']:
d={}
d["path_to_repo_desc"]=work['dir_local_repo_path']
d["data_uid"]=cfg['repo_uid_local']
d["data_alias"]=cfg['repo_name_local']
d["data_uoa"]=cfg['repo_name_local']
d["dict"]={"default":"yes"}
else:
r=reload_repo_cache({}) # Ignore errors
if r['return']>0: return r
if not is_uid(ruoa):
ruid=cache_repo_uoa.get(ruoa,'')
if ruid=='':
return {'return':1, 'error':'repository "'+ruoa+'" is not found in the cache. Check if repository exists or try "ck recache repo"'}
d=cache_repo_info.get(ruid,{})
if len(d)==0:
return {'return':1, 'error':'repository is not found in the cache'}
r={'return':0}
r.update(d)
return r | ['def', 'load_repo_info_from_cache', '(', 'i', ')', ':', 'ruoa', '=', 'i', '[', "'repo_uoa'", ']', 'ruid', '=', 'ruoa', 'if', 'ruoa', '==', 'cfg', '[', "'repo_name_default'", ']', 'or', 'ruoa', '==', 'cfg', '[', "'repo_uid_default'", ']', ':', 'd', '=', '{', '}', 'd', '[', '"path_to_repo_desc"', ']', '=', 'work', '[', "'dir_default_repo_path'", ']', 'd', '[', '"data_uid"', ']', '=', 'cfg', '[', "'repo_uid_default'", ']', 'd', '[', '"data_alias"', ']', '=', 'cfg', '[', "'repo_name_default'", ']', 'd', '[', '"data_uoa"', ']', '=', 'cfg', '[', "'repo_name_default'", ']', 'd', '[', '"dict"', ']', '=', '{', '"default"', ':', '"yes"', '}', 'elif', 'ruoa', '==', 'cfg', '[', "'repo_name_local'", ']', 'or', 'ruoa', '==', 'cfg', '[', "'repo_uid_local'", ']', ':', 'd', '=', '{', '}', 'd', '[', '"path_to_repo_desc"', ']', '=', 'work', '[', "'dir_local_repo_path'", ']', 'd', '[', '"data_uid"', ']', '=', 'cfg', '[', "'repo_uid_local'", ']', 'd', '[', '"data_alias"', ']', '=', 'cfg', '[', "'repo_name_local'", ']', 'd', '[', '"data_uoa"', ']', '=', 'cfg', '[', "'repo_name_local'", ']', 'd', '[', '"dict"', ']', '=', '{', '"default"', ':', '"yes"', '}', 'else', ':', 'r', '=', 'reload_repo_cache', '(', '{', '}', ')', '# Ignore errors', 'if', 'r', '[', "'return'", ']', '>', '0', ':', 'return', 'r', 'if', 'not', 'is_uid', '(', 'ruoa', ')', ':', 'ruid', '=', 'cache_repo_uoa', '.', 'get', '(', 'ruoa', ',', "''", ')', 'if', 'ruid', '==', "''", ':', 'return', '{', "'return'", ':', '1', ',', "'error'", ':', '\'repository "\'', '+', 'ruoa', '+', '\'" is not found in the cache. Check if repository exists or try "ck recache repo"\'', '}', 'd', '=', 'cache_repo_info', '.', 'get', '(', 'ruid', ',', '{', '}', ')', 'if', 'len', '(', 'd', ')', '==', '0', ':', 'return', '{', "'return'", ':', '1', ',', "'error'", ':', "'repository is not found in the cache'", '}', 'r', '=', '{', "'return'", ':', '0', '}', 'r', '.', 'update', '(', 'd', ')', 'return', 'r'] | Input: {
repo_uoa - repo_uoa
}
Output: {
return - return code = 0, if successful
16, if repo not found (may be warning)
> 0, if error
(error) - error text if return > 0
repo_uoa - repo UOA
repo_uid - repo UID
repo_alias - repo alias
all other info from repo dict
} | ['Input', ':', '{', 'repo_uoa', '-', 'repo_uoa', '}'] | train | https://github.com/ctuning/ck/blob/7e009814e975f8742790d3106340088a46223714/ck/kernel.py#L2558-L2611 |
7,296 | HewlettPackard/python-hpOneView | hpOneView/resources/networking/logical_interconnects.py | LogicalInterconnects.get_snmp_configuration | def get_snmp_configuration(self):
"""
Gets the SNMP configuration for a logical interconnect.
Returns:
dict: SNMP configuration.
"""
uri = "{}{}".format(self.data["uri"], self.SNMP_CONFIGURATION_PATH)
return self._helper.do_get(uri) | python | def get_snmp_configuration(self):
"""
Gets the SNMP configuration for a logical interconnect.
Returns:
dict: SNMP configuration.
"""
uri = "{}{}".format(self.data["uri"], self.SNMP_CONFIGURATION_PATH)
return self._helper.do_get(uri) | ['def', 'get_snmp_configuration', '(', 'self', ')', ':', 'uri', '=', '"{}{}"', '.', 'format', '(', 'self', '.', 'data', '[', '"uri"', ']', ',', 'self', '.', 'SNMP_CONFIGURATION_PATH', ')', 'return', 'self', '.', '_helper', '.', 'do_get', '(', 'uri', ')'] | Gets the SNMP configuration for a logical interconnect.
Returns:
dict: SNMP configuration. | ['Gets', 'the', 'SNMP', 'configuration', 'for', 'a', 'logical', 'interconnect', '.'] | train | https://github.com/HewlettPackard/python-hpOneView/blob/3c6219723ef25e6e0c83d44a89007f89bc325b89/hpOneView/resources/networking/logical_interconnects.py#L247-L255 |
7,297 | bcbio/bcbio-nextgen | bcbio/variation/genotype.py | variant_filtration | def variant_filtration(call_file, ref_file, vrn_files, data, items):
"""Filter variant calls using Variant Quality Score Recalibration.
Newer GATK with Haplotype calling has combined SNP/indel filtering.
"""
caller = data["config"]["algorithm"].get("variantcaller")
if "gvcf" not in dd.get_tools_on(data):
call_file = ploidy.filter_vcf_by_sex(call_file, items)
if caller in ["freebayes"]:
return vfilter.freebayes(call_file, ref_file, vrn_files, data)
elif caller in ["platypus"]:
return vfilter.platypus(call_file, data)
elif caller in ["samtools"]:
return vfilter.samtools(call_file, data)
elif caller in ["gatk", "gatk-haplotype", "haplotyper"]:
if dd.get_analysis(data).lower().find("rna-seq") >= 0:
from bcbio.rnaseq import variation as rnaseq_variation
return rnaseq_variation.gatk_filter_rnaseq(call_file, data)
else:
return gatkfilter.run(call_file, ref_file, vrn_files, data)
# no additional filtration for callers that filter as part of call process
else:
return call_file | python | def variant_filtration(call_file, ref_file, vrn_files, data, items):
"""Filter variant calls using Variant Quality Score Recalibration.
Newer GATK with Haplotype calling has combined SNP/indel filtering.
"""
caller = data["config"]["algorithm"].get("variantcaller")
if "gvcf" not in dd.get_tools_on(data):
call_file = ploidy.filter_vcf_by_sex(call_file, items)
if caller in ["freebayes"]:
return vfilter.freebayes(call_file, ref_file, vrn_files, data)
elif caller in ["platypus"]:
return vfilter.platypus(call_file, data)
elif caller in ["samtools"]:
return vfilter.samtools(call_file, data)
elif caller in ["gatk", "gatk-haplotype", "haplotyper"]:
if dd.get_analysis(data).lower().find("rna-seq") >= 0:
from bcbio.rnaseq import variation as rnaseq_variation
return rnaseq_variation.gatk_filter_rnaseq(call_file, data)
else:
return gatkfilter.run(call_file, ref_file, vrn_files, data)
# no additional filtration for callers that filter as part of call process
else:
return call_file | ['def', 'variant_filtration', '(', 'call_file', ',', 'ref_file', ',', 'vrn_files', ',', 'data', ',', 'items', ')', ':', 'caller', '=', 'data', '[', '"config"', ']', '[', '"algorithm"', ']', '.', 'get', '(', '"variantcaller"', ')', 'if', '"gvcf"', 'not', 'in', 'dd', '.', 'get_tools_on', '(', 'data', ')', ':', 'call_file', '=', 'ploidy', '.', 'filter_vcf_by_sex', '(', 'call_file', ',', 'items', ')', 'if', 'caller', 'in', '[', '"freebayes"', ']', ':', 'return', 'vfilter', '.', 'freebayes', '(', 'call_file', ',', 'ref_file', ',', 'vrn_files', ',', 'data', ')', 'elif', 'caller', 'in', '[', '"platypus"', ']', ':', 'return', 'vfilter', '.', 'platypus', '(', 'call_file', ',', 'data', ')', 'elif', 'caller', 'in', '[', '"samtools"', ']', ':', 'return', 'vfilter', '.', 'samtools', '(', 'call_file', ',', 'data', ')', 'elif', 'caller', 'in', '[', '"gatk"', ',', '"gatk-haplotype"', ',', '"haplotyper"', ']', ':', 'if', 'dd', '.', 'get_analysis', '(', 'data', ')', '.', 'lower', '(', ')', '.', 'find', '(', '"rna-seq"', ')', '>=', '0', ':', 'from', 'bcbio', '.', 'rnaseq', 'import', 'variation', 'as', 'rnaseq_variation', 'return', 'rnaseq_variation', '.', 'gatk_filter_rnaseq', '(', 'call_file', ',', 'data', ')', 'else', ':', 'return', 'gatkfilter', '.', 'run', '(', 'call_file', ',', 'ref_file', ',', 'vrn_files', ',', 'data', ')', '# no additional filtration for callers that filter as part of call process', 'else', ':', 'return', 'call_file'] | Filter variant calls using Variant Quality Score Recalibration.
Newer GATK with Haplotype calling has combined SNP/indel filtering. | ['Filter', 'variant', 'calls', 'using', 'Variant', 'Quality', 'Score', 'Recalibration', '.'] | train | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/genotype.py#L23-L45 |
7,298 | kiwi0fruit/sugartex | sugartex/sugartex_filter.py | SugarTeX._su_scripts_regex | def _su_scripts_regex(self):
"""
:return:
[compiled regex, function]
"""
sups = re.escape(''.join([k for k in self.superscripts.keys()]))
subs = re.escape(''.join([k for k in self.subscripts.keys()])) # language=PythonRegExp
su_regex = (r'\\([{su_}])|([{sub}]+|‹[{sub}]+›|˹[{sub}]+˺)' +
r'|([{sup}]+)(?=√)|([{sup}]+(?!√)|‹[{sup}]+›|˹[{sup}]+˺)').format(
su_=subs + sups, sub=subs, sup=sups)
su_regex = re.compile(su_regex)
def su_replace(m):
esc, sub, root_sup, sup = m.groups()
if esc is not None:
return esc
elif sub is not None:
return '_{' + ''.join([c if (c in ['‹', '›', '˹', '˺']) else self.subscripts[c] for c in sub]) + '}'
elif root_sup is not None:
return ''.join([self.superscripts[c] for c in root_sup])
elif sup is not None:
return '^{' + ''.join([c if (c in ['‹', '›', '˹', '˺']) else self.superscripts[c] for c in sup]) + '}'
else:
raise TypeError("Regex bug: this should never be reached")
return [su_regex, su_replace] | python | def _su_scripts_regex(self):
"""
:return:
[compiled regex, function]
"""
sups = re.escape(''.join([k for k in self.superscripts.keys()]))
subs = re.escape(''.join([k for k in self.subscripts.keys()])) # language=PythonRegExp
su_regex = (r'\\([{su_}])|([{sub}]+|‹[{sub}]+›|˹[{sub}]+˺)' +
r'|([{sup}]+)(?=√)|([{sup}]+(?!√)|‹[{sup}]+›|˹[{sup}]+˺)').format(
su_=subs + sups, sub=subs, sup=sups)
su_regex = re.compile(su_regex)
def su_replace(m):
esc, sub, root_sup, sup = m.groups()
if esc is not None:
return esc
elif sub is not None:
return '_{' + ''.join([c if (c in ['‹', '›', '˹', '˺']) else self.subscripts[c] for c in sub]) + '}'
elif root_sup is not None:
return ''.join([self.superscripts[c] for c in root_sup])
elif sup is not None:
return '^{' + ''.join([c if (c in ['‹', '›', '˹', '˺']) else self.superscripts[c] for c in sup]) + '}'
else:
raise TypeError("Regex bug: this should never be reached")
return [su_regex, su_replace] | ['def', '_su_scripts_regex', '(', 'self', ')', ':', 'sups', '=', 're', '.', 'escape', '(', "''", '.', 'join', '(', '[', 'k', 'for', 'k', 'in', 'self', '.', 'superscripts', '.', 'keys', '(', ')', ']', ')', ')', 'subs', '=', 're', '.', 'escape', '(', "''", '.', 'join', '(', '[', 'k', 'for', 'k', 'in', 'self', '.', 'subscripts', '.', 'keys', '(', ')', ']', ')', ')', '# language=PythonRegExp', 'su_regex', '=', '(', "r'\\\\([{su_}])|([{sub}]+|‹[{sub}]+›|˹[{sub}]+˺)' +", '', "r'|([{sup}]+)(?=√)|([{sup}]+(?!√)|‹[{sup}]+›|˹[{sup}]+˺)').format(", '', '', '', '', 'su_', '=', 'subs', '+', 'sups', ',', 'sub', '=', 'subs', ',', 'sup', '=', 'sups', ')', 'su_regex', '=', 're', '.', 'compile', '(', 'su_regex', ')', 'def', 'su_replace', '(', 'm', ')', ':', 'esc', ',', 'sub', ',', 'root_sup', ',', 'sup', '=', 'm', '.', 'groups', '(', ')', 'if', 'esc', 'is', 'not', 'None', ':', 'return', 'esc', 'elif', 'sub', 'is', 'not', 'None', ':', 'return', "'_{'", '+', "''", '.', 'join', '(', '[', 'c', 'if', '(', 'c', 'in', '[', "'‹', ", "'", "', '˹", "'", " '˺'", ']', ' els', 'e', ' ', 'elf.', 'ubsc', 'r', 'ipts[c] fo', 'r', ' ', 'c', 'in ', 'u', '])', "+ '", '}', "'", '', '', 'elif', 'root_sup', 'is', 'not', 'None', ':', 'return', "''", '.', 'join', '(', '[', 'self', '.', 'superscripts', '[', 'c', ']', 'for', 'c', 'in', 'root_sup', ']', ')', 'elif', 'sup', 'is', 'not', 'None', ':', 'return', "'^{'", '+', "''", '.', 'join', '(', '[', 'c', 'if', '(', 'c', 'in', '[', "'‹', ", "'", "', '˹", "'", " '˺'", ']', ' els', 'e', ' ', 'elf.', 'uper', 's', 'cripts[c] fo', 'r', ' ', 'c', 'in ', 'u', '])', "+ '", '}', "'", '', '', 'else', ':', 'raise', 'TypeError', '(', '"Regex bug: this should never be reached"', ')', 'return', '[', 'su_regex', ',', 'su_replace', ']'] | :return:
[compiled regex, function] | [':', 'return', ':', '[', 'compiled', 'regex', 'function', ']'] | train | https://github.com/kiwi0fruit/sugartex/blob/9eb13703cb02d3e2163c9c5f29df280f6bf49cec/sugartex/sugartex_filter.py#L671-L696 |
7,299 | jaraco/irc | irc/features.py | FeatureSet.load | def load(self, arguments):
"Load the values from the a ServerConnection arguments"
features = arguments[1:-1]
list(map(self.load_feature, features)) | python | def load(self, arguments):
"Load the values from the a ServerConnection arguments"
features = arguments[1:-1]
list(map(self.load_feature, features)) | ['def', 'load', '(', 'self', ',', 'arguments', ')', ':', 'features', '=', 'arguments', '[', '1', ':', '-', '1', ']', 'list', '(', 'map', '(', 'self', '.', 'load_feature', ',', 'features', ')', ')'] | Load the values from the a ServerConnection arguments | ['Load', 'the', 'values', 'from', 'the', 'a', 'ServerConnection', 'arguments'] | train | https://github.com/jaraco/irc/blob/571c1f448d5d5bb92bbe2605c33148bf6e698413/irc/features.py#L44-L47 |