Dataset columns (as reported by the viewer):

| Column | Type / reported range |
|---|---|
| Unnamed: 0 | int64 (0 to 10k) |
| repository_name | string (lengths 7 to 54) |
| func_path_in_repository | string (lengths 5 to 223) |
| func_name | string (lengths 1 to 134) |
| whole_func_string | string (lengths 100 to 30.3k) |
| language | string (1 class: "python") |
| func_code_string | string (lengths 100 to 30.3k) |
| func_code_tokens | string (lengths 138 to 33.2k) |
| func_documentation_string | string (lengths 1 to 15k) |
| func_documentation_tokens | string (lengths 5 to 5.14k) |
| split_name | string (1 class: "train") |
| func_code_url | string (lengths 91 to 315) |

Each record below gives the row index, repository_name, func_path_in_repository, func_name, language, split_name and func_code_url on one line, followed by the function source (whole_func_string) with its documentation string.
7,500 | cloudera/cm_api | python/src/cm_api/endpoints/services.py | ApiService.disable_jt_ha | python | train | https://github.com/cloudera/cm_api/blob/5d2512375bd94684b4da36df9e0d9177865ffcbb/python/src/cm_api/endpoints/services.py#L1102-L1114

    def disable_jt_ha(self, active_name):
        """
        Disable high availability for a MR JobTracker active-standby pair.

        @param active_name: name of the JobTracker that will be active after
                            the disable operation. The other JobTracker and
                            Failover Controllers will be removed.
        @return: Reference to the submitted command.
        """
        args = dict(
            activeName = active_name,
        )
        return self._cmd('disableJtHa', data=args)
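
A hedged usage sketch for `ApiService.disable_jt_ha` above. The host, credentials, cluster and service names are placeholders, and the `ApiResource`/`get_cluster`/`get_service` entry points are the usual cm_api ones, assumed rather than taken from this record:

    from cm_api.api_client import ApiResource

    api = ApiResource('cm-host.example.com', username='admin', password='admin')
    cluster = api.get_cluster('cluster1')            # placeholder cluster name
    mapreduce = cluster.get_service('MAPREDUCE-1')   # placeholder service name

    # Keep 'jobtracker1' active; the other JobTracker and the Failover
    # Controllers are removed. The call returns the submitted command.
    cmd = mapreduce.disable_jt_ha(active_name='jobtracker1')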
7,501 | pazz/urwidtrees | urwidtrees/widgets.py | TreeBox.focus_next_sibling | python | train | https://github.com/pazz/urwidtrees/blob/d1fa38ce4f37db00bdfc574b856023b5db4c7ead/urwidtrees/widgets.py#L219-L224

    def focus_next_sibling(self):
        """move focus to next sibling of currently focussed one"""
        w, focuspos = self.get_focus()
        sib = self._tree.next_sibling_position(focuspos)
        if sib is not None:
            self.set_focus(sib)
7,502 | aacanakin/glim | glim/command.py | CommandAdapter.retrieve_commands | python | train | https://github.com/aacanakin/glim/blob/71a20ac149a1292c0d6c1dc7414985ea51854f7a/glim/command.py#L33-L62

    def retrieve_commands(self, module):
        """
        Function smartly imports Command type classes given module

        Args
        ----
        module (module):
            The module which Command classes will be extracted from

        Returns
        -------
        commands (list):
            A list of Command instances

        Note:
            This function will not register any command class
            named "Command" or "GlimCommand".
            When extending Command class, be sure to have "Command"
            string on your custom commands.
        """
        commands = []
        for name, obj in inspect.getmembers(module):
            if name != 'Command' and 'Command' in name:
                if name != 'GlimCommand':
                    cobject = getattr(module, name)
                    commands.append(cobject)
        return commands
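
A small, self-contained illustration of the selection rule in `retrieve_commands` above; the module and class names are invented for the example:

    import inspect
    import types

    # Hypothetical stand-ins for glim's command classes.
    class Command: pass
    class GlimCommand(Command): pass
    class GreetCommand(Command): pass

    module = types.ModuleType('app_commands')
    module.Command, module.GlimCommand, module.GreetCommand = Command, GlimCommand, GreetCommand

    # Same filter as retrieve_commands(): keep names containing 'Command',
    # except the base classes 'Command' and 'GlimCommand' themselves.
    kept = [obj for name, obj in inspect.getmembers(module)
            if name != 'Command' and 'Command' in name and name != 'GlimCommand']
    print(kept)  # only GreetCommand survives the filter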
7,503 | iotile/coretools | iotilegateway/iotilegateway/device.py | AggregatingDeviceAdapter.disconnect | python | train | https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilegateway/iotilegateway/device.py#L245-L254

    async def disconnect(self, conn_id):
        """Disconnect from a connected device.

        See :meth:`AbstractDeviceAdapter.disconnect`.
        """
        adapter_id = self._get_property(conn_id, 'adapter')
        await self.adapters[adapter_id].disconnect(conn_id)
        self._teardown_connection(conn_id)
7,504 | yamcs/yamcs-python | yamcs-client/yamcs/client.py | YamcsClient.create_event_subscription | python | train | https://github.com/yamcs/yamcs-python/blob/1082fee8a299010cc44416bbb7518fac0ef08b48/yamcs-client/yamcs/client.py#L556-L589

    def create_event_subscription(self, instance, on_data, timeout=60):
        """
        Create a new subscription for receiving events of an instance.

        This method returns a future, then returns immediately. Stop the
        subscription by canceling the future.

        :param str instance: A Yamcs instance name
        :param on_data: Function that gets called on each :class:`.Event`.
        :type on_data: Optional[Callable[.Event])
        :param timeout: The amount of seconds to wait for the request to
                        complete.
        :type timeout: Optional[float]
        :return: Future that can be used to manage the background websocket
                 subscription.
        :rtype: .WebSocketSubscriptionFuture
        """
        manager = WebSocketSubscriptionManager(self, resource='events')

        # Represent subscription as a future
        subscription = WebSocketSubscriptionFuture(manager)

        wrapped_callback = functools.partial(
            _wrap_callback_parse_event, on_data)

        manager.open(wrapped_callback, instance)

        # Wait until a reply or exception is received
        subscription.reply(timeout=timeout)

        return subscription
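
A short usage sketch for `create_event_subscription` above; the server address and instance name are placeholders:

    from yamcs.client import YamcsClient

    client = YamcsClient('localhost:8090')   # placeholder address

    def on_event(event):
        # Called once per received Event.
        print(event)

    subscription = client.create_event_subscription('simulator', on_data=on_event)
    # ... later, stop the background websocket subscription:
    subscription.cancel()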
7,505 | zhmcclient/python-zhmcclient | zhmcclient_mock/_hmc.py | FakedMetricsContext.get_metric_group_infos | python | train | https://github.com/zhmcclient/python-zhmcclient/blob/9657563e5d9184c51d3c903442a58b9725fdf335/zhmcclient_mock/_hmc.py#L3022-L3047

    def get_metric_group_infos(self):
        """
        Get the faked metric group definitions for this context object
        that are to be returned from its create operation, in the format
        needed for the "Create Metrics Context" operation response.

        Returns:

          "metric-group-infos" JSON object as described for the "Create Metrics
          Context "operation response.
        """
        mg_defs = self.get_metric_group_definitions()
        mg_infos = []
        for mg_def in mg_defs:
            metric_infos = []
            for metric_name, metric_type in mg_def.types:
                metric_infos.append({
                    'metric-name': metric_name,
                    'metric-type': metric_type,
                })
            mg_info = {
                'group-name': mg_def.name,
                'metric-infos': metric_infos,
            }
            mg_infos.append(mg_info)
        return mg_infos
7,506 | kodexlab/reliure | reliure/web.py | EngineView.add_output | python | train | https://github.com/kodexlab/reliure/blob/0450c7a9254c5c003162738458bbe0c49e777ba5/reliure/web.py#L115-L130

    def add_output(self, out_name, type_or_serialize=None, **kwargs):
        """ Declare an output
        """
        if out_name not in self.engine.all_outputs():
            raise ValueError("'%s' is not generated by the engine %s" % (out_name, self.engine.all_outputs()))
        if type_or_serialize is None:
            type_or_serialize = GenericType()
        if not isinstance(type_or_serialize, GenericType) and callable(type_or_serialize):
            type_or_serialize = GenericType(serialize=type_or_serialize)
        elif not isinstance(type_or_serialize, GenericType):
            raise ValueError("the given 'type_or_serialize' is invalid")
        # register outpurs
        self._outputs[out_name] = {
            'serializer': type_or_serialize,
            'parameters': kwargs if kwargs else {}
        }
7,507 | googlefonts/fontbakery | Lib/fontbakery/profiles/googlefonts.py | com_google_fonts_check_metadata_canonical_weight_value | python | train | https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/profiles/googlefonts.py#L2198-L2208

    def com_google_fonts_check_metadata_canonical_weight_value(font_metadata):
        """METADATA.pb: Check that font weight has a canonical value."""
        first_digit = font_metadata.weight / 100
        if (font_metadata.weight % 100) != 0 or \
           (first_digit < 1 or first_digit > 9):
            yield FAIL, ("METADATA.pb: The weight is declared"
                         " as {} which is not a"
                         " multiple of 100"
                         " between 100 and 900.").format(font_metadata.weight)
        else:
            yield PASS, "Font weight has a canonical value."
7,508 | dagwieers/vmguestlib | vmguestlib.py | VMGuestLib.GetMemActiveMB | python | train | https://github.com/dagwieers/vmguestlib/blob/2ba9333a745628cf9e6b4c767427a5bd997a71ad/vmguestlib.py#L307-L313

    def GetMemActiveMB(self):
        '''Retrieves the amount of memory the virtual machine is actively using its
        estimated working set size.'''
        counter = c_uint()
        ret = vmGuestLib.VMGuestLib_GetMemActiveMB(self.handle.value, byref(counter))
        if ret != VMGUESTLIB_ERROR_SUCCESS: raise VMGuestLibException(ret)
        return counter.value
7,509 | limpyd/redis-limpyd-extensions | limpyd_extensions/dynamic/related.py | DynamicRelatedFieldMixin.get_name_for | python | train | https://github.com/limpyd/redis-limpyd-extensions/blob/13f34e39efd2f802761457da30ab2a4213b63934/limpyd_extensions/dynamic/related.py#L67-L73

    def get_name_for(self, dynamic_part):
        """
        Return the name for the current dynamic field, accepting a limpyd
        instance for the dynamic part
        """
        dynamic_part = self.from_python(dynamic_part)
        return super(DynamicRelatedFieldMixin, self).get_name_for(dynamic_part)
7,510 | twilio/twilio-python | twilio/rest/__init__.py | Client.accounts | python | train | https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/__init__.py#L133-L143

    def accounts(self):
        """
        Access the Accounts Twilio Domain

        :returns: Accounts Twilio Domain
        :rtype: twilio.rest.accounts.Accounts
        """
        if self._accounts is None:
            from twilio.rest.accounts import Accounts
            self._accounts = Accounts(self)
        return self._accounts
7,511 | corpusops/pdbclone | lib/pdb_clone/pdb.py | Pdb.do_break | python | train | https://github.com/corpusops/pdbclone/blob/f781537c243a4874b246d43dbdef8c4279f0094d/lib/pdb_clone/pdb.py#L894-L970

    def do_break(self, arg, temporary = 0):
        """b(reak) [ ([filename:]lineno | function) [, condition] ]

        Without argument, list all breaks.

        With a line number argument, set a break at this line in the
        current file. With a function name, set a break at the first
        executable line of that function. If a second argument is
        present, it is a string specifying an expression which must
        evaluate to true before the breakpoint is honored.

        The line number may be prefixed with a filename and a colon,
        to specify a breakpoint in another file (probably one that
        hasn't been loaded yet). The file is searched for on
        sys.path; the .py suffix may be omitted.
        """
        if not arg:
            all_breaks = '\n'.join(bp.bpformat() for bp in
                                   bdb.Breakpoint.bpbynumber if bp)
            if all_breaks:
                self.message("Num Type Disp Enb Where")
                self.message(all_breaks)
            return

        # Parse arguments, comma has lowest precedence and cannot occur in
        # filename.
        args = arg.rsplit(',', 1)
        cond = args[1].strip() if len(args) == 2 else None

        # Parse stuff before comma: [filename:]lineno | function.
        args = args[0].rsplit(':', 1)
        name = args[0].strip()
        lineno = args[1] if len(args) == 2 else args[0]

        try:
            lineno = int(lineno)
        except ValueError:
            if len(args) == 2:
                self.error('Bad lineno: "{}".'.format(lineno))
            else:
                # Attempt the list of possible function or method fully
                # qualified names and corresponding filenames.
                candidates = get_fqn_fname(name, self.curframe)
                for fqn, fname in candidates:
                    try:
                        bp = self.set_break(fname, None, temporary, cond, fqn)
                        self.message('Breakpoint {:d} at {}:{:d}'.format(
                            bp.number, bp.file, bp.line))
                        return
                    except bdb.BdbError:
                        pass
                if not candidates:
                    self.error(
                        'Not a function or a built-in: "{}"'.format(name))
                else:
                    self.error('Bad name: "{}".'.format(name))
        else:
            filename = self.curframe.f_code.co_filename
            if len(args) == 2 and name:
                filename = name
                if filename.startswith('<') and filename.endswith('>'):
                    # allow <doctest name>: doctest installs a hook at
                    # linecache.getlines to allow <doctest name> to be
                    # linecached and readable.
                    if filename == '<string>' and self.mainpyfile:
                        filename = self.mainpyfile
                else:
                    root, ext = os.path.splitext(filename)
                    if ext == '':
                        filename = filename + '.py'
                    if not os.path.exists(filename):
                        self.error('Bad filename: "{}".'.format(arg))
                        return
            try:
                bp = self.set_break(filename, lineno, temporary, cond)
            except bdb.BdbError as err:
                self.error(err)
            else:
                self.message('Breakpoint {:d} at {}:{:d}'.format(
                    bp.number, bp.file, bp.line))
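
The argument forms accepted by the docstring above, shown as hypothetical debugger-prompt invocations (file names, function names and line numbers are made up):

    (Pdb) break                     # no argument: list all breakpoints
    (Pdb) break 42                  # line 42 of the current file
    (Pdb) break utils.py:10         # line 10 of utils.py, searched on sys.path
    (Pdb) break myfunc              # first executable line of a function
    (Pdb) break 42, count > 3       # second argument is a condition expression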
7,512 | ray-project/ray | python/ray/worker.py | register_custom_serializer | python | train | https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/worker.py#L2048-L2148

    def register_custom_serializer(cls,
                                   use_pickle=False,
                                   use_dict=False,
                                   serializer=None,
                                   deserializer=None,
                                   local=False,
                                   driver_id=None,
                                   class_id=None):
        """Enable serialization and deserialization for a particular class.

        This method runs the register_class function defined below on every worker,
        which will enable ray to properly serialize and deserialize objects of
        this class.

        Args:
            cls (type): The class that ray should use this custom serializer for.
            use_pickle (bool): If true, then objects of this class will be
                serialized using pickle.
            use_dict: If true, then objects of this class be serialized turning
                their __dict__ fields into a dictionary. Must be False if
                use_pickle is true.
            serializer: The custom serializer to use. This should be provided if
                and only if use_pickle and use_dict are False.
            deserializer: The custom deserializer to use. This should be provided
                if and only if use_pickle and use_dict are False.
            local: True if the serializers should only be registered on the current
                worker. This should usually be False.
            driver_id: ID of the driver that we want to register the class for.
            class_id: ID of the class that we are registering. If this is not
                specified, we will calculate a new one inside the function.

        Raises:
            Exception: An exception is raised if pickle=False and the class cannot
                be efficiently serialized by Ray. This can also raise an exception
                if use_dict is true and cls is not pickleable.
        """
        worker = global_worker
        assert (serializer is None) == (deserializer is None), (
            "The serializer/deserializer arguments must both be provided or "
            "both not be provided.")
        use_custom_serializer = (serializer is not None)

        assert use_custom_serializer + use_pickle + use_dict == 1, (
            "Exactly one of use_pickle, use_dict, or serializer/deserializer must "
            "be specified.")

        if use_dict:
            # Raise an exception if cls cannot be serialized efficiently by Ray.
            serialization.check_serializable(cls)

        if class_id is None:
            if not local:
                # In this case, the class ID will be used to deduplicate the class
                # across workers. Note that cloudpickle unfortunately does not
                # produce deterministic strings, so these IDs could be different
                # on different workers. We could use something weaker like
                # cls.__name__, however that would run the risk of having
                # collisions.
                # TODO(rkn): We should improve this.
                try:
                    # Attempt to produce a class ID that will be the same on each
                    # worker. However, determinism is not guaranteed, and the
                    # result may be different on different workers.
                    class_id = _try_to_compute_deterministic_class_id(cls)
                except Exception:
                    raise serialization.CloudPickleError("Failed to pickle class "
                                                         "'{}'".format(cls))
            else:
                # In this case, the class ID only needs to be meaningful on this
                # worker and not across workers.
                class_id = _random_string()

        # Make sure class_id is a string.
        class_id = ray.utils.binary_to_hex(class_id)

        if driver_id is None:
            driver_id = worker.task_driver_id
        assert isinstance(driver_id, DriverID)

        def register_class_for_serialization(worker_info):
            # TODO(rkn): We need to be more thoughtful about what to do if custom
            # serializers have already been registered for class_id. In some cases,
            # we may want to use the last user-defined serializers and ignore
            # subsequent calls to register_custom_serializer that were made by the
            # system.
            serialization_context = worker_info[
                "worker"].get_serialization_context(driver_id)
            serialization_context.register_type(
                cls,
                class_id,
                pickle=use_pickle,
                custom_serializer=serializer,
                custom_deserializer=deserializer)

        if not local:
            worker.run_function_on_all_workers(register_class_for_serialization)
        else:
            # Since we are pickling objects of this class, we don't actually need
            # to ship the class definition.
            register_class_for_serialization({"worker": worker})
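
A hedged usage sketch. It assumes the function is exposed as `ray.register_custom_serializer`, as in the Ray releases this record was taken from, and uses a made-up class:

    import ray

    class Foo:
        def __init__(self, value):
            self.value = value

    ray.init()

    # Exactly one of use_pickle, use_dict, or a serializer/deserializer
    # pair may be given; here a custom pair is used.
    ray.register_custom_serializer(
        Foo,
        serializer=lambda obj: obj.value,
        deserializer=lambda value: Foo(value))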
7,513 | fr33jc/bang | bang/providers/aws.py | EC2.find_servers | python | train | https://github.com/fr33jc/bang/blob/8f000713f88d2a9a8c1193b63ca10a6578560c16/bang/providers/aws.py#L133-L155

    def find_servers(self, tags, running=True):
        """
        Returns any servers in the region that have tags that match the
        key-value pairs in :attr:`tags`.

        :param Mapping tags: A mapping object in which the keys are the tag
            names and the values are the tag values.

        :param bool running: A flag to limit server list to instances that are
            actually *running*.

        :rtype: :class:`list` of :class:`dict` objects. Each :class:`dict`
            describes a single server instance.
        """
        filters = dict([('tag:%s' % key, val) for key, val in tags.items()])
        if running:
            filters['instance-state-name'] = 'running'
        res = self.ec2.get_all_instances(filters=filters)
        instances = [server_to_dict(i) for r in res for i in r.instances]
        log.debug('instances: %s' % instances)
        return instances
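
The tag-to-filter translation in `find_servers` works like this (tag names are examples only):

    tags = {'stack': 'web', 'env': 'prod'}
    filters = dict([('tag:%s' % key, val) for key, val in tags.items()])
    filters['instance-state-name'] = 'running'   # added when running=True
    print(filters)
    # {'tag:stack': 'web', 'tag:env': 'prod', 'instance-state-name': 'running'}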
7,514 | abnerjacobsen/tinydb-jsonorm | src/tinydb_jsonorm/cuid.py | _pad | python | train | https://github.com/abnerjacobsen/tinydb-jsonorm/blob/704d3f887cc8963769ffbb116eb7e6909deeaecd/src/tinydb_jsonorm/cuid.py#L38-L48

    def _pad(string, size):
        """
        'Pad' a string with leading zeroes to fit the given size, truncating
        if necessary.
        """
        strlen = len(string)
        if strlen == size:
            return string
        if strlen < size:
            return _padding[0:size-strlen] + string
        return string[-size:]
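
Behaviour sketch for `_pad`, assuming the module-level `_padding` constant is a run of '0' characters as the docstring implies:

    _padding = '0' * 64   # assumption about the module-level constant

    assert _pad('abc', 5) == '00abc'     # shorter input: left-padded with zeroes
    assert _pad('abcdef', 4) == 'cdef'   # longer input: truncated from the left
    assert _pad('abcd', 4) == 'abcd'     # exact fit: returned unchanged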
7,515 | brutasse/rache | rache/__init__.py | schedule_job | python | train | https://github.com/brutasse/rache/blob/fa9cf073376a8c731a13924b84fb8422a771a4ab/rache/__init__.py#L48-L87

    def schedule_job(job_id, schedule_in, connection=None, **kwargs):
        """Schedules a job.

        :param job_id: unique identifier for this job
        :param schedule_in: number of seconds from now in which to schedule the
            job or timedelta object.
        :param **kwargs: parameters to attach to the job, key-value structure.

        >>> schedule_job('http://example.com/test', schedule_in=10, num_retries=10)
        """
        if not isinstance(schedule_in, int):  # assumed to be a timedelta
            schedule_in = schedule_in.days * 3600 * 24 + schedule_in.seconds
        schedule_at = int(time.time()) + schedule_in

        if connection is None:
            connection = r

        if 'id' in kwargs:
            raise RuntimeError("'id' is a reserved key for the job ID")

        with connection.pipeline() as pipe:
            if schedule_at is not None:
                args = (schedule_at, job_id)
                if isinstance(connection, redis.Redis):
                    # StrictRedis or Redis don't have the same argument order
                    args = (job_id, schedule_at)
                pipe.zadd(REDIS_KEY, *args)
            delete = []
            hmset = {}
            for key, value in kwargs.items():
                if value is None:
                    delete.append(key)
                else:
                    hmset[key] = value
            if hmset:
                pipe.hmset(job_key(job_id), hmset)
            if len(delete) > 0:
                pipe.hdel(job_key(job_id), *delete)
            pipe.execute()
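
A usage sketch expanding on the doctest in the docstring; the job id and parameters are arbitrary:

    from datetime import timedelta
    from rache import schedule_job

    # Run in 10 seconds, attaching one parameter to the job hash.
    schedule_job('http://example.com/test', schedule_in=10, num_retries=10)

    # A timedelta is also accepted, and passing a parameter as None removes it.
    schedule_job('http://example.com/test', schedule_in=timedelta(hours=1),
                 num_retries=None)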
7,516 | facelessuser/bracex | bracex/__init__.py | ExpandBrace.get_escape | python | train | https://github.com/facelessuser/bracex/blob/1fdf83e2bdfb939e78ba9966bcef80cd7a5c8534/bracex/__init__.py#L159-L166

    def get_escape(self, c, i):
        """Get an escape."""
        try:
            escaped = next(i)
        except StopIteration:
            escaped = ''
        return c + escaped if self.keep_escapes else escaped
7,517 | aio-libs/yarl | yarl/__init__.py | URL.is_default_port | python | train | https://github.com/aio-libs/yarl/blob/e47da02c00ad764e030ca7647a9565548c97d362/yarl/__init__.py#L332-L345

    def is_default_port(self):
        """A check for default port.

        Return True if port is default for specified scheme,
        e.g. 'http://python.org' or 'http://python.org:80', False
        otherwise.
        """
        if self.port is None:
            return False
        default = DEFAULT_PORTS.get(self.scheme)
        if default is None:
            return False
        return self.port == default
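
Doctest-style illustration of the cases named in the docstring:

    from yarl import URL

    URL('http://python.org').is_default_port()        # True (port 80 implied)
    URL('http://python.org:80').is_default_port()     # True
    URL('http://python.org:8080').is_default_port()   # False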
7,518 | googlemaps/google-maps-services-python | googlemaps/geocoding.py | reverse_geocode | def reverse_geocode(client, latlng, result_type=None, location_type=None,
language=None):
"""
Reverse geocoding is the process of converting geographic coordinates into a
human-readable address.
:param latlng: The latitude/longitude value or place_id for which you wish
to obtain the closest, human-readable address.
:type latlng: string, dict, list, or tuple
:param result_type: One or more address types to restrict results to.
:type result_type: string or list of strings
:param location_type: One or more location types to restrict results to.
:type location_type: list of strings
:param language: The language in which to return results.
:type language: string
:rtype: list of reverse geocoding results.
"""
# Check if latlng param is a place_id string.
# place_id strings do not contain commas; latlng strings do.
if convert.is_string(latlng) and ',' not in latlng:
params = {"place_id": latlng}
else:
params = {"latlng": convert.latlng(latlng)}
if result_type:
params["result_type"] = convert.join_list("|", result_type)
if location_type:
params["location_type"] = convert.join_list("|", location_type)
if language:
params["language"] = language
return client._request("/maps/api/geocode/json", params).get("results", []) | python | def reverse_geocode(client, latlng, result_type=None, location_type=None,
language=None):
"""
Reverse geocoding is the process of converting geographic coordinates into a
human-readable address.
:param latlng: The latitude/longitude value or place_id for which you wish
to obtain the closest, human-readable address.
:type latlng: string, dict, list, or tuple
:param result_type: One or more address types to restrict results to.
:type result_type: string or list of strings
:param location_type: One or more location types to restrict results to.
:type location_type: list of strings
:param language: The language in which to return results.
:type language: string
:rtype: list of reverse geocoding results.
"""
# Check if latlng param is a place_id string.
# place_id strings do not contain commas; latlng strings do.
if convert.is_string(latlng) and ',' not in latlng:
params = {"place_id": latlng}
else:
params = {"latlng": convert.latlng(latlng)}
if result_type:
params["result_type"] = convert.join_list("|", result_type)
if location_type:
params["location_type"] = convert.join_list("|", location_type)
if language:
params["language"] = language
return client._request("/maps/api/geocode/json", params).get("results", []) | ['def', 'reverse_geocode', '(', 'client', ',', 'latlng', ',', 'result_type', '=', 'None', ',', 'location_type', '=', 'None', ',', 'language', '=', 'None', ')', ':', '# Check if latlng param is a place_id string.', '# place_id strings do not contain commas; latlng strings do.', 'if', 'convert', '.', 'is_string', '(', 'latlng', ')', 'and', "','", 'not', 'in', 'latlng', ':', 'params', '=', '{', '"place_id"', ':', 'latlng', '}', 'else', ':', 'params', '=', '{', '"latlng"', ':', 'convert', '.', 'latlng', '(', 'latlng', ')', '}', 'if', 'result_type', ':', 'params', '[', '"result_type"', ']', '=', 'convert', '.', 'join_list', '(', '"|"', ',', 'result_type', ')', 'if', 'location_type', ':', 'params', '[', '"location_type"', ']', '=', 'convert', '.', 'join_list', '(', '"|"', ',', 'location_type', ')', 'if', 'language', ':', 'params', '[', '"language"', ']', '=', 'language', 'return', 'client', '.', '_request', '(', '"/maps/api/geocode/json"', ',', 'params', ')', '.', 'get', '(', '"results"', ',', '[', ']', ')'] | Reverse geocoding is the process of converting geographic coordinates into a
human-readable address.
:param latlng: The latitude/longitude value or place_id for which you wish
to obtain the closest, human-readable address.
:type latlng: string, dict, list, or tuple
:param result_type: One or more address types to restrict results to.
:type result_type: string or list of strings
:param location_type: One or more location types to restrict results to.
:type location_type: list of strings
:param language: The language in which to return results.
:type language: string
:rtype: list of reverse geocoding results. | ['Reverse', 'geocoding', 'is', 'the', 'process', 'of', 'converting', 'geographic', 'coordinates', 'into', 'a', 'human', '-', 'readable', 'address', '.'] | train | https://github.com/googlemaps/google-maps-services-python/blob/7ed40b4d8df63479794c46ce29d03ed6083071d7/googlemaps/geocoding.py#L71-L109 |
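
A minimal usage sketch for the ``reverse_geocode`` record above (7,518). The client construction, API key, and coordinates below are illustrative placeholders, not part of the record::

    import googlemaps

    gmaps = googlemaps.Client(key="YOUR_API_KEY")  # placeholder key
    # latlng may be a (lat, lng) tuple, a dict, a list, or a place_id string
    results = gmaps.reverse_geocode((40.714224, -73.961452), result_type="street_address")
    if results:
        print(results[0]["formatted_address"])
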
7,519 | tkf/rash | rash/daemon.py | daemon_run | def daemon_run(no_error, restart, record_path, keep_json, check_duplicate,
use_polling, log_level):
"""
Run RASH index daemon.
This daemon watches the directory ``~/.config/rash/data/record``
and translates the JSON files dumped by the ``record`` command into
sqlite3 DB at ``~/.config/rash/data/db.sqlite``.
``rash init`` will start RASH automatically by default.
But there are alternative ways to start the daemon.
If you want to organize background processes in one place, such
as under supervisord_, it is good to add the `--restart` option so that
any daemon process you accidentally started elsewhere is stopped
first. Here is an example supervisord_ setup::
[program:rash-daemon]
command=rash daemon --restart
.. _supervisord: http://supervisord.org/
Alternatively, you can call ``rash index`` in cron job to
avoid using daemon. It is useful if you want to use RASH
on NFS, as it looks like watchdog does not work on NFS.::
# Refresh RASH DB every 10 minutes
*/10 * * * * rash index
"""
# Probably it makes sense to use this daemon to provide search
# API, so that this daemon is going to be the only process that
# is connected to the DB?
from .config import ConfigStore
from .indexer import Indexer
from .log import setup_daemon_log_file, LogForTheFuture
from .watchrecord import watch_record, install_sigterm_handler
install_sigterm_handler()
cfstore = ConfigStore()
if log_level:
cfstore.daemon_log_level = log_level
flogger = LogForTheFuture()
# SOMEDAY: make PID checking/writing atomic if possible
flogger.debug('Checking old PID file %r.', cfstore.daemon_pid_path)
if os.path.exists(cfstore.daemon_pid_path):
flogger.debug('Old PID file exists. Reading from it.')
with open(cfstore.daemon_pid_path, 'rt') as f:
pid = int(f.read().strip())
flogger.debug('Checking if old process with PID=%d is alive', pid)
try:
os.kill(pid, 0) # check if `pid` is alive
except OSError:
flogger.info(
'Process with PID=%d is already dead. '
'So just go on and use this daemon.', pid)
else:
if restart:
flogger.info('Stopping old daemon with PID=%d.', pid)
stop_running_daemon(cfstore, pid)
else:
message = ('There is already a running daemon (PID={0})!'
.format(pid))
if no_error:
flogger.debug(message)
# FIXME: Setup log handler and flogger.dump().
# Note that using the default log file is not safe
# since it has already been used.
return
else:
raise RuntimeError(message)
else:
flogger.debug('Daemon PID file %r does not exists. '
'So just go on and use this daemon.',
cfstore.daemon_pid_path)
with open(cfstore.daemon_pid_path, 'w') as f:
f.write(str(os.getpid()))
try:
setup_daemon_log_file(cfstore)
flogger.dump()
indexer = Indexer(cfstore, check_duplicate, keep_json, record_path)
indexer.index_all()
watch_record(indexer, use_polling)
finally:
os.remove(cfstore.daemon_pid_path) | python | def daemon_run(no_error, restart, record_path, keep_json, check_duplicate,
use_polling, log_level):
"""
Run RASH index daemon.
This daemon watches the directory ``~/.config/rash/data/record``
and translates the JSON files dumped by the ``record`` command into
sqlite3 DB at ``~/.config/rash/data/db.sqlite``.
``rash init`` will start RASH automatically by default.
But there are alternative ways to start the daemon.
If you want to organize background processes in one place, such
as under supervisord_, it is good to add the `--restart` option so that
any daemon process you accidentally started elsewhere is stopped
first. Here is an example supervisord_ setup::
[program:rash-daemon]
command=rash daemon --restart
.. _supervisord: http://supervisord.org/
Alternatively, you can call ``rash index`` in cron job to
avoid using daemon. It is useful if you want to use RASH
on NFS, as it looks like watchdog does not work on NFS.::
# Refresh RASH DB every 10 minutes
*/10 * * * * rash index
"""
# Probably it makes sense to use this daemon to provide search
# API, so that this daemon is going to be the only process that
# is connected to the DB?
from .config import ConfigStore
from .indexer import Indexer
from .log import setup_daemon_log_file, LogForTheFuture
from .watchrecord import watch_record, install_sigterm_handler
install_sigterm_handler()
cfstore = ConfigStore()
if log_level:
cfstore.daemon_log_level = log_level
flogger = LogForTheFuture()
# SOMEDAY: make PID checking/writing atomic if possible
flogger.debug('Checking old PID file %r.', cfstore.daemon_pid_path)
if os.path.exists(cfstore.daemon_pid_path):
flogger.debug('Old PID file exists. Reading from it.')
with open(cfstore.daemon_pid_path, 'rt') as f:
pid = int(f.read().strip())
flogger.debug('Checking if old process with PID=%d is alive', pid)
try:
os.kill(pid, 0) # check if `pid` is alive
except OSError:
flogger.info(
'Process with PID=%d is already dead. '
'So just go on and use this daemon.', pid)
else:
if restart:
flogger.info('Stopping old daemon with PID=%d.', pid)
stop_running_daemon(cfstore, pid)
else:
message = ('There is already a running daemon (PID={0})!'
.format(pid))
if no_error:
flogger.debug(message)
# FIXME: Setup log handler and flogger.dump().
# Note that using the default log file is not safe
# since it has already been used.
return
else:
raise RuntimeError(message)
else:
flogger.debug('Daemon PID file %r does not exists. '
'So just go on and use this daemon.',
cfstore.daemon_pid_path)
with open(cfstore.daemon_pid_path, 'w') as f:
f.write(str(os.getpid()))
try:
setup_daemon_log_file(cfstore)
flogger.dump()
indexer = Indexer(cfstore, check_duplicate, keep_json, record_path)
indexer.index_all()
watch_record(indexer, use_polling)
finally:
os.remove(cfstore.daemon_pid_path) | ['def', 'daemon_run', '(', 'no_error', ',', 'restart', ',', 'record_path', ',', 'keep_json', ',', 'check_duplicate', ',', 'use_polling', ',', 'log_level', ')', ':', '# Probably it makes sense to use this daemon to provide search', '# API, so that this daemon is going to be the only process that', '# is connected to the DB?', 'from', '.', 'config', 'import', 'ConfigStore', 'from', '.', 'indexer', 'import', 'Indexer', 'from', '.', 'log', 'import', 'setup_daemon_log_file', ',', 'LogForTheFuture', 'from', '.', 'watchrecord', 'import', 'watch_record', ',', 'install_sigterm_handler', 'install_sigterm_handler', '(', ')', 'cfstore', '=', 'ConfigStore', '(', ')', 'if', 'log_level', ':', 'cfstore', '.', 'daemon_log_level', '=', 'log_level', 'flogger', '=', 'LogForTheFuture', '(', ')', '# SOMEDAY: make PID checking/writing atomic if possible', 'flogger', '.', 'debug', '(', "'Checking old PID file %r.'", ',', 'cfstore', '.', 'daemon_pid_path', ')', 'if', 'os', '.', 'path', '.', 'exists', '(', 'cfstore', '.', 'daemon_pid_path', ')', ':', 'flogger', '.', 'debug', '(', "'Old PID file exists. Reading from it.'", ')', 'with', 'open', '(', 'cfstore', '.', 'daemon_pid_path', ',', "'rt'", ')', 'as', 'f', ':', 'pid', '=', 'int', '(', 'f', '.', 'read', '(', ')', '.', 'strip', '(', ')', ')', 'flogger', '.', 'debug', '(', "'Checking if old process with PID=%d is alive'", ',', 'pid', ')', 'try', ':', 'os', '.', 'kill', '(', 'pid', ',', '0', ')', '# check if `pid` is alive', 'except', 'OSError', ':', 'flogger', '.', 'info', '(', "'Process with PID=%d is already dead. '", "'So just go on and use this daemon.'", ',', 'pid', ')', 'else', ':', 'if', 'restart', ':', 'flogger', '.', 'info', '(', "'Stopping old daemon with PID=%d.'", ',', 'pid', ')', 'stop_running_daemon', '(', 'cfstore', ',', 'pid', ')', 'else', ':', 'message', '=', '(', "'There is already a running daemon (PID={0})!'", '.', 'format', '(', 'pid', ')', ')', 'if', 'no_error', ':', 'flogger', '.', 'debug', '(', 'message', ')', '# FIXME: Setup log handler and flogger.dump().', '# Note that using the default log file is not safe', '# since it has already been used.', 'return', 'else', ':', 'raise', 'RuntimeError', '(', 'message', ')', 'else', ':', 'flogger', '.', 'debug', '(', "'Daemon PID file %r does not exists. '", "'So just go on and use this daemon.'", ',', 'cfstore', '.', 'daemon_pid_path', ')', 'with', 'open', '(', 'cfstore', '.', 'daemon_pid_path', ',', "'w'", ')', 'as', 'f', ':', 'f', '.', 'write', '(', 'str', '(', 'os', '.', 'getpid', '(', ')', ')', ')', 'try', ':', 'setup_daemon_log_file', '(', 'cfstore', ')', 'flogger', '.', 'dump', '(', ')', 'indexer', '=', 'Indexer', '(', 'cfstore', ',', 'check_duplicate', ',', 'keep_json', ',', 'record_path', ')', 'indexer', '.', 'index_all', '(', ')', 'watch_record', '(', 'indexer', ',', 'use_polling', ')', 'finally', ':', 'os', '.', 'remove', '(', 'cfstore', '.', 'daemon_pid_path', ')'] | Run RASH index daemon.
This daemon watches the directory ``~/.config/rash/data/record``
and translates the JSON files dumped by the ``record`` command into
sqlite3 DB at ``~/.config/rash/data/db.sqlite``.
``rash init`` will start RASH automatically by default.
But there are alternative ways to start the daemon.
If you want to organize background processes in one place, such
as under supervisord_, it is good to add the `--restart` option so that
any daemon process you accidentally started elsewhere is stopped
first. Here is an example supervisord_ setup::
[program:rash-daemon]
command=rash daemon --restart
.. _supervisord: http://supervisord.org/
Alternatively, you can call ``rash index`` in cron job to
avoid using daemon. It is useful if you want to use RASH
on NFS, as it looks like watchdog does not work on NFS.::
# Refresh RASH DB every 10 minutes
*/10 * * * * rash index | ['Run', 'RASH', 'index', 'daemon', '.'] | train | https://github.com/tkf/rash/blob/585da418ec37dd138f1a4277718b6f507e9536a2/rash/daemon.py#L20-L107 |
7,520 | QuantEcon/QuantEcon.py | quantecon/optimize/root_finding.py | bisect | def bisect(f, a, b, args=(), xtol=_xtol,
rtol=_rtol, maxiter=_iter, disp=True):
"""
Find root of a function within an interval adapted from Scipy's bisect.
Basic bisection routine to find a zero of the function `f` between the
arguments `a` and `b`. `f(a)` and `f(b)` cannot have the same signs.
`f` must be jitted via numba.
Parameters
----------
f : jitted and callable
Python function returning a number. `f` must be continuous.
a : number
One end of the bracketing interval [a,b].
b : number
The other end of the bracketing interval [a,b].
args : tuple, optional(default=())
Extra arguments to be used in the function call.
xtol : number, optional(default=2e-12)
The computed root ``x0`` will satisfy ``np.allclose(x, x0,
atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The
parameter must be nonnegative.
rtol : number, optional(default=4*np.finfo(float).eps)
The computed root ``x0`` will satisfy ``np.allclose(x, x0,
atol=xtol, rtol=rtol)``, where ``x`` is the exact root.
maxiter : number, optional(default=100)
Maximum number of iterations.
disp : bool, optional(default=True)
If True, raise a RuntimeError if the algorithm didn't converge.
Returns
-------
results : namedtuple
"""
if xtol <= 0:
raise ValueError("xtol is too small (<= 0)")
if maxiter < 1:
raise ValueError("maxiter must be greater than 0")
# Convert to float
xa = a * 1.0
xb = b * 1.0
fa = f(xa, *args)
fb = f(xb, *args)
funcalls = 2
root, status = _bisect_interval(xa, xb, fa, fb)
# Check for sign error and early termination
if status == _ECONVERGED:
itr = 0
else:
# Perform bisection
dm = xb - xa
for itr in range(maxiter):
dm *= 0.5
xm = xa + dm
fm = f(xm, *args)
funcalls += 1
if fm * fa >= 0:
xa = xm
if fm == 0 or abs(dm) < xtol + rtol * abs(xm):
root = xm
status = _ECONVERGED
itr += 1
break
if disp and status == _ECONVERR:
raise RuntimeError("Failed to converge")
return _results((root, funcalls, itr, status)) | python | def bisect(f, a, b, args=(), xtol=_xtol,
rtol=_rtol, maxiter=_iter, disp=True):
"""
Find root of a function within an interval adapted from Scipy's bisect.
Basic bisection routine to find a zero of the function `f` between the
arguments `a` and `b`. `f(a)` and `f(b)` cannot have the same signs.
`f` must be jitted via numba.
Parameters
----------
f : jitted and callable
Python function returning a number. `f` must be continuous.
a : number
One end of the bracketing interval [a,b].
b : number
The other end of the bracketing interval [a,b].
args : tuple, optional(default=())
Extra arguments to be used in the function call.
xtol : number, optional(default=2e-12)
The computed root ``x0`` will satisfy ``np.allclose(x, x0,
atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The
parameter must be nonnegative.
rtol : number, optional(default=4*np.finfo(float).eps)
The computed root ``x0`` will satisfy ``np.allclose(x, x0,
atol=xtol, rtol=rtol)``, where ``x`` is the exact root.
maxiter : number, optional(default=100)
Maximum number of iterations.
disp : bool, optional(default=True)
If True, raise a RuntimeError if the algorithm didn't converge.
Returns
-------
results : namedtuple
"""
if xtol <= 0:
raise ValueError("xtol is too small (<= 0)")
if maxiter < 1:
raise ValueError("maxiter must be greater than 0")
# Convert to float
xa = a * 1.0
xb = b * 1.0
fa = f(xa, *args)
fb = f(xb, *args)
funcalls = 2
root, status = _bisect_interval(xa, xb, fa, fb)
# Check for sign error and early termination
if status == _ECONVERGED:
itr = 0
else:
# Perform bisection
dm = xb - xa
for itr in range(maxiter):
dm *= 0.5
xm = xa + dm
fm = f(xm, *args)
funcalls += 1
if fm * fa >= 0:
xa = xm
if fm == 0 or abs(dm) < xtol + rtol * abs(xm):
root = xm
status = _ECONVERGED
itr += 1
break
if disp and status == _ECONVERR:
raise RuntimeError("Failed to converge")
return _results((root, funcalls, itr, status)) | ['def', 'bisect', '(', 'f', ',', 'a', ',', 'b', ',', 'args', '=', '(', ')', ',', 'xtol', '=', '_xtol', ',', 'rtol', '=', '_rtol', ',', 'maxiter', '=', '_iter', ',', 'disp', '=', 'True', ')', ':', 'if', 'xtol', '<=', '0', ':', 'raise', 'ValueError', '(', '"xtol is too small (<= 0)"', ')', 'if', 'maxiter', '<', '1', ':', 'raise', 'ValueError', '(', '"maxiter must be greater than 0"', ')', '# Convert to float', 'xa', '=', 'a', '*', '1.0', 'xb', '=', 'b', '*', '1.0', 'fa', '=', 'f', '(', 'xa', ',', '*', 'args', ')', 'fb', '=', 'f', '(', 'xb', ',', '*', 'args', ')', 'funcalls', '=', '2', 'root', ',', 'status', '=', '_bisect_interval', '(', 'xa', ',', 'xb', ',', 'fa', ',', 'fb', ')', '# Check for sign error and early termination', 'if', 'status', '==', '_ECONVERGED', ':', 'itr', '=', '0', 'else', ':', '# Perform bisection', 'dm', '=', 'xb', '-', 'xa', 'for', 'itr', 'in', 'range', '(', 'maxiter', ')', ':', 'dm', '*=', '0.5', 'xm', '=', 'xa', '+', 'dm', 'fm', '=', 'f', '(', 'xm', ',', '*', 'args', ')', 'funcalls', '+=', '1', 'if', 'fm', '*', 'fa', '>=', '0', ':', 'xa', '=', 'xm', 'if', 'fm', '==', '0', 'or', 'abs', '(', 'dm', ')', '<', 'xtol', '+', 'rtol', '*', 'abs', '(', 'xm', ')', ':', 'root', '=', 'xm', 'status', '=', '_ECONVERGED', 'itr', '+=', '1', 'break', 'if', 'disp', 'and', 'status', '==', '_ECONVERR', ':', 'raise', 'RuntimeError', '(', '"Failed to converge"', ')', 'return', '_results', '(', '(', 'root', ',', 'funcalls', ',', 'itr', ',', 'status', ')', ')'] | Find root of a function within an interval adapted from Scipy's bisect.
Basic bisection routine to find a zero of the function `f` between the
arguments `a` and `b`. `f(a)` and `f(b)` cannot have the same signs.
`f` must be jitted via numba.
Parameters
----------
f : jitted and callable
Python function returning a number. `f` must be continuous.
a : number
One end of the bracketing interval [a,b].
b : number
The other end of the bracketing interval [a,b].
args : tuple, optional(default=())
Extra arguments to be used in the function call.
xtol : number, optional(default=2e-12)
The computed root ``x0`` will satisfy ``np.allclose(x, x0,
atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The
parameter must be nonnegative.
rtol : number, optional(default=4*np.finfo(float).eps)
The computed root ``x0`` will satisfy ``np.allclose(x, x0,
atol=xtol, rtol=rtol)``, where ``x`` is the exact root.
maxiter : number, optional(default=100)
Maximum number of iterations.
disp : bool, optional(default=True)
If True, raise a RuntimeError if the algorithm didn't converge.
Returns
-------
results : namedtuple | ['Find', 'root', 'of', 'a', 'function', 'within', 'an', 'interval', 'adapted', 'from', 'Scipy', 's', 'bisect', '.'] | train | https://github.com/QuantEcon/QuantEcon.py/blob/26a66c552f2a73967d7efb6e1f4b4c4985a12643/quantecon/optimize/root_finding.py#L297-L374 |
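
A usage sketch for the ``bisect`` record above (7,520). The import path and the result layout are assumptions based on the record's module location; ``f`` must be numba-jitted, as the docstring requires::

    from numba import njit
    from quantecon.optimize import bisect   # assumed import path

    @njit
    def f(x):
        return x ** 3 - 2.0   # sign change on [1, 2], so the bracket is valid

    res = bisect(f, 1.0, 2.0)
    print(res)   # namedtuple holding the root, function calls, iterations and status
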
7,521 | LudovicRousseau/pyscard | smartcard/wx/ReaderToolbar.py | ReaderComboBox.update | def update(self, observable, handlers):
"""Toolbar ReaderObserver callback that is notified when
readers are added or removed."""
addedreaders, removedreaders = handlers
for reader in addedreaders:
item = self.Append(str(reader))
self.SetClientData(item, reader)
for reader in removedreaders:
item = self.FindString(str(reader))
if wx.NOT_FOUND != item:
self.Delete(item)
selection = self.GetSelection() | python | def update(self, observable, handlers):
"""Toolbar ReaderObserver callback that is notified when
readers are added or removed."""
addedreaders, removedreaders = handlers
for reader in addedreaders:
item = self.Append(str(reader))
self.SetClientData(item, reader)
for reader in removedreaders:
item = self.FindString(str(reader))
if wx.NOT_FOUND != item:
self.Delete(item)
selection = self.GetSelection() | ['def', 'update', '(', 'self', ',', 'observable', ',', 'handlers', ')', ':', 'addedreaders', ',', 'removedreaders', '=', 'handlers', 'for', 'reader', 'in', 'addedreaders', ':', 'item', '=', 'self', '.', 'Append', '(', 'str', '(', 'reader', ')', ')', 'self', '.', 'SetClientData', '(', 'item', ',', 'reader', ')', 'for', 'reader', 'in', 'removedreaders', ':', 'item', '=', 'self', '.', 'FindString', '(', 'str', '(', 'reader', ')', ')', 'if', 'wx', '.', 'NOT_FOUND', '!=', 'item', ':', 'self', '.', 'Delete', '(', 'item', ')', 'selection', '=', 'self', '.', 'GetSelection', '(', ')'] | Toolbar ReaderObserver callback that is notified when
readers are added or removed. | ['Toolbar', 'ReaderObserver', 'callback', 'that', 'is', 'notified', 'when', 'readers', 'are', 'added', 'or', 'removed', '.'] | train | https://github.com/LudovicRousseau/pyscard/blob/62e675028086c75656444cc21d563d9f08ebf8e7/smartcard/wx/ReaderToolbar.py#L45-L56 |
7,522 | ultrabug/py3status | py3status/formatter.py | Block.render | def render(self, get_params, module, _if=None):
"""
render the block and return the output.
"""
enough = False
output = []
valid = None
if self.commands.show:
valid = True
if self.parent and self.commands.soft and _if is None:
return None, self
if _if:
valid = True
elif self.commands._if:
valid = self.check_valid(get_params)
if valid is not False:
for item in self.content:
if isinstance(item, Placeholder):
sub_valid, sub_output, enough = item.get(get_params, self)
output.append(sub_output)
elif isinstance(item, Literal):
sub_valid = None
enough = True
output.append(item.text)
elif isinstance(item, Block):
sub_valid, sub_output = item.render(get_params, module)
if sub_valid is None:
output.append(sub_output)
else:
output.extend(sub_output)
valid = valid or sub_valid
if not valid:
if self.next_block:
valid, output = self.next_block.render(
get_params, module, _if=self.commands._if
)
elif self.parent is None and (
(not self.next_block and enough) or self.base_block
):
valid = True
else:
output = []
# clean
color = self.commands.color
if color and color[0] != "#":
color_name = "color_%s" % color
threshold_color_name = "color_threshold_%s" % color
# substitute color
color = (
getattr(module, color_name, None)
or getattr(module, threshold_color_name, None)
or getattr(module.py3, color_name.upper(), None)
)
if color == "hidden":
return False, []
text = u""
out = []
if isinstance(output, str):
output = [output]
# merge as much output as we can.
# we need to convert values to unicode for concatination.
if python2:
conversion = unicode # noqa
convertables = (str, bool, int, float, unicode) # noqa
else:
conversion = str
convertables = (str, bool, int, float, bytes)
first = True
last_block = None
for index, item in enumerate(output):
is_block = isinstance(item, Block)
if not is_block and item:
last_block = None
if isinstance(item, convertables) or item is None:
text += conversion(item)
continue
elif text:
if not first and (text == "" or out and out[-1].get("color") == color):
out[-1]["full_text"] += text
else:
part = {"full_text": text}
if color:
part["color"] = color
out.append(part)
text = u""
if isinstance(item, Composite):
if color:
item.composite_update(item, {"color": color}, soft=True)
out.extend(item.get_content())
elif is_block:
# if this is a block then likely it is soft.
if not out:
continue
for x in range(index + 1, len(output)):
if output[x] and not isinstance(output[x], Block):
valid, _output = item.render(get_params, module, _if=True)
if _output and _output != last_block:
last_block = _output
out.extend(_output)
break
else:
if item:
out.append(item)
first = False
# add any left over text
if text:
part = {"full_text": text}
if color:
part["color"] = color
out.append(part)
# process any min/max length commands
max_length = self.commands.max_length
min_length = self.commands.min_length
if max_length or min_length:
for item in out:
if max_length is not None:
item["full_text"] = item["full_text"][:max_length]
max_length -= len(item["full_text"])
if min_length:
min_length -= len(item["full_text"])
if min_length > 0:
out[0]["full_text"] = u" " * min_length + out[0]["full_text"]
min_length = 0
return valid, out | python | def render(self, get_params, module, _if=None):
"""
render the block and return the output.
"""
enough = False
output = []
valid = None
if self.commands.show:
valid = True
if self.parent and self.commands.soft and _if is None:
return None, self
if _if:
valid = True
elif self.commands._if:
valid = self.check_valid(get_params)
if valid is not False:
for item in self.content:
if isinstance(item, Placeholder):
sub_valid, sub_output, enough = item.get(get_params, self)
output.append(sub_output)
elif isinstance(item, Literal):
sub_valid = None
enough = True
output.append(item.text)
elif isinstance(item, Block):
sub_valid, sub_output = item.render(get_params, module)
if sub_valid is None:
output.append(sub_output)
else:
output.extend(sub_output)
valid = valid or sub_valid
if not valid:
if self.next_block:
valid, output = self.next_block.render(
get_params, module, _if=self.commands._if
)
elif self.parent is None and (
(not self.next_block and enough) or self.base_block
):
valid = True
else:
output = []
# clean
color = self.commands.color
if color and color[0] != "#":
color_name = "color_%s" % color
threshold_color_name = "color_threshold_%s" % color
# substitute color
color = (
getattr(module, color_name, None)
or getattr(module, threshold_color_name, None)
or getattr(module.py3, color_name.upper(), None)
)
if color == "hidden":
return False, []
text = u""
out = []
if isinstance(output, str):
output = [output]
# merge as much output as we can.
# we need to convert values to unicode for concatination.
if python2:
conversion = unicode # noqa
convertables = (str, bool, int, float, unicode) # noqa
else:
conversion = str
convertables = (str, bool, int, float, bytes)
first = True
last_block = None
for index, item in enumerate(output):
is_block = isinstance(item, Block)
if not is_block and item:
last_block = None
if isinstance(item, convertables) or item is None:
text += conversion(item)
continue
elif text:
if not first and (text == "" or out and out[-1].get("color") == color):
out[-1]["full_text"] += text
else:
part = {"full_text": text}
if color:
part["color"] = color
out.append(part)
text = u""
if isinstance(item, Composite):
if color:
item.composite_update(item, {"color": color}, soft=True)
out.extend(item.get_content())
elif is_block:
# if this is a block then likely it is soft.
if not out:
continue
for x in range(index + 1, len(output)):
if output[x] and not isinstance(output[x], Block):
valid, _output = item.render(get_params, module, _if=True)
if _output and _output != last_block:
last_block = _output
out.extend(_output)
break
else:
if item:
out.append(item)
first = False
# add any left over text
if text:
part = {"full_text": text}
if color:
part["color"] = color
out.append(part)
# process any min/max length commands
max_length = self.commands.max_length
min_length = self.commands.min_length
if max_length or min_length:
for item in out:
if max_length is not None:
item["full_text"] = item["full_text"][:max_length]
max_length -= len(item["full_text"])
if min_length:
min_length -= len(item["full_text"])
if min_length > 0:
out[0]["full_text"] = u" " * min_length + out[0]["full_text"]
min_length = 0
return valid, out | ['def', 'render', '(', 'self', ',', 'get_params', ',', 'module', ',', '_if', '=', 'None', ')', ':', 'enough', '=', 'False', 'output', '=', '[', ']', 'valid', '=', 'None', 'if', 'self', '.', 'commands', '.', 'show', ':', 'valid', '=', 'True', 'if', 'self', '.', 'parent', 'and', 'self', '.', 'commands', '.', 'soft', 'and', '_if', 'is', 'None', ':', 'return', 'None', ',', 'self', 'if', '_if', ':', 'valid', '=', 'True', 'elif', 'self', '.', 'commands', '.', '_if', ':', 'valid', '=', 'self', '.', 'check_valid', '(', 'get_params', ')', 'if', 'valid', 'is', 'not', 'False', ':', 'for', 'item', 'in', 'self', '.', 'content', ':', 'if', 'isinstance', '(', 'item', ',', 'Placeholder', ')', ':', 'sub_valid', ',', 'sub_output', ',', 'enough', '=', 'item', '.', 'get', '(', 'get_params', ',', 'self', ')', 'output', '.', 'append', '(', 'sub_output', ')', 'elif', 'isinstance', '(', 'item', ',', 'Literal', ')', ':', 'sub_valid', '=', 'None', 'enough', '=', 'True', 'output', '.', 'append', '(', 'item', '.', 'text', ')', 'elif', 'isinstance', '(', 'item', ',', 'Block', ')', ':', 'sub_valid', ',', 'sub_output', '=', 'item', '.', 'render', '(', 'get_params', ',', 'module', ')', 'if', 'sub_valid', 'is', 'None', ':', 'output', '.', 'append', '(', 'sub_output', ')', 'else', ':', 'output', '.', 'extend', '(', 'sub_output', ')', 'valid', '=', 'valid', 'or', 'sub_valid', 'if', 'not', 'valid', ':', 'if', 'self', '.', 'next_block', ':', 'valid', ',', 'output', '=', 'self', '.', 'next_block', '.', 'render', '(', 'get_params', ',', 'module', ',', '_if', '=', 'self', '.', 'commands', '.', '_if', ')', 'elif', 'self', '.', 'parent', 'is', 'None', 'and', '(', '(', 'not', 'self', '.', 'next_block', 'and', 'enough', ')', 'or', 'self', '.', 'base_block', ')', ':', 'valid', '=', 'True', 'else', ':', 'output', '=', '[', ']', '# clean', 'color', '=', 'self', '.', 'commands', '.', 'color', 'if', 'color', 'and', 'color', '[', '0', ']', '!=', '"#"', ':', 'color_name', '=', '"color_%s"', '%', 'color', 'threshold_color_name', '=', '"color_threshold_%s"', '%', 'color', '# substitute color', 'color', '=', '(', 'getattr', '(', 'module', ',', 'color_name', ',', 'None', ')', 'or', 'getattr', '(', 'module', ',', 'threshold_color_name', ',', 'None', ')', 'or', 'getattr', '(', 'module', '.', 'py3', ',', 'color_name', '.', 'upper', '(', ')', ',', 'None', ')', ')', 'if', 'color', '==', '"hidden"', ':', 'return', 'False', ',', '[', ']', 'text', '=', 'u""', 'out', '=', '[', ']', 'if', 'isinstance', '(', 'output', ',', 'str', ')', ':', 'output', '=', '[', 'output', ']', '# merge as much output as we can.', '# we need to convert values to unicode for concatination.', 'if', 'python2', ':', 'conversion', '=', 'unicode', '# noqa', 'convertables', '=', '(', 'str', ',', 'bool', ',', 'int', ',', 'float', ',', 'unicode', ')', '# noqa', 'else', ':', 'conversion', '=', 'str', 'convertables', '=', '(', 'str', ',', 'bool', ',', 'int', ',', 'float', ',', 'bytes', ')', 'first', '=', 'True', 'last_block', '=', 'None', 'for', 'index', ',', 'item', 'in', 'enumerate', '(', 'output', ')', ':', 'is_block', '=', 'isinstance', '(', 'item', ',', 'Block', ')', 'if', 'not', 'is_block', 'and', 'item', ':', 'last_block', '=', 'None', 'if', 'isinstance', '(', 'item', ',', 'convertables', ')', 'or', 'item', 'is', 'None', ':', 'text', '+=', 'conversion', '(', 'item', ')', 'continue', 'elif', 'text', ':', 'if', 'not', 'first', 'and', '(', 'text', '==', '""', 'or', 'out', 'and', 'out', '[', '-', '1', ']', '.', 'get', '(', '"color"', ')', '==', 'color', ')', ':', 
'out', '[', '-', '1', ']', '[', '"full_text"', ']', '+=', 'text', 'else', ':', 'part', '=', '{', '"full_text"', ':', 'text', '}', 'if', 'color', ':', 'part', '[', '"color"', ']', '=', 'color', 'out', '.', 'append', '(', 'part', ')', 'text', '=', 'u""', 'if', 'isinstance', '(', 'item', ',', 'Composite', ')', ':', 'if', 'color', ':', 'item', '.', 'composite_update', '(', 'item', ',', '{', '"color"', ':', 'color', '}', ',', 'soft', '=', 'True', ')', 'out', '.', 'extend', '(', 'item', '.', 'get_content', '(', ')', ')', 'elif', 'is_block', ':', '# if this is a block then likely it is soft.', 'if', 'not', 'out', ':', 'continue', 'for', 'x', 'in', 'range', '(', 'index', '+', '1', ',', 'len', '(', 'output', ')', ')', ':', 'if', 'output', '[', 'x', ']', 'and', 'not', 'isinstance', '(', 'output', '[', 'x', ']', ',', 'Block', ')', ':', 'valid', ',', '_output', '=', 'item', '.', 'render', '(', 'get_params', ',', 'module', ',', '_if', '=', 'True', ')', 'if', '_output', 'and', '_output', '!=', 'last_block', ':', 'last_block', '=', '_output', 'out', '.', 'extend', '(', '_output', ')', 'break', 'else', ':', 'if', 'item', ':', 'out', '.', 'append', '(', 'item', ')', 'first', '=', 'False', '# add any left over text', 'if', 'text', ':', 'part', '=', '{', '"full_text"', ':', 'text', '}', 'if', 'color', ':', 'part', '[', '"color"', ']', '=', 'color', 'out', '.', 'append', '(', 'part', ')', '# process any min/max length commands', 'max_length', '=', 'self', '.', 'commands', '.', 'max_length', 'min_length', '=', 'self', '.', 'commands', '.', 'min_length', 'if', 'max_length', 'or', 'min_length', ':', 'for', 'item', 'in', 'out', ':', 'if', 'max_length', 'is', 'not', 'None', ':', 'item', '[', '"full_text"', ']', '=', 'item', '[', '"full_text"', ']', '[', ':', 'max_length', ']', 'max_length', '-=', 'len', '(', 'item', '[', '"full_text"', ']', ')', 'if', 'min_length', ':', 'min_length', '-=', 'len', '(', 'item', '[', '"full_text"', ']', ')', 'if', 'min_length', '>', '0', ':', 'out', '[', '0', ']', '[', '"full_text"', ']', '=', 'u" "', '*', 'min_length', '+', 'out', '[', '0', ']', '[', '"full_text"', ']', 'min_length', '=', '0', 'return', 'valid', ',', 'out'] | render the block and return the output. | ['render', 'the', 'block', 'and', 'return', 'the', 'output', '.'] | train | https://github.com/ultrabug/py3status/blob/4c105f1b44f7384ca4f7da5f821a47e468c7dee2/py3status/formatter.py#L591-L723 |
7,523 | materialsproject/pymatgen | pymatgen/io/abinit/tasks.py | AbinitTask.temp_shell_task | def temp_shell_task(cls, inp, mpi_procs=1, workdir=None, manager=None):
"""
Build a Task with a temporary workdir. The task is executed via the shell with 1 MPI proc.
Mainly used for invoking Abinit to get important parameters needed to prepare the real task.
Args:
mpi_procs: Number of MPI processes to use.
"""
# Build a simple manager to run the job in a shell subprocess
import tempfile
workdir = tempfile.mkdtemp() if workdir is None else workdir
if manager is None: manager = TaskManager.from_user_config()
# Construct the task and run it
task = cls.from_input(inp, workdir=workdir, manager=manager.to_shell_manager(mpi_procs=mpi_procs))
task.set_name('temp_shell_task')
return task | python | def temp_shell_task(cls, inp, mpi_procs=1, workdir=None, manager=None):
"""
Build a Task with a temporary workdir. The task is executed via the shell with 1 MPI proc.
Mainly used for invoking Abinit to get important parameters needed to prepare the real task.
Args:
mpi_procs: Number of MPI processes to use.
"""
# Build a simple manager to run the job in a shell subprocess
import tempfile
workdir = tempfile.mkdtemp() if workdir is None else workdir
if manager is None: manager = TaskManager.from_user_config()
# Construct the task and run it
task = cls.from_input(inp, workdir=workdir, manager=manager.to_shell_manager(mpi_procs=mpi_procs))
task.set_name('temp_shell_task')
return task | ['def', 'temp_shell_task', '(', 'cls', ',', 'inp', ',', 'mpi_procs', '=', '1', ',', 'workdir', '=', 'None', ',', 'manager', '=', 'None', ')', ':', '# Build a simple manager to run the job in a shell subprocess', 'import', 'tempfile', 'workdir', '=', 'tempfile', '.', 'mkdtemp', '(', ')', 'if', 'workdir', 'is', 'None', 'else', 'workdir', 'if', 'manager', 'is', 'None', ':', 'manager', '=', 'TaskManager', '.', 'from_user_config', '(', ')', '# Construct the task and run it', 'task', '=', 'cls', '.', 'from_input', '(', 'inp', ',', 'workdir', '=', 'workdir', ',', 'manager', '=', 'manager', '.', 'to_shell_manager', '(', 'mpi_procs', '=', 'mpi_procs', ')', ')', 'task', '.', 'set_name', '(', "'temp_shell_task'", ')', 'return', 'task'] | Build a Task with a temporary workdir. The task is executed via the shell with 1 MPI proc.
Mainly used for invoking Abinit to get important parameters needed to prepare the real task.
Args:
mpi_procs: Number of MPI processes to use. | ['Build', 'a', 'Task', 'with', 'a', 'temporary', 'workdir', '.', 'The', 'task', 'is', 'executed', 'via', 'the', 'shell', 'with', '1', 'MPI', 'proc', '.', 'Mainly', 'used', 'for', 'invoking', 'Abinit', 'to', 'get', 'important', 'parameters', 'needed', 'to', 'prepare', 'the', 'real', 'task', '.'] | train | https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/io/abinit/tasks.py#L2608-L2624 |
7,524 | jepegit/cellpy | cellpy/readers/cellreader.py | CellpyData.set_mass | def set_mass(self, masses, dataset_number=None, validated=None):
"""Sets the mass (masses) for the test (datasets).
"""
self._set_run_attribute("mass", masses, dataset_number=dataset_number,
validated=validated) | python | def set_mass(self, masses, dataset_number=None, validated=None):
"""Sets the mass (masses) for the test (datasets).
"""
self._set_run_attribute("mass", masses, dataset_number=dataset_number,
validated=validated) | ['def', 'set_mass', '(', 'self', ',', 'masses', ',', 'dataset_number', '=', 'None', ',', 'validated', '=', 'None', ')', ':', 'self', '.', '_set_run_attribute', '(', '"mass"', ',', 'masses', ',', 'dataset_number', '=', 'dataset_number', ',', 'validated', '=', 'validated', ')'] | Sets the mass (masses) for the test (datasets). | ['Sets', 'the', 'mass', '(', 'masses', ')', 'for', 'the', 'test', '(', 'datasets', ')', '.'] | train | https://github.com/jepegit/cellpy/blob/9f4a84cdd11f72cfa02cda8c2d7b5174abbb7370/cellpy/readers/cellreader.py#L3186-L3190 |
7,525 | mvcisback/py-aiger-bv | aigerbv/common.py | kmodels | def kmodels(wordlen: int, k: int, input=None, output=None):
"""Return a circuit taking a wordlen bitvector where only k
valuations return True. Uses encoding from [1].
Note that this is equivalent to (~x < k).
- TODO: Add automated simplification so that the circuits
are equiv.
[1]: Chakraborty, Supratik, et al. "From Weighted to Unweighted Model
Counting." IJCAI. 2015.
"""
assert 0 <= k < 2**wordlen
if output is None:
output = _fresh()
if input is None:
input = _fresh()
input_names = named_indexes(wordlen, input)
atoms = map(aiger.atom, input_names)
active = False
expr = aiger.atom(False)
for atom, bit in zip(atoms, encode_int(wordlen, k, signed=False)):
active |= bit
if not active: # Skip until first 1.
continue
expr = (expr | atom) if bit else (expr & atom)
return aigbv.AIGBV(
aig=expr.aig,
input_map=frozenset([(input, tuple(input_names))]),
output_map=frozenset([(output, (expr.output,))]),
) | python | def kmodels(wordlen: int, k: int, input=None, output=None):
"""Return a circuit taking a wordlen bitvector where only k
valuations return True. Uses encoding from [1].
Note that this is equivalent to (~x < k).
- TODO: Add automated simplification so that the circuits
are equiv.
[1]: Chakraborty, Supratik, et al. "From Weighted to Unweighted Model
Counting." IJCAI. 2015.
"""
assert 0 <= k < 2**wordlen
if output is None:
output = _fresh()
if input is None:
input = _fresh()
input_names = named_indexes(wordlen, input)
atoms = map(aiger.atom, input_names)
active = False
expr = aiger.atom(False)
for atom, bit in zip(atoms, encode_int(wordlen, k, signed=False)):
active |= bit
if not active: # Skip until first 1.
continue
expr = (expr | atom) if bit else (expr & atom)
return aigbv.AIGBV(
aig=expr.aig,
input_map=frozenset([(input, tuple(input_names))]),
output_map=frozenset([(output, (expr.output,))]),
) | ['def', 'kmodels', '(', 'wordlen', ':', 'int', ',', 'k', ':', 'int', ',', 'input', '=', 'None', ',', 'output', '=', 'None', ')', ':', 'assert', '0', '<=', 'k', '<', '2', '**', 'wordlen', 'if', 'output', 'is', 'None', ':', 'output', '=', '_fresh', '(', ')', 'if', 'input', 'is', 'None', ':', 'input', '=', '_fresh', '(', ')', 'input_names', '=', 'named_indexes', '(', 'wordlen', ',', 'input', ')', 'atoms', '=', 'map', '(', 'aiger', '.', 'atom', ',', 'input_names', ')', 'active', '=', 'False', 'expr', '=', 'aiger', '.', 'atom', '(', 'False', ')', 'for', 'atom', ',', 'bit', 'in', 'zip', '(', 'atoms', ',', 'encode_int', '(', 'wordlen', ',', 'k', ',', 'signed', '=', 'False', ')', ')', ':', 'active', '|=', 'bit', 'if', 'not', 'active', ':', '# Skip until first 1.', 'continue', 'expr', '=', '(', 'expr', '|', 'atom', ')', 'if', 'bit', 'else', '(', 'expr', '&', 'atom', ')', 'return', 'aigbv', '.', 'AIGBV', '(', 'aig', '=', 'expr', '.', 'aig', ',', 'input_map', '=', 'frozenset', '(', '[', '(', 'input', ',', 'tuple', '(', 'input_names', ')', ')', ']', ')', ',', 'output_map', '=', 'frozenset', '(', '[', '(', 'output', ',', '(', 'expr', '.', 'output', ',', ')', ')', ']', ')', ',', ')'] | Return a circuit taking a wordlen bitvector where only k
valuations return True. Uses encoding from [1].
Note that this is equivalent to (~x < k).
- TODO: Add automated simplification so that the circuits
are equiv.
[1]: Chakraborty, Supratik, et al. "From Weighted to Unweighted Model
Counting." IJCAI. 2015. | ['Return', 'a', 'circuit', 'taking', 'a', 'wordlen', 'bitvector', 'where', 'only', 'k', 'valuations', 'return', 'True', '.', 'Uses', 'encoding', 'from', '[', '1', ']', '.'] | train | https://github.com/mvcisback/py-aiger-bv/blob/855819844c429c35cdd8dc0b134bcd11f7b2fda3/aigerbv/common.py#L430-L464 |
7,526 | a1ezzz/wasp-general | wasp_general/network/web/cookies.py | WHTTPCookieJar.import_simple_cookie | def import_simple_cookie(cls, simple_cookie):
""" Create cookie jar from SimpleCookie object
:param simple_cookie: cookies to import
:return: WHTTPCookieJar
"""
cookie_jar = WHTTPCookieJar()
for cookie_name in simple_cookie.keys():
cookie_attrs = {}
for attr_name in WHTTPCookie.cookie_attr_value_compliance.keys():
attr_value = simple_cookie[cookie_name][attr_name]
if attr_value != '':
cookie_attrs[attr_name] = attr_value
cookie_jar.add_cookie(WHTTPCookie(
cookie_name, simple_cookie[cookie_name].value, **cookie_attrs
))
return cookie_jar | python | def import_simple_cookie(cls, simple_cookie):
""" Create cookie jar from SimpleCookie object
:param simple_cookie: cookies to import
:return: WHTTPCookieJar
"""
cookie_jar = WHTTPCookieJar()
for cookie_name in simple_cookie.keys():
cookie_attrs = {}
for attr_name in WHTTPCookie.cookie_attr_value_compliance.keys():
attr_value = simple_cookie[cookie_name][attr_name]
if attr_value != '':
cookie_attrs[attr_name] = attr_value
cookie_jar.add_cookie(WHTTPCookie(
cookie_name, simple_cookie[cookie_name].value, **cookie_attrs
))
return cookie_jar | ['def', 'import_simple_cookie', '(', 'cls', ',', 'simple_cookie', ')', ':', 'cookie_jar', '=', 'WHTTPCookieJar', '(', ')', 'for', 'cookie_name', 'in', 'simple_cookie', '.', 'keys', '(', ')', ':', 'cookie_attrs', '=', '{', '}', 'for', 'attr_name', 'in', 'WHTTPCookie', '.', 'cookie_attr_value_compliance', '.', 'keys', '(', ')', ':', 'attr_value', '=', 'simple_cookie', '[', 'cookie_name', ']', '[', 'attr_name', ']', 'if', 'attr_value', '!=', "''", ':', 'cookie_attrs', '[', 'attr_name', ']', '=', 'attr_value', 'cookie_jar', '.', 'add_cookie', '(', 'WHTTPCookie', '(', 'cookie_name', ',', 'simple_cookie', '[', 'cookie_name', ']', '.', 'value', ',', '*', '*', 'cookie_attrs', ')', ')', 'return', 'cookie_jar'] | Create cookie jar from SimpleCookie object
:param simple_cookie: cookies to import
:return: WHTTPCookieJar | ['Create', 'cookie', 'jar', 'from', 'SimpleCookie', 'object'] | train | https://github.com/a1ezzz/wasp-general/blob/1029839d33eb663f8dec76c1c46754d53c1de4a9/wasp_general/network/web/cookies.py#L295-L312 |
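
A usage sketch for the ``import_simple_cookie`` record above (7,526), using the standard-library ``SimpleCookie``. The import path is inferred from the record's module path and the cookie values are placeholders::

    from http.cookies import SimpleCookie
    from wasp_general.network.web.cookies import WHTTPCookieJar   # inferred path

    simple = SimpleCookie()
    simple["session"] = "abc123"
    simple["session"]["path"] = "/"

    jar = WHTTPCookieJar.import_simple_cookie(simple)   # returns a WHTTPCookieJar
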
7,527 | slarse/pdfebc-core | pdfebc_core/compress.py | compress_multiple_pdfs | def compress_multiple_pdfs(source_directory, output_directory, ghostscript_binary):
"""Compress all PDF files in the current directory and place the output in the
given output directory. This is a generator function that first yields the amount
of files to be compressed, and then yields the output path of each file.
Args:
source_directory (str): Filepath to the source directory.
output_directory (str): Filepath to the output directory.
ghostscript_binary (str): Name of the Ghostscript binary.
Returns:
list(str): paths to outputs.
"""
source_paths = _get_pdf_filenames_at(source_directory)
yield len(source_paths)
for source_path in source_paths:
output = os.path.join(output_directory, os.path.basename(source_path))
compress_pdf(source_path, output, ghostscript_binary)
yield output | python | def compress_multiple_pdfs(source_directory, output_directory, ghostscript_binary):
"""Compress all PDF files in the current directory and place the output in the
given output directory. This is a generator function that first yields the amount
of files to be compressed, and then yields the output path of each file.
Args:
source_directory (str): Filepath to the source directory.
output_directory (str): Filepath to the output directory.
ghostscript_binary (str): Name of the Ghostscript binary.
Returns:
list(str): paths to outputs.
"""
source_paths = _get_pdf_filenames_at(source_directory)
yield len(source_paths)
for source_path in source_paths:
output = os.path.join(output_directory, os.path.basename(source_path))
compress_pdf(source_path, output, ghostscript_binary)
yield output | ['def', 'compress_multiple_pdfs', '(', 'source_directory', ',', 'output_directory', ',', 'ghostscript_binary', ')', ':', 'source_paths', '=', '_get_pdf_filenames_at', '(', 'source_directory', ')', 'yield', 'len', '(', 'source_paths', ')', 'for', 'source_path', 'in', 'source_paths', ':', 'output', '=', 'os', '.', 'path', '.', 'join', '(', 'output_directory', ',', 'os', '.', 'path', '.', 'basename', '(', 'source_path', ')', ')', 'compress_pdf', '(', 'source_path', ',', 'output', ',', 'ghostscript_binary', ')', 'yield', 'output'] | Compress all PDF files in the current directory and place the output in the
given output directory. This is a generator function that first yields the amount
of files to be compressed, and then yields the output path of each file.
Args:
source_directory (str): Filepath to the source directory.
output_directory (str): Filepath to the output directory.
ghostscript_binary (str): Name of the Ghostscript binary.
Returns:
list(str): paths to outputs. | ['Compress', 'all', 'PDF', 'files', 'in', 'the', 'current', 'directory', 'and', 'place', 'the', 'output', 'in', 'the', 'given', 'output', 'directory', '.', 'This', 'is', 'a', 'generator', 'function', 'that', 'first', 'yields', 'the', 'amount', 'of', 'files', 'to', 'be', 'compressed', 'and', 'then', 'yields', 'the', 'output', 'path', 'of', 'each', 'file', '.'] | train | https://github.com/slarse/pdfebc-core/blob/fc40857bc42365b7434714333e37d7a3487603a0/pdfebc_core/compress.py#L87-L105 |
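
A sketch of how a caller might drive the generator from record 7,527; the directory names and the ``gs`` binary name are placeholders::

    from pdfebc_core.compress import compress_multiple_pdfs   # path per the record

    compressor = compress_multiple_pdfs("papers/", "papers_small/", "gs")
    total = next(compressor)                     # first value yielded is the file count
    for i, out_path in enumerate(compressor, start=1):
        print("compressed {}/{}: {}".format(i, total, out_path))
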
7,528 | wmayner/pyphi | pyphi/subsystem.py | Subsystem.phi_effect_mip | def phi_effect_mip(self, mechanism, purview):
"""Return the |small_phi| of the effect MIP.
This is the distance between the unpartitioned effect repertoire and
the MIP effect repertoire.
"""
mip = self.effect_mip(mechanism, purview)
return mip.phi if mip else 0 | python | def phi_effect_mip(self, mechanism, purview):
"""Return the |small_phi| of the effect MIP.
This is the distance between the unpartitioned effect repertoire and
the MIP effect repertoire.
"""
mip = self.effect_mip(mechanism, purview)
return mip.phi if mip else 0 | ['def', 'phi_effect_mip', '(', 'self', ',', 'mechanism', ',', 'purview', ')', ':', 'mip', '=', 'self', '.', 'effect_mip', '(', 'mechanism', ',', 'purview', ')', 'return', 'mip', '.', 'phi', 'if', 'mip', 'else', '0'] | Return the |small_phi| of the effect MIP.
This is the distance between the unpartitioned effect repertoire and
the MIP effect repertoire. | ['Return', 'the', '|small_phi|', 'of', 'the', 'effect', 'MIP', '.'] | train | https://github.com/wmayner/pyphi/blob/deeca69a084d782a6fde7bf26f59e93b593c5d77/pyphi/subsystem.py#L623-L630 |
7,529 | kvesteri/validators | validators/utils.py | func_args_as_dict | def func_args_as_dict(func, args, kwargs):
"""
Return given function's positional and key value arguments as an ordered
dictionary.
"""
if six.PY2:
_getargspec = inspect.getargspec
else:
_getargspec = inspect.getfullargspec
arg_names = list(
OrderedDict.fromkeys(
itertools.chain(
_getargspec(func)[0],
kwargs.keys()
)
)
)
return OrderedDict(
list(six.moves.zip(arg_names, args)) +
list(kwargs.items())
) | python | def func_args_as_dict(func, args, kwargs):
"""
Return given function's positional and key value arguments as an ordered
dictionary.
"""
if six.PY2:
_getargspec = inspect.getargspec
else:
_getargspec = inspect.getfullargspec
arg_names = list(
OrderedDict.fromkeys(
itertools.chain(
_getargspec(func)[0],
kwargs.keys()
)
)
)
return OrderedDict(
list(six.moves.zip(arg_names, args)) +
list(kwargs.items())
) | ['def', 'func_args_as_dict', '(', 'func', ',', 'args', ',', 'kwargs', ')', ':', 'if', 'six', '.', 'PY2', ':', '_getargspec', '=', 'inspect', '.', 'getargspec', 'else', ':', '_getargspec', '=', 'inspect', '.', 'getfullargspec', 'arg_names', '=', 'list', '(', 'OrderedDict', '.', 'fromkeys', '(', 'itertools', '.', 'chain', '(', '_getargspec', '(', 'func', ')', '[', '0', ']', ',', 'kwargs', '.', 'keys', '(', ')', ')', ')', ')', 'return', 'OrderedDict', '(', 'list', '(', 'six', '.', 'moves', '.', 'zip', '(', 'arg_names', ',', 'args', ')', ')', '+', 'list', '(', 'kwargs', '.', 'items', '(', ')', ')', ')'] | Return given function's positional and key value arguments as an ordered
dictionary. | ['Return', 'given', 'function', 's', 'positional', 'and', 'key', 'value', 'arguments', 'as', 'an', 'ordered', 'dictionary', '.'] | train | https://github.com/kvesteri/validators/blob/34d355e87168241e872b25811d245810df2bd430/validators/utils.py#L35-L56 |
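
A worked example for record 7,529, assuming ``func_args_as_dict`` from the code above is in scope; ``greet`` is a hypothetical function used only for illustration::

    def greet(name, punctuation="!"):
        return name + punctuation

    print(func_args_as_dict(greet, ("world",), {"punctuation": "?"}))
    # OrderedDict([('name', 'world'), ('punctuation', '?')])
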
7,530 | kstaniek/condoor | condoor/utils.py | is_reachable | def is_reachable(host, port=23):
"""Check reachability for specified hostname/port.
It tries to open a TCP socket.
It supports IPv6.
:param host: hostname or ip address string
:type host: str
:param port: tcp port number
:type port: int
:return: True if host is reachable else false
"""
try:
addresses = socket.getaddrinfo(
host, port, socket.AF_UNSPEC, socket.SOCK_STREAM
)
except socket.gaierror:
return False
for family, _, _, _, sockaddr in addresses:
sock = socket.socket(family, socket.SOCK_STREAM)
sock.settimeout(5)
try:
sock.connect(sockaddr)
except IOError:
continue
sock.shutdown(socket.SHUT_RDWR)
sock.close()
# Wait 2 sec for socket to shutdown
time.sleep(2)
break
else:
return False
return True | python | def is_reachable(host, port=23):
"""Check reachability for specified hostname/port.
It tries to open a TCP socket.
It supports IPv6.
:param host: hostname or ip address string
:type host: str
:param port: tcp port number
:type port: int
:return: True if host is reachable else false
"""
try:
addresses = socket.getaddrinfo(
host, port, socket.AF_UNSPEC, socket.SOCK_STREAM
)
except socket.gaierror:
return False
for family, _, _, _, sockaddr in addresses:
sock = socket.socket(family, socket.SOCK_STREAM)
sock.settimeout(5)
try:
sock.connect(sockaddr)
except IOError:
continue
sock.shutdown(socket.SHUT_RDWR)
sock.close()
# Wait 2 sec for socket to shutdown
time.sleep(2)
break
else:
return False
return True | ['def', 'is_reachable', '(', 'host', ',', 'port', '=', '23', ')', ':', 'try', ':', 'addresses', '=', 'socket', '.', 'getaddrinfo', '(', 'host', ',', 'port', ',', 'socket', '.', 'AF_UNSPEC', ',', 'socket', '.', 'SOCK_STREAM', ')', 'except', 'socket', '.', 'gaierror', ':', 'return', 'False', 'for', 'family', ',', '_', ',', '_', ',', '_', ',', 'sockaddr', 'in', 'addresses', ':', 'sock', '=', 'socket', '.', 'socket', '(', 'family', ',', 'socket', '.', 'SOCK_STREAM', ')', 'sock', '.', 'settimeout', '(', '5', ')', 'try', ':', 'sock', '.', 'connect', '(', 'sockaddr', ')', 'except', 'IOError', ':', 'continue', 'sock', '.', 'shutdown', '(', 'socket', '.', 'SHUT_RDWR', ')', 'sock', '.', 'close', '(', ')', '# Wait 2 sec for socket to shutdown', 'time', '.', 'sleep', '(', '2', ')', 'break', 'else', ':', 'return', 'False', 'return', 'True'] | Check reachability for specified hostname/port.
It tries to open a TCP socket.
It supports IPv6.
:param host: hostname or ip address string
:type host: str
:param port: tcp port number
:type port: int
:return: True if host is reachable else false | ['Check', 'reachability', 'for', 'specified', 'hostname', '/', 'port', '.'] | train | https://github.com/kstaniek/condoor/blob/77c054b29d4e286c1d7aca2c74dff86b805e1fae/condoor/utils.py#L49-L82 |
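
A usage sketch for record 7,530; the import path is inferred from the record and the host/port are placeholders. Note that a successful probe sleeps two seconds before returning, per the code above::

    from condoor.utils import is_reachable   # inferred path

    if is_reachable("192.0.2.10", port=22):
        print("port 22 is open")
    else:
        print("host unreachable")
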
7,531 | jsommers/switchyard | switchyard/lib/topo/topobuild.py | Topology.serialize | def serialize(self):
'''
Return a JSON string of the serialized topology
'''
return json.dumps(json_graph.node_link_data(self.__nxgraph), cls=Encoder) | python | def serialize(self):
'''
Return a JSON string of the serialized topology
'''
return json.dumps(json_graph.node_link_data(self.__nxgraph), cls=Encoder) | ['def', 'serialize', '(', 'self', ')', ':', 'return', 'json', '.', 'dumps', '(', 'json_graph', '.', 'node_link_data', '(', 'self', '.', '__nxgraph', ')', ',', 'cls', '=', 'Encoder', ')'] | Return a JSON string of the serialized topology | ['Return', 'a', 'JSON', 'string', 'of', 'the', 'serialized', 'topology'] | train | https://github.com/jsommers/switchyard/blob/fdcb3869c937dcedbd6ea7a7822ebd412bf1e2b0/switchyard/lib/topo/topobuild.py#L266-L270 |
7,532 | senseobservationsystems/commonsense-python-lib | senseapi.py | SenseAPI.SensorsGet | def SensorsGet(self, parameters = None, sensor_id = -1):
"""
Retrieve sensors from CommonSense, according to parameters, or by sensor id.
If successful, result can be obtained by a call to getResponse(), and should be a json string.
@param parameters (dictionary) (optional) - Dictionary containing the parameters for the api-call.
@note - http://www.sense-os.nl/45?nodeId=45&selectedId=11887
@param sensor_id (int) (optional) - Sensor id of sensor to retrieve details from.
@return (boolean) - Boolean indicating whether SensorsGet was successful.
"""
url = ''
if parameters is None and sensor_id <> -1:
url = '/sensors/{0}.json'.format(sensor_id)
else:
url = '/sensors.json'
if self.__SenseApiCall__(url, 'GET', parameters = parameters):
return True
else:
self.__error__ = "api call unsuccessful"
return False | python | def SensorsGet(self, parameters = None, sensor_id = -1):
"""
Retrieve sensors from CommonSense, according to parameters, or by sensor id.
If successful, result can be obtained by a call to getResponse(), and should be a json string.
@param parameters (dictionary) (optional) - Dictionary containing the parameters for the api-call.
@note - http://www.sense-os.nl/45?nodeId=45&selectedId=11887
@param sensor_id (int) (optional) - Sensor id of sensor to retrieve details from.
@return (boolean) - Boolean indicating whether SensorsGet was successful.
"""
url = ''
if parameters is None and sensor_id <> -1:
url = '/sensors/{0}.json'.format(sensor_id)
else:
url = '/sensors.json'
if self.__SenseApiCall__(url, 'GET', parameters = parameters):
return True
else:
self.__error__ = "api call unsuccessful"
return False | ['def', 'SensorsGet', '(', 'self', ',', 'parameters', '=', 'None', ',', 'sensor_id', '=', '-', '1', ')', ':', 'url', '=', "''", 'if', 'parameters', 'is', 'None', 'and', 'sensor_id', '<>', '-', '1', ':', 'url', '=', "'/sensors/{0}.json'", '.', 'format', '(', 'sensor_id', ')', 'else', ':', 'url', '=', "'/sensors.json'", 'if', 'self', '.', '__SenseApiCall__', '(', 'url', ',', "'GET'", ',', 'parameters', '=', 'parameters', ')', ':', 'return', 'True', 'else', ':', 'self', '.', '__error__', '=', '"api call unsuccessful"', 'return', 'False'] | Retrieve sensors from CommonSense, according to parameters, or by sensor id.
If successful, result can be obtained by a call to getResponse(), and should be a json string.
@param parameters (dictionary) (optional) - Dictionary containing the parameters for the api-call.
@note - http://www.sense-os.nl/45?nodeId=45&selectedId=11887
@param sensor_id (int) (optional) - Sensor id of sensor to retrieve details from.
@return (boolean) - Boolean indicating whether SensorsGet was successful. | ['Retrieve', 'sensors', 'from', 'CommonSense', 'according', 'to', 'parameters', 'or', 'by', 'sensor', 'id', '.', 'If', 'successful', 'result', 'can', 'be', 'obtained', 'by', 'a', 'call', 'to', 'getResponse', '()', 'and', 'should', 'be', 'a', 'json', 'string', '.'] | train | https://github.com/senseobservationsystems/commonsense-python-lib/blob/aac59a1751ef79eb830b3ca1fab6ef2c83931f87/senseapi.py#L551-L573 |
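
A usage sketch for record 7,532. The constructor call and the parameter names are assumptions (the record only documents the ``parameters``/``sensor_id`` arguments and ``getResponse()``); the library itself targets Python 2, as the ``<>`` operator above shows::

    api = SenseAPI()                                                # constructor details assumed
    if api.SensorsGet(parameters={'page': 0, 'per_page': 100}):    # placeholder parameters
        print(api.getResponse())                                   # JSON string with the sensors
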
7,533 | aio-libs/aioredis | aioredis/commands/hash.py | HashCommandsMixin.hincrbyfloat | def hincrbyfloat(self, key, field, increment=1.0):
"""Increment the float value of a hash field by the given number."""
fut = self.execute(b'HINCRBYFLOAT', key, field, increment)
return wait_convert(fut, float) | python | def hincrbyfloat(self, key, field, increment=1.0):
"""Increment the float value of a hash field by the given number."""
fut = self.execute(b'HINCRBYFLOAT', key, field, increment)
return wait_convert(fut, float) | ['def', 'hincrbyfloat', '(', 'self', ',', 'key', ',', 'field', ',', 'increment', '=', '1.0', ')', ':', 'fut', '=', 'self', '.', 'execute', '(', "b'HINCRBYFLOAT'", ',', 'key', ',', 'field', ',', 'increment', ')', 'return', 'wait_convert', '(', 'fut', ',', 'float', ')'] | Increment the float value of a hash field by the given number. | ['Increment', 'the', 'float', 'value', 'of', 'a', 'hash', 'field', 'by', 'the', 'given', 'number', '.'] | train | https://github.com/aio-libs/aioredis/blob/e8c33e39558d4cc91cf70dde490d8b330c97dc2e/aioredis/commands/hash.py#L40-L43 |
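
An asyncio usage sketch for record 7,533, written against the aioredis 1.x style the record comes from; the connection URL and key names are placeholders::

    import asyncio
    import aioredis

    async def main():
        redis = await aioredis.create_redis_pool("redis://localhost")
        await redis.hset("stats", "cpu_seconds", 1.5)
        total = await redis.hincrbyfloat("stats", "cpu_seconds", 0.75)
        print(total)          # 2.25, returned as a float via wait_convert
        redis.close()
        await redis.wait_closed()

    asyncio.run(main())
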
7,534 | NYUCCL/psiTurk | psiturk/psiturk_org_services.py | PsiturkOrgServices.get_ad_url | def get_ad_url(self, ad_id, sandbox):
"""
get_ad_url:
returns the ad server view URL for the given ad id, using the sandbox ad server when sandbox is True
"""
if sandbox:
return self.sandbox_ad_server + '/view/' + str(ad_id)
else:
return self.ad_server + '/view/' + str(ad_id) | python | def get_ad_url(self, ad_id, sandbox):
"""
get_ad_url:
returns the ad server view URL for the given ad id, using the sandbox ad server when sandbox is True
"""
if sandbox:
return self.sandbox_ad_server + '/view/' + str(ad_id)
else:
return self.ad_server + '/view/' + str(ad_id) | ['def', 'get_ad_url', '(', 'self', ',', 'ad_id', ',', 'sandbox', ')', ':', 'if', 'sandbox', ':', 'return', 'self', '.', 'sandbox_ad_server', '+', "'/view/'", '+', 'str', '(', 'ad_id', ')', 'else', ':', 'return', 'self', '.', 'ad_server', '+', "'/view/'", '+', 'str', '(', 'ad_id', ')'] | get_ad_url:
returns the ad server view URL for the given ad id, using the sandbox ad server when sandbox is True | ['get_ad_url', ':', 'gets', 'ad', 'server', 'thing'] | train | https://github.com/NYUCCL/psiTurk/blob/7170b992a0b5f56c165929cf87b3d3a1f3336c36/psiturk/psiturk_org_services.py#L108-L116 |
7,535 | fhcrc/taxtastic | taxtastic/ncbi.py | read_nodes | def read_nodes(rows, source_id=1):
"""
Return an iterator of rows ready to insert into table "nodes".
* rows - iterator of lists (eg, output from read_archive or read_dmp)
"""
ncbi_keys = ['tax_id', 'parent_id', 'rank', 'embl_code', 'division_id']
extra_keys = ['source_id', 'is_valid']
is_valid = True
ncbi_cols = len(ncbi_keys)
rank = ncbi_keys.index('rank')
parent_id = ncbi_keys.index('parent_id')
# assumes the first row is the root
row = next(rows)
row[rank] = 'root'
# parent must be None for termination of recursive CTE for
# calculating lineages
row[parent_id] = None
rows = itertools.chain([row], rows)
yield ncbi_keys + extra_keys
for row in rows:
# replace whitespace in "rank" with underscore
row[rank] = '_'.join(row[rank].split())
# provide default values for source_id and is_valid
yield row[:ncbi_cols] + [source_id, is_valid] | python | def read_nodes(rows, source_id=1):
"""
Return an iterator of rows ready to insert into table "nodes".
* rows - iterator of lists (eg, output from read_archive or read_dmp)
"""
ncbi_keys = ['tax_id', 'parent_id', 'rank', 'embl_code', 'division_id']
extra_keys = ['source_id', 'is_valid']
is_valid = True
ncbi_cols = len(ncbi_keys)
rank = ncbi_keys.index('rank')
parent_id = ncbi_keys.index('parent_id')
# assumes the first row is the root
row = next(rows)
row[rank] = 'root'
# parent must be None for termination of recursive CTE for
# calculating lineages
row[parent_id] = None
rows = itertools.chain([row], rows)
yield ncbi_keys + extra_keys
for row in rows:
# replace whitespace in "rank" with underscore
row[rank] = '_'.join(row[rank].split())
# provide default values for source_id and is_valid
yield row[:ncbi_cols] + [source_id, is_valid] | ['def', 'read_nodes', '(', 'rows', ',', 'source_id', '=', '1', ')', ':', 'ncbi_keys', '=', '[', "'tax_id'", ',', "'parent_id'", ',', "'rank'", ',', "'embl_code'", ',', "'division_id'", ']', 'extra_keys', '=', '[', "'source_id'", ',', "'is_valid'", ']', 'is_valid', '=', 'True', 'ncbi_cols', '=', 'len', '(', 'ncbi_keys', ')', 'rank', '=', 'ncbi_keys', '.', 'index', '(', "'rank'", ')', 'parent_id', '=', 'ncbi_keys', '.', 'index', '(', "'parent_id'", ')', '# assumes the first row is the root', 'row', '=', 'next', '(', 'rows', ')', 'row', '[', 'rank', ']', '=', "'root'", '# parent must be None for termination of recursive CTE for', '# calculating lineages', 'row', '[', 'parent_id', ']', '=', 'None', 'rows', '=', 'itertools', '.', 'chain', '(', '[', 'row', ']', ',', 'rows', ')', 'yield', 'ncbi_keys', '+', 'extra_keys', 'for', 'row', 'in', 'rows', ':', '# replace whitespace in "rank" with underscore', 'row', '[', 'rank', ']', '=', "'_'", '.', 'join', '(', 'row', '[', 'rank', ']', '.', 'split', '(', ')', ')', '# provide default values for source_id and is_valid', 'yield', 'row', '[', ':', 'ncbi_cols', ']', '+', '[', 'source_id', ',', 'is_valid', ']'] | Return an iterator of rows ready to insert into table "nodes".
* rows - iterator of lists (eg, output from read_archive or read_dmp) | ['Return', 'an', 'iterator', 'of', 'rows', 'ready', 'to', 'insert', 'into', 'table', 'nodes', '.'] | train | https://github.com/fhcrc/taxtastic/blob/4e874b7f2cc146178828bfba386314f8c342722b/taxtastic/ncbi.py#L250-L280 |
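A sketch of how `read_nodes` from the record above could be driven, assuming the generator is in scope; the nodes.dmp-style rows are hypothetical, and the first row must be the root node as the function's comment requires.

```python
# Each row is [tax_id, parent_id, rank, embl_code, division_id]
rows = iter([
    ['1', '1', 'no rank', '', '8'],          # root node (rewritten to rank 'root', parent None)
    ['2', '131567', 'superkingdom', '', '0'],
])
for out_row in read_nodes(rows, source_id=1):
    print(out_row)
# ['tax_id', 'parent_id', 'rank', 'embl_code', 'division_id', 'source_id', 'is_valid']
# ['1', None, 'root', '', '8', 1, True]
# ['2', '131567', 'superkingdom', '', '0', 1, True]
```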
7,536 | wmayner/pyphi | pyphi/validate.py | blackbox | def blackbox(blackbox):
"""Validate a macro blackboxing."""
if tuple(sorted(blackbox.output_indices)) != blackbox.output_indices:
raise ValueError('Output indices {} must be ordered'.format(
blackbox.output_indices))
partition(blackbox.partition)
for part in blackbox.partition:
if not set(part) & set(blackbox.output_indices):
raise ValueError(
'Every blackbox must have an output - {} does not'.format(
part)) | python | def blackbox(blackbox):
"""Validate a macro blackboxing."""
if tuple(sorted(blackbox.output_indices)) != blackbox.output_indices:
raise ValueError('Output indices {} must be ordered'.format(
blackbox.output_indices))
partition(blackbox.partition)
for part in blackbox.partition:
if not set(part) & set(blackbox.output_indices):
raise ValueError(
'Every blackbox must have an output - {} does not'.format(
part)) | ['def', 'blackbox', '(', 'blackbox', ')', ':', 'if', 'tuple', '(', 'sorted', '(', 'blackbox', '.', 'output_indices', ')', ')', '!=', 'blackbox', '.', 'output_indices', ':', 'raise', 'ValueError', '(', "'Output indices {} must be ordered'", '.', 'format', '(', 'blackbox', '.', 'output_indices', ')', ')', 'partition', '(', 'blackbox', '.', 'partition', ')', 'for', 'part', 'in', 'blackbox', '.', 'partition', ':', 'if', 'not', 'set', '(', 'part', ')', '&', 'set', '(', 'blackbox', '.', 'output_indices', ')', ':', 'raise', 'ValueError', '(', "'Every blackbox must have an output - {} does not'", '.', 'format', '(', 'part', ')', ')'] | Validate a macro blackboxing. | ['Validate', 'a', 'macro', 'blackboxing', '.'] | train | https://github.com/wmayner/pyphi/blob/deeca69a084d782a6fde7bf26f59e93b593c5d77/pyphi/validate.py#L223-L235 |
7,537 | inveniosoftware-attic/invenio-knowledge | docs/_ext/flask_app.py | setup | def setup(sphinx):
"""Setup Sphinx object."""
from flask import has_app_context
from invenio_base.factory import create_app
PACKAGES = ['invenio_base', 'invenio.modules.accounts',
'invenio.modules.records', 'invenio_knowledge']
if not has_app_context():
app = create_app(PACKAGES=PACKAGES)
ctx = app.test_request_context('/')
ctx.push() | python | def setup(sphinx):
"""Setup Sphinx object."""
from flask import has_app_context
from invenio_base.factory import create_app
PACKAGES = ['invenio_base', 'invenio.modules.accounts',
'invenio.modules.records', 'invenio_knowledge']
if not has_app_context():
app = create_app(PACKAGES=PACKAGES)
ctx = app.test_request_context('/')
ctx.push() | ['def', 'setup', '(', 'sphinx', ')', ':', 'from', 'flask', 'import', 'has_app_context', 'from', 'invenio_base', '.', 'factory', 'import', 'create_app', 'PACKAGES', '=', '[', "'invenio_base'", ',', "'invenio.modules.accounts'", ',', "'invenio.modules.records'", ',', "'invenio_knowledge'", ']', 'if', 'not', 'has_app_context', '(', ')', ':', 'app', '=', 'create_app', '(', 'PACKAGES', '=', 'PACKAGES', ')', 'ctx', '=', 'app', '.', 'test_request_context', '(', "'/'", ')', 'ctx', '.', 'push', '(', ')'] | Setup Sphinx object. | ['Setup', 'Sphinx', 'object', '.'] | train | https://github.com/inveniosoftware-attic/invenio-knowledge/blob/b31722dc14243ca8f626f8b3bce9718d0119de55/docs/_ext/flask_app.py#L23-L33 |
7,538 | billy-yoyo/RainbowSixSiege-Python-API | r6sapi/r6sapi.py | Rank.get_charm_url | def get_charm_url(self):
"""Get charm URL for the bracket this rank is in
Returns
-------
:class:`str`
the URL for the charm
"""
if self.rank_id <= 4: return self.RANK_CHARMS[0]
if self.rank_id <= 8: return self.RANK_CHARMS[1]
if self.rank_id <= 12: return self.RANK_CHARMS[2]
if self.rank_id <= 16: return self.RANK_CHARMS[3]
if self.rank_id <= 19: return self.RANK_CHARMS[4]
return self.RANK_CHARMS[5] | python | def get_charm_url(self):
"""Get charm URL for the bracket this rank is in
Returns
-------
:class:`str`
the URL for the charm
"""
if self.rank_id <= 4: return self.RANK_CHARMS[0]
if self.rank_id <= 8: return self.RANK_CHARMS[1]
if self.rank_id <= 12: return self.RANK_CHARMS[2]
if self.rank_id <= 16: return self.RANK_CHARMS[3]
if self.rank_id <= 19: return self.RANK_CHARMS[4]
return self.RANK_CHARMS[5] | ['def', 'get_charm_url', '(', 'self', ')', ':', 'if', 'self', '.', 'rank_id', '<=', '4', ':', 'return', 'self', '.', 'RANK_CHARMS', '[', '0', ']', 'if', 'self', '.', 'rank_id', '<=', '8', ':', 'return', 'self', '.', 'RANK_CHARMS', '[', '1', ']', 'if', 'self', '.', 'rank_id', '<=', '12', ':', 'return', 'self', '.', 'RANK_CHARMS', '[', '2', ']', 'if', 'self', '.', 'rank_id', '<=', '16', ':', 'return', 'self', '.', 'RANK_CHARMS', '[', '3', ']', 'if', 'self', '.', 'rank_id', '<=', '19', ':', 'return', 'self', '.', 'RANK_CHARMS', '[', '4', ']', 'return', 'self', '.', 'RANK_CHARMS', '[', '5', ']'] | Get charm URL for the bracket this rank is in
Returns
-------
:class:`str`
the URL for the charm | ['Get', 'charm', 'URL', 'for', 'the', 'bracket', 'this', 'rank', 'is', 'in'] | train | https://github.com/billy-yoyo/RainbowSixSiege-Python-API/blob/9860fdfd9a78aabd977eaa71b0a4ab4ed69e94d0/r6sapi/r6sapi.py#L808-L822 |
7,539 | NICTA/revrand | revrand/optimize/decorators.py | _logtrick_gen | def _logtrick_gen(bounds):
"""Generate warping functions and new bounds for the log trick."""
# Test which parameters we can apply the log trick too
ispos = np.array([isinstance(b, bt.Positive) for b in bounds], dtype=bool)
nispos = ~ispos
# Functions that implement the log trick
def logx(x):
xwarp = np.empty_like(x)
xwarp[ispos] = np.log(x[ispos])
xwarp[nispos] = x[nispos]
return xwarp
def expx(xwarp):
x = np.empty_like(xwarp)
x[ispos] = np.exp(xwarp[ispos])
x[nispos] = xwarp[nispos]
return x
def gradx(grad, xwarp):
gwarp = np.empty_like(grad)
gwarp[ispos] = grad[ispos] * np.exp(xwarp[ispos])
gwarp[nispos] = grad[nispos]
return gwarp
# Redefine bounds as appropriate for new ranges for numerical stability
for i, (b, pos) in enumerate(zip(bounds, ispos)):
if pos:
upper = EXPMAX if b.upper is None else np.log(b.upper)
bounds[i] = bt.Bound(lower=LOGMINPOS, upper=upper)
return logx, expx, gradx, bounds | python | def _logtrick_gen(bounds):
"""Generate warping functions and new bounds for the log trick."""
# Test which parameters we can apply the log trick too
ispos = np.array([isinstance(b, bt.Positive) for b in bounds], dtype=bool)
nispos = ~ispos
# Functions that implement the log trick
def logx(x):
xwarp = np.empty_like(x)
xwarp[ispos] = np.log(x[ispos])
xwarp[nispos] = x[nispos]
return xwarp
def expx(xwarp):
x = np.empty_like(xwarp)
x[ispos] = np.exp(xwarp[ispos])
x[nispos] = xwarp[nispos]
return x
def gradx(grad, xwarp):
gwarp = np.empty_like(grad)
gwarp[ispos] = grad[ispos] * np.exp(xwarp[ispos])
gwarp[nispos] = grad[nispos]
return gwarp
# Redefine bounds as appropriate for new ranges for numerical stability
for i, (b, pos) in enumerate(zip(bounds, ispos)):
if pos:
upper = EXPMAX if b.upper is None else np.log(b.upper)
bounds[i] = bt.Bound(lower=LOGMINPOS, upper=upper)
return logx, expx, gradx, bounds | ['def', '_logtrick_gen', '(', 'bounds', ')', ':', '# Test which parameters we can apply the log trick too', 'ispos', '=', 'np', '.', 'array', '(', '[', 'isinstance', '(', 'b', ',', 'bt', '.', 'Positive', ')', 'for', 'b', 'in', 'bounds', ']', ',', 'dtype', '=', 'bool', ')', 'nispos', '=', '~', 'ispos', '# Functions that implement the log trick', 'def', 'logx', '(', 'x', ')', ':', 'xwarp', '=', 'np', '.', 'empty_like', '(', 'x', ')', 'xwarp', '[', 'ispos', ']', '=', 'np', '.', 'log', '(', 'x', '[', 'ispos', ']', ')', 'xwarp', '[', 'nispos', ']', '=', 'x', '[', 'nispos', ']', 'return', 'xwarp', 'def', 'expx', '(', 'xwarp', ')', ':', 'x', '=', 'np', '.', 'empty_like', '(', 'xwarp', ')', 'x', '[', 'ispos', ']', '=', 'np', '.', 'exp', '(', 'xwarp', '[', 'ispos', ']', ')', 'x', '[', 'nispos', ']', '=', 'xwarp', '[', 'nispos', ']', 'return', 'x', 'def', 'gradx', '(', 'grad', ',', 'xwarp', ')', ':', 'gwarp', '=', 'np', '.', 'empty_like', '(', 'grad', ')', 'gwarp', '[', 'ispos', ']', '=', 'grad', '[', 'ispos', ']', '*', 'np', '.', 'exp', '(', 'xwarp', '[', 'ispos', ']', ')', 'gwarp', '[', 'nispos', ']', '=', 'grad', '[', 'nispos', ']', 'return', 'gwarp', '# Redefine bounds as appropriate for new ranges for numerical stability', 'for', 'i', ',', '(', 'b', ',', 'pos', ')', 'in', 'enumerate', '(', 'zip', '(', 'bounds', ',', 'ispos', ')', ')', ':', 'if', 'pos', ':', 'upper', '=', 'EXPMAX', 'if', 'b', '.', 'upper', 'is', 'None', 'else', 'np', '.', 'log', '(', 'b', '.', 'upper', ')', 'bounds', '[', 'i', ']', '=', 'bt', '.', 'Bound', '(', 'lower', '=', 'LOGMINPOS', ',', 'upper', '=', 'upper', ')', 'return', 'logx', ',', 'expx', ',', 'gradx', ',', 'bounds'] | Generate warping functions and new bounds for the log trick. | ['Generate', 'warping', 'functions', 'and', 'new', 'bounds', 'for', 'the', 'log', 'trick', '.'] | train | https://github.com/NICTA/revrand/blob/4c1881b6c1772d2b988518e49dde954f165acfb6/revrand/optimize/decorators.py#L586-L617 |
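A standalone illustration of the "log trick" that `_logtrick_gen` builds, independent of revrand: positive parameters are warped into log space and unwarped back, while unconstrained parameters pass through unchanged. The parameter values and positivity mask are assumptions.

```python
import numpy as np

ispos = np.array([True, False])  # assume only the first parameter must stay positive


def logx(x):
    xw = x.copy()
    xw[ispos] = np.log(x[ispos])
    return xw


def expx(xw):
    x = xw.copy()
    x[ispos] = np.exp(xw[ispos])
    return x


x = np.array([0.5, -3.0])
assert np.allclose(expx(logx(x)), x)  # warping and unwarping recovers the original
```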
7,540 | pytroll/satpy | satpy/multiscene.py | MultiScene._get_animation_frames | def _get_animation_frames(self, all_datasets, shape, fill_value=None,
ignore_missing=False):
"""Create enhanced image frames to save to a file."""
for idx, ds in enumerate(all_datasets):
if ds is None and ignore_missing:
continue
elif ds is None:
log.debug("Missing frame: %d", idx)
data = da.zeros(shape, dtype=np.uint8, chunks=shape)
data = xr.DataArray(data)
else:
img = get_enhanced_image(ds)
data, mode = img.finalize(fill_value=fill_value)
if data.ndim == 3:
# assume all other shapes are (y, x)
# we need arrays grouped by pixel so
# transpose if needed
data = data.transpose('y', 'x', 'bands')
yield data.data | python | def _get_animation_frames(self, all_datasets, shape, fill_value=None,
ignore_missing=False):
"""Create enhanced image frames to save to a file."""
for idx, ds in enumerate(all_datasets):
if ds is None and ignore_missing:
continue
elif ds is None:
log.debug("Missing frame: %d", idx)
data = da.zeros(shape, dtype=np.uint8, chunks=shape)
data = xr.DataArray(data)
else:
img = get_enhanced_image(ds)
data, mode = img.finalize(fill_value=fill_value)
if data.ndim == 3:
# assume all other shapes are (y, x)
# we need arrays grouped by pixel so
# transpose if needed
data = data.transpose('y', 'x', 'bands')
yield data.data | ['def', '_get_animation_frames', '(', 'self', ',', 'all_datasets', ',', 'shape', ',', 'fill_value', '=', 'None', ',', 'ignore_missing', '=', 'False', ')', ':', 'for', 'idx', ',', 'ds', 'in', 'enumerate', '(', 'all_datasets', ')', ':', 'if', 'ds', 'is', 'None', 'and', 'ignore_missing', ':', 'continue', 'elif', 'ds', 'is', 'None', ':', 'log', '.', 'debug', '(', '"Missing frame: %d"', ',', 'idx', ')', 'data', '=', 'da', '.', 'zeros', '(', 'shape', ',', 'dtype', '=', 'np', '.', 'uint8', ',', 'chunks', '=', 'shape', ')', 'data', '=', 'xr', '.', 'DataArray', '(', 'data', ')', 'else', ':', 'img', '=', 'get_enhanced_image', '(', 'ds', ')', 'data', ',', 'mode', '=', 'img', '.', 'finalize', '(', 'fill_value', '=', 'fill_value', ')', 'if', 'data', '.', 'ndim', '==', '3', ':', '# assume all other shapes are (y, x)', '# we need arrays grouped by pixel so', '# transpose if needed', 'data', '=', 'data', '.', 'transpose', '(', "'y'", ',', "'x'", ',', "'bands'", ')', 'yield', 'data', '.', 'data'] | Create enhanced image frames to save to a file. | ['Create', 'enhanced', 'image', 'frames', 'to', 'save', 'to', 'a', 'file', '.'] | train | https://github.com/pytroll/satpy/blob/1f21d20ac686b745fb0da9b4030d139893e066dd/satpy/multiscene.py#L374-L392 |
7,541 | mbedmicro/pyOCD | pyocd/flash/flash.py | Flash.program_page | def program_page(self, address, bytes):
"""!
@brief Flash one or more pages.
@exception FlashProgramFailure
"""
assert self._active_operation == self.Operation.PROGRAM
# prevent security settings from locking the device
bytes = self.override_security_bits(address, bytes)
# first transfer in RAM
self.target.write_memory_block8(self.begin_data, bytes)
# update core register to execute the program_page subroutine
result = self._call_function_and_wait(self.flash_algo['pc_program_page'], address, len(bytes), self.begin_data)
# check the return code
if result != 0:
raise FlashProgramFailure('program_page(0x%x) error: %i' % (address, result), address, result) | python | def program_page(self, address, bytes):
"""!
@brief Flash one or more pages.
@exception FlashProgramFailure
"""
assert self._active_operation == self.Operation.PROGRAM
# prevent security settings from locking the device
bytes = self.override_security_bits(address, bytes)
# first transfer in RAM
self.target.write_memory_block8(self.begin_data, bytes)
# update core register to execute the program_page subroutine
result = self._call_function_and_wait(self.flash_algo['pc_program_page'], address, len(bytes), self.begin_data)
# check the return code
if result != 0:
raise FlashProgramFailure('program_page(0x%x) error: %i' % (address, result), address, result) | ['def', 'program_page', '(', 'self', ',', 'address', ',', 'bytes', ')', ':', 'assert', 'self', '.', '_active_operation', '==', 'self', '.', 'Operation', '.', 'PROGRAM', '# prevent security settings from locking the device', 'bytes', '=', 'self', '.', 'override_security_bits', '(', 'address', ',', 'bytes', ')', '# first transfer in RAM', 'self', '.', 'target', '.', 'write_memory_block8', '(', 'self', '.', 'begin_data', ',', 'bytes', ')', '# update core register to execute the program_page subroutine', 'result', '=', 'self', '.', '_call_function_and_wait', '(', 'self', '.', 'flash_algo', '[', "'pc_program_page'", ']', ',', 'address', ',', 'len', '(', 'bytes', ')', ',', 'self', '.', 'begin_data', ')', '# check the return code', 'if', 'result', '!=', '0', ':', 'raise', 'FlashProgramFailure', '(', "'program_page(0x%x) error: %i'", '%', '(', 'address', ',', 'result', ')', ',', 'address', ',', 'result', ')'] | !
@brief Flash one or more pages.
@exception FlashProgramFailure | ['!'] | train | https://github.com/mbedmicro/pyOCD/blob/41a174718a9739f3cbe785c2ba21cb7fd1310c6f/pyocd/flash/flash.py#L355-L374 |
7,542 | Unidata/MetPy | examples/meteogram_metpy.py | Meteogram.plot_rh | def plot_rh(self, rh, plot_range=None):
"""
Required input:
RH: Relative humidity (%)
Optional Input:
plot_range: Data range for making figure (list of (min,max,step))
"""
# PLOT RELATIVE HUMIDITY
if not plot_range:
plot_range = [0, 100, 4]
self.ax3 = fig.add_subplot(4, 1, 3, sharex=self.ax1)
self.ax3.plot(self.dates, rh, 'g-', label='Relative Humidity')
self.ax3.legend(loc='upper center', bbox_to_anchor=(0.5, 1.22), prop={'size': 12})
self.ax3.grid(b=True, which='major', axis='y', color='k', linestyle='--',
linewidth=0.5)
self.ax3.set_ylim(plot_range[0], plot_range[1], plot_range[2])
self.ax3.fill_between(self.dates, rh, self.ax3.get_ylim()[0], color='g')
self.ax3.set_ylabel('Relative Humidity\n(%)', multialignment='center')
self.ax3.xaxis.set_major_formatter(mpl.dates.DateFormatter('%d/%H UTC'))
axtwin = self.ax3.twinx()
axtwin.set_ylim(plot_range[0], plot_range[1], plot_range[2]) | python | def plot_rh(self, rh, plot_range=None):
"""
Required input:
RH: Relative humidity (%)
Optional Input:
plot_range: Data range for making figure (list of (min,max,step))
"""
# PLOT RELATIVE HUMIDITY
if not plot_range:
plot_range = [0, 100, 4]
self.ax3 = fig.add_subplot(4, 1, 3, sharex=self.ax1)
self.ax3.plot(self.dates, rh, 'g-', label='Relative Humidity')
self.ax3.legend(loc='upper center', bbox_to_anchor=(0.5, 1.22), prop={'size': 12})
self.ax3.grid(b=True, which='major', axis='y', color='k', linestyle='--',
linewidth=0.5)
self.ax3.set_ylim(plot_range[0], plot_range[1], plot_range[2])
self.ax3.fill_between(self.dates, rh, self.ax3.get_ylim()[0], color='g')
self.ax3.set_ylabel('Relative Humidity\n(%)', multialignment='center')
self.ax3.xaxis.set_major_formatter(mpl.dates.DateFormatter('%d/%H UTC'))
axtwin = self.ax3.twinx()
axtwin.set_ylim(plot_range[0], plot_range[1], plot_range[2]) | ['def', 'plot_rh', '(', 'self', ',', 'rh', ',', 'plot_range', '=', 'None', ')', ':', '# PLOT RELATIVE HUMIDITY', 'if', 'not', 'plot_range', ':', 'plot_range', '=', '[', '0', ',', '100', ',', '4', ']', 'self', '.', 'ax3', '=', 'fig', '.', 'add_subplot', '(', '4', ',', '1', ',', '3', ',', 'sharex', '=', 'self', '.', 'ax1', ')', 'self', '.', 'ax3', '.', 'plot', '(', 'self', '.', 'dates', ',', 'rh', ',', "'g-'", ',', 'label', '=', "'Relative Humidity'", ')', 'self', '.', 'ax3', '.', 'legend', '(', 'loc', '=', "'upper center'", ',', 'bbox_to_anchor', '=', '(', '0.5', ',', '1.22', ')', ',', 'prop', '=', '{', "'size'", ':', '12', '}', ')', 'self', '.', 'ax3', '.', 'grid', '(', 'b', '=', 'True', ',', 'which', '=', "'major'", ',', 'axis', '=', "'y'", ',', 'color', '=', "'k'", ',', 'linestyle', '=', "'--'", ',', 'linewidth', '=', '0.5', ')', 'self', '.', 'ax3', '.', 'set_ylim', '(', 'plot_range', '[', '0', ']', ',', 'plot_range', '[', '1', ']', ',', 'plot_range', '[', '2', ']', ')', 'self', '.', 'ax3', '.', 'fill_between', '(', 'self', '.', 'dates', ',', 'rh', ',', 'self', '.', 'ax3', '.', 'get_ylim', '(', ')', '[', '0', ']', ',', 'color', '=', "'g'", ')', 'self', '.', 'ax3', '.', 'set_ylabel', '(', "'Relative Humidity\\n(%)'", ',', 'multialignment', '=', "'center'", ')', 'self', '.', 'ax3', '.', 'xaxis', '.', 'set_major_formatter', '(', 'mpl', '.', 'dates', '.', 'DateFormatter', '(', "'%d/%H UTC'", ')', ')', 'axtwin', '=', 'self', '.', 'ax3', '.', 'twinx', '(', ')', 'axtwin', '.', 'set_ylim', '(', 'plot_range', '[', '0', ']', ',', 'plot_range', '[', '1', ']', ',', 'plot_range', '[', '2', ']', ')'] | Required input:
RH: Relative humidity (%)
Optional Input:
plot_range: Data range for making figure (list of (min,max,step)) | ['Required', 'input', ':', 'RH', ':', 'Relative', 'humidity', '(', '%', ')', 'Optional', 'Input', ':', 'plot_range', ':', 'Data', 'range', 'for', 'making', 'figure', '(', 'list', 'of', '(', 'min', 'max', 'step', '))'] | train | https://github.com/Unidata/MetPy/blob/16f68a94919b9a82dcf9cada2169cf039129e67b/examples/meteogram_metpy.py#L121-L142 |
7,543 | cloud-custodian/cloud-custodian | tools/sandbox/c7n_autodoc/c7n-autodoc.py | gather_file_data | def gather_file_data(config):
""" Gather policy information from files
"""
file_regex = re.compile(config['file_regex'])
category_regex = re.compile(config['category_regex'])
policies = {}
for root, dirs, files in os.walk(config['c7n_policy_directory']):
for file in files:
if file_regex.match(file):
file_path = root + '/' + file
logging.debug('Processing file %s', file_path)
with open(file_path, 'r') as stream:
try:
if category_regex.search(file_path):
category = 'Security & Governance'
else:
category = 'Cost Controls'
policies = yaml.load(stream)
for policy in policies['policies']:
logging.debug(
'Processing policy %s', policy['name'])
policy['file_url'] = get_file_url(
file_path, config)
resource_type = policy['resource']
if category not in c7n_data:
c7n_data[category] = {}
if resource_type not in c7n_data[category]:
c7n_data[category][resource_type] = []
c7n_data[category][resource_type].append(policy)
except yaml.YAMLError as exc:
logging.error(exc) | python | def gather_file_data(config):
""" Gather policy information from files
"""
file_regex = re.compile(config['file_regex'])
category_regex = re.compile(config['category_regex'])
policies = {}
for root, dirs, files in os.walk(config['c7n_policy_directory']):
for file in files:
if file_regex.match(file):
file_path = root + '/' + file
logging.debug('Processing file %s', file_path)
with open(file_path, 'r') as stream:
try:
if category_regex.search(file_path):
category = 'Security & Governance'
else:
category = 'Cost Controls'
policies = yaml.load(stream)
for policy in policies['policies']:
logging.debug(
'Processing policy %s', policy['name'])
policy['file_url'] = get_file_url(
file_path, config)
resource_type = policy['resource']
if category not in c7n_data:
c7n_data[category] = {}
if resource_type not in c7n_data[category]:
c7n_data[category][resource_type] = []
c7n_data[category][resource_type].append(policy)
except yaml.YAMLError as exc:
logging.error(exc) | ['def', 'gather_file_data', '(', 'config', ')', ':', 'file_regex', '=', 're', '.', 'compile', '(', 'config', '[', "'file_regex'", ']', ')', 'category_regex', '=', 're', '.', 'compile', '(', 'config', '[', "'category_regex'", ']', ')', 'policies', '=', '{', '}', 'for', 'root', ',', 'dirs', ',', 'files', 'in', 'os', '.', 'walk', '(', 'config', '[', "'c7n_policy_directory'", ']', ')', ':', 'for', 'file', 'in', 'files', ':', 'if', 'file_regex', '.', 'match', '(', 'file', ')', ':', 'file_path', '=', 'root', '+', "'/'", '+', 'file', 'logging', '.', 'debug', '(', "'Processing file %s'", ',', 'file_path', ')', 'with', 'open', '(', 'file_path', ',', "'r'", ')', 'as', 'stream', ':', 'try', ':', 'if', 'category_regex', '.', 'search', '(', 'file_path', ')', ':', 'category', '=', "'Security & Governance'", 'else', ':', 'category', '=', "'Cost Controls'", 'policies', '=', 'yaml', '.', 'load', '(', 'stream', ')', 'for', 'policy', 'in', 'policies', '[', "'policies'", ']', ':', 'logging', '.', 'debug', '(', "'Processing policy %s'", ',', 'policy', '[', "'name'", ']', ')', 'policy', '[', "'file_url'", ']', '=', 'get_file_url', '(', 'file_path', ',', 'config', ')', 'resource_type', '=', 'policy', '[', "'resource'", ']', 'if', 'category', 'not', 'in', 'c7n_data', ':', 'c7n_data', '[', 'category', ']', '=', '{', '}', 'if', 'resource_type', 'not', 'in', 'c7n_data', '[', 'category', ']', ':', 'c7n_data', '[', 'category', ']', '[', 'resource_type', ']', '=', '[', ']', 'c7n_data', '[', 'category', ']', '[', 'resource_type', ']', '.', 'append', '(', 'policy', ')', 'except', 'yaml', '.', 'YAMLError', 'as', 'exc', ':', 'logging', '.', 'error', '(', 'exc', ')'] | Gather policy information from files | ['Gather', 'policy', 'information', 'from', 'files'] | train | https://github.com/cloud-custodian/cloud-custodian/blob/52ef732eb3d7bc939d1579faf519314814695c08/tools/sandbox/c7n_autodoc/c7n-autodoc.py#L76-L108 |
7,544 | Shapeways/coyote_framework | coyote_framework/webdriver/webdriverwrapper/WebElementWrapper.py | WebElementWrapper.has_class | def has_class(self, classname):
"""Test if an element has a specific classname
@type classname: str
@param classname: Classname to test for; cannot contain spaces
@rtype: bool
@return: True if classname exists; false otherwise
"""
def element_has_class():
"""Wrapper to test if element has a class"""
pattern = re.compile('(\s|^){classname}(\s|$)'.format(classname=classname))
classes = self.element.get_attribute('class')
matches = re.search(pattern, classes)
if matches is not None:
return True
return False
return self.execute_and_handle_webelement_exceptions(
element_has_class,
'check for element class "{}"'.format(classname)
) | python | def has_class(self, classname):
"""Test if an element has a specific classname
@type classname: str
@param classname: Classname to test for; cannot contain spaces
@rtype: bool
@return: True if classname exists; false otherwise
"""
def element_has_class():
"""Wrapper to test if element has a class"""
pattern = re.compile('(\s|^){classname}(\s|$)'.format(classname=classname))
classes = self.element.get_attribute('class')
matches = re.search(pattern, classes)
if matches is not None:
return True
return False
return self.execute_and_handle_webelement_exceptions(
element_has_class,
'check for element class "{}"'.format(classname)
) | ['def', 'has_class', '(', 'self', ',', 'classname', ')', ':', 'def', 'element_has_class', '(', ')', ':', '"""Wrapper to test if element has a class"""', 'pattern', '=', 're', '.', 'compile', '(', "'(\\s|^){classname}(\\s|$)'", '.', 'format', '(', 'classname', '=', 'classname', ')', ')', 'classes', '=', 'self', '.', 'element', '.', 'get_attribute', '(', "'class'", ')', 'matches', '=', 're', '.', 'search', '(', 'pattern', ',', 'classes', ')', 'if', 'matches', 'is', 'not', 'None', ':', 'return', 'True', 'return', 'False', 'return', 'self', '.', 'execute_and_handle_webelement_exceptions', '(', 'element_has_class', ',', '\'check for element class "{}"\'', '.', 'format', '(', 'classname', ')', ')'] | Test if an element has a specific classname
@type classname: str
@param classname: Classname to test for; cannot contain spaces
@rtype: bool
@return: True if classname exists; false otherwise | ['Test', 'if', 'an', 'element', 'has', 'a', 'specific', 'classname'] | train | https://github.com/Shapeways/coyote_framework/blob/cb29899b984a21d56bf65d0b1d907073948fe16c/coyote_framework/webdriver/webdriverwrapper/WebElementWrapper.py#L379-L400 |
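A standalone check of the whitespace-anchored class regex that `has_class` uses in the record above; the class strings are made-up examples and the helper here only mirrors the pattern, not the WebDriver plumbing.

```python
import re


def class_present(class_attr, classname):
    pattern = re.compile('(\\s|^){classname}(\\s|$)'.format(classname=classname))
    return re.search(pattern, class_attr) is not None


assert class_present('btn btn-primary active', 'btn-primary')
assert not class_present('button', 'btn')  # substrings of other class names do not match
```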
7,545 | rackerlabs/lambda-uploader | lambda_uploader/uploader.py | PackageUploader._upload_s3 | def _upload_s3(self, zip_file):
'''
Uploads the lambda package to s3
'''
s3_client = self._aws_session.client('s3')
transfer = boto3.s3.transfer.S3Transfer(s3_client)
transfer.upload_file(zip_file, self._config.s3_bucket,
self._config.s3_package_name()) | python | def _upload_s3(self, zip_file):
'''
Uploads the lambda package to s3
'''
s3_client = self._aws_session.client('s3')
transfer = boto3.s3.transfer.S3Transfer(s3_client)
transfer.upload_file(zip_file, self._config.s3_bucket,
self._config.s3_package_name()) | ['def', '_upload_s3', '(', 'self', ',', 'zip_file', ')', ':', 's3_client', '=', 'self', '.', '_aws_session', '.', 'client', '(', "'s3'", ')', 'transfer', '=', 'boto3', '.', 's3', '.', 'transfer', '.', 'S3Transfer', '(', 's3_client', ')', 'transfer', '.', 'upload_file', '(', 'zip_file', ',', 'self', '.', '_config', '.', 's3_bucket', ',', 'self', '.', '_config', '.', 's3_package_name', '(', ')', ')'] | Uploads the lambda package to s3 | ['Uploads', 'the', 'lambda', 'package', 'to', 's3'] | train | https://github.com/rackerlabs/lambda-uploader/blob/a5036e60d45d1a4fdc07df071f5b6e3b113388d4/lambda_uploader/uploader.py#L219-L226 |
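A minimal sketch of the same S3 upload path as `_upload_s3` above, assuming AWS credentials are already configured; the file, bucket and key names are placeholders.

```python
import boto3
from boto3.s3.transfer import S3Transfer

s3_client = boto3.client('s3')
transfer = S3Transfer(s3_client)
transfer.upload_file('lambda_function.zip', 'my-deploy-bucket', 'lambda_function.zip')
```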
7,546 | pypyr/pypyr-cli | pypyr/steps/pype.py | get_arguments | def get_arguments(context):
"""Parse arguments for pype from context and assign default values.
Args:
context: pypyr.context.Context. context is mandatory.
Returns:
tuple (pipeline_name, #str
use_parent_context, #bool
pipe_arg, #str
skip_parse, #bool
raise_error #bool
)
Raises:
pypyr.errors.KeyNotInContextError: if ['pype']['name'] is missing.
pypyr.errors.KeyInContextHasNoValueError: if ['pype']['name'] exists but
is None.
"""
context.assert_key_has_value(key='pype', caller=__name__)
pype = context.get_formatted('pype')
try:
pipeline_name = pype['name']
if pipeline_name is None:
raise KeyInContextHasNoValueError(
"pypyr.steps.pype ['pype']['name'] exists but is empty.")
except KeyError as err:
raise KeyNotInContextError(
"pypyr.steps.pype missing 'name' in the 'pype' context item. "
"You need to specify the pipeline name to run another "
"pipeline.") from err
use_parent_context = pype.get('useParentContext', True)
pipe_arg = pype.get('pipeArg', None)
skip_parse = pype.get('skipParse', True)
raise_error = pype.get('raiseError', True)
loader = pype.get('loader', None)
return (
pipeline_name,
use_parent_context,
pipe_arg,
skip_parse,
raise_error,
loader,
) | python | def get_arguments(context):
"""Parse arguments for pype from context and assign default values.
Args:
context: pypyr.context.Context. context is mandatory.
Returns:
tuple (pipeline_name, #str
use_parent_context, #bool
pipe_arg, #str
skip_parse, #bool
raise_error #bool
)
Raises:
pypyr.errors.KeyNotInContextError: if ['pype']['name'] is missing.
pypyr.errors.KeyInContextHasNoValueError: if ['pype']['name'] exists but
is None.
"""
context.assert_key_has_value(key='pype', caller=__name__)
pype = context.get_formatted('pype')
try:
pipeline_name = pype['name']
if pipeline_name is None:
raise KeyInContextHasNoValueError(
"pypyr.steps.pype ['pype']['name'] exists but is empty.")
except KeyError as err:
raise KeyNotInContextError(
"pypyr.steps.pype missing 'name' in the 'pype' context item. "
"You need to specify the pipeline name to run another "
"pipeline.") from err
use_parent_context = pype.get('useParentContext', True)
pipe_arg = pype.get('pipeArg', None)
skip_parse = pype.get('skipParse', True)
raise_error = pype.get('raiseError', True)
loader = pype.get('loader', None)
return (
pipeline_name,
use_parent_context,
pipe_arg,
skip_parse,
raise_error,
loader,
) | ['def', 'get_arguments', '(', 'context', ')', ':', 'context', '.', 'assert_key_has_value', '(', 'key', '=', "'pype'", ',', 'caller', '=', '__name__', ')', 'pype', '=', 'context', '.', 'get_formatted', '(', "'pype'", ')', 'try', ':', 'pipeline_name', '=', 'pype', '[', "'name'", ']', 'if', 'pipeline_name', 'is', 'None', ':', 'raise', 'KeyInContextHasNoValueError', '(', '"pypyr.steps.pype [\'pype\'][\'name\'] exists but is empty."', ')', 'except', 'KeyError', 'as', 'err', ':', 'raise', 'KeyNotInContextError', '(', '"pypyr.steps.pype missing \'name\' in the \'pype\' context item. "', '"You need to specify the pipeline name to run another "', '"pipeline."', ')', 'from', 'err', 'use_parent_context', '=', 'pype', '.', 'get', '(', "'useParentContext'", ',', 'True', ')', 'pipe_arg', '=', 'pype', '.', 'get', '(', "'pipeArg'", ',', 'None', ')', 'skip_parse', '=', 'pype', '.', 'get', '(', "'skipParse'", ',', 'True', ')', 'raise_error', '=', 'pype', '.', 'get', '(', "'raiseError'", ',', 'True', ')', 'loader', '=', 'pype', '.', 'get', '(', "'loader'", ',', 'None', ')', 'return', '(', 'pipeline_name', ',', 'use_parent_context', ',', 'pipe_arg', ',', 'skip_parse', ',', 'raise_error', ',', 'loader', ',', ')'] | Parse arguments for pype from context and assign default values.
Args:
context: pypyr.context.Context. context is mandatory.
Returns:
tuple (pipeline_name, #str
use_parent_context, #bool
pipe_arg, #str
skip_parse, #bool
raise_error #bool
)
Raises:
pypyr.errors.KeyNotInContextError: if ['pype']['name'] is missing.
pypyr.errors.KeyInContextHasNoValueError: if ['pype']['name'] exists but
is None. | ['Parse', 'arguments', 'for', 'pype', 'from', 'context', 'and', 'assign', 'default', 'values', '.'] | train | https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/steps/pype.py#L96-L143 |
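An illustrative shape for the `pype` context item that `get_arguments` reads in the record above; the pipeline name and values are examples only, and the defaults shown mirror the `.get()` fallbacks in the function body.

```python
pype_step_context = {
    'pype': {
        'name': 'child-pipeline',   # required; KeyNotInContextError is raised if missing
        'useParentContext': True,
        'pipeArg': None,
        'skipParse': True,
        'raiseError': True,
        'loader': None,
    }
}
```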
7,547 | tsileo/globster | globster.py | normalize_pattern | def normalize_pattern(pattern):
"""Converts backslashes in path patterns to forward slashes.
Doesn't normalize regular expressions - they may contain escapes.
"""
if not (pattern.startswith('RE:') or pattern.startswith('!RE:')):
pattern = _slashes.sub('/', pattern)
if len(pattern) > 1:
pattern = pattern.rstrip('/')
return pattern | python | def normalize_pattern(pattern):
"""Converts backslashes in path patterns to forward slashes.
Doesn't normalize regular expressions - they may contain escapes.
"""
if not (pattern.startswith('RE:') or pattern.startswith('!RE:')):
pattern = _slashes.sub('/', pattern)
if len(pattern) > 1:
pattern = pattern.rstrip('/')
return pattern | ['def', 'normalize_pattern', '(', 'pattern', ')', ':', 'if', 'not', '(', 'pattern', '.', 'startswith', '(', "'RE:'", ')', 'or', 'pattern', '.', 'startswith', '(', "'!RE:'", ')', ')', ':', 'pattern', '=', '_slashes', '.', 'sub', '(', "'/'", ',', 'pattern', ')', 'if', 'len', '(', 'pattern', ')', '>', '1', ':', 'pattern', '=', 'pattern', '.', 'rstrip', '(', "'/'", ')', 'return', 'pattern'] | Converts backslashes in path patterns to forward slashes.
Doesn't normalize regular expressions - they may contain escapes. | ['Converts', 'backslashes', 'in', 'path', 'patterns', 'to', 'forward', 'slashes', '.'] | train | https://github.com/tsileo/globster/blob/9628bce60207b150d39b409cddc3fadb34e70841/globster.py#L366-L375 |
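Expected behaviour of `normalize_pattern` from the record above, assuming the module-level `_slashes` regex matches backslashes as the docstring describes and the function is in scope.

```python
assert normalize_pattern('foo\\bar\\') == 'foo/bar'      # slashes normalised, trailing '/' stripped
assert normalize_pattern('RE:foo\\d+') == 'RE:foo\\d+'   # regex patterns are left untouched
```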
7,548 | explosion/spaCy | spacy/language.py | Language.update | def update(self, docs, golds, drop=0.0, sgd=None, losses=None, component_cfg=None):
"""Update the models in the pipeline.
docs (iterable): A batch of `Doc` objects.
golds (iterable): A batch of `GoldParse` objects.
drop (float): The droput rate.
sgd (callable): An optimizer.
RETURNS (dict): Results from the update.
DOCS: https://spacy.io/api/language#update
"""
if len(docs) != len(golds):
raise IndexError(Errors.E009.format(n_docs=len(docs), n_golds=len(golds)))
if len(docs) == 0:
return
if sgd is None:
if self._optimizer is None:
self._optimizer = create_default_optimizer(Model.ops)
sgd = self._optimizer
# Allow dict of args to GoldParse, instead of GoldParse objects.
gold_objs = []
doc_objs = []
for doc, gold in zip(docs, golds):
if isinstance(doc, basestring_):
doc = self.make_doc(doc)
if not isinstance(gold, GoldParse):
gold = GoldParse(doc, **gold)
doc_objs.append(doc)
gold_objs.append(gold)
golds = gold_objs
docs = doc_objs
grads = {}
def get_grads(W, dW, key=None):
grads[key] = (W, dW)
get_grads.alpha = sgd.alpha
get_grads.b1 = sgd.b1
get_grads.b2 = sgd.b2
pipes = list(self.pipeline)
random.shuffle(pipes)
if component_cfg is None:
component_cfg = {}
for name, proc in pipes:
if not hasattr(proc, "update"):
continue
grads = {}
kwargs = component_cfg.get(name, {})
kwargs.setdefault("drop", drop)
proc.update(docs, golds, sgd=get_grads, losses=losses, **kwargs)
for key, (W, dW) in grads.items():
sgd(W, dW, key=key) | python | def update(self, docs, golds, drop=0.0, sgd=None, losses=None, component_cfg=None):
"""Update the models in the pipeline.
docs (iterable): A batch of `Doc` objects.
golds (iterable): A batch of `GoldParse` objects.
drop (float): The droput rate.
sgd (callable): An optimizer.
RETURNS (dict): Results from the update.
DOCS: https://spacy.io/api/language#update
"""
if len(docs) != len(golds):
raise IndexError(Errors.E009.format(n_docs=len(docs), n_golds=len(golds)))
if len(docs) == 0:
return
if sgd is None:
if self._optimizer is None:
self._optimizer = create_default_optimizer(Model.ops)
sgd = self._optimizer
# Allow dict of args to GoldParse, instead of GoldParse objects.
gold_objs = []
doc_objs = []
for doc, gold in zip(docs, golds):
if isinstance(doc, basestring_):
doc = self.make_doc(doc)
if not isinstance(gold, GoldParse):
gold = GoldParse(doc, **gold)
doc_objs.append(doc)
gold_objs.append(gold)
golds = gold_objs
docs = doc_objs
grads = {}
def get_grads(W, dW, key=None):
grads[key] = (W, dW)
get_grads.alpha = sgd.alpha
get_grads.b1 = sgd.b1
get_grads.b2 = sgd.b2
pipes = list(self.pipeline)
random.shuffle(pipes)
if component_cfg is None:
component_cfg = {}
for name, proc in pipes:
if not hasattr(proc, "update"):
continue
grads = {}
kwargs = component_cfg.get(name, {})
kwargs.setdefault("drop", drop)
proc.update(docs, golds, sgd=get_grads, losses=losses, **kwargs)
for key, (W, dW) in grads.items():
sgd(W, dW, key=key) | ['def', 'update', '(', 'self', ',', 'docs', ',', 'golds', ',', 'drop', '=', '0.0', ',', 'sgd', '=', 'None', ',', 'losses', '=', 'None', ',', 'component_cfg', '=', 'None', ')', ':', 'if', 'len', '(', 'docs', ')', '!=', 'len', '(', 'golds', ')', ':', 'raise', 'IndexError', '(', 'Errors', '.', 'E009', '.', 'format', '(', 'n_docs', '=', 'len', '(', 'docs', ')', ',', 'n_golds', '=', 'len', '(', 'golds', ')', ')', ')', 'if', 'len', '(', 'docs', ')', '==', '0', ':', 'return', 'if', 'sgd', 'is', 'None', ':', 'if', 'self', '.', '_optimizer', 'is', 'None', ':', 'self', '.', '_optimizer', '=', 'create_default_optimizer', '(', 'Model', '.', 'ops', ')', 'sgd', '=', 'self', '.', '_optimizer', '# Allow dict of args to GoldParse, instead of GoldParse objects.', 'gold_objs', '=', '[', ']', 'doc_objs', '=', '[', ']', 'for', 'doc', ',', 'gold', 'in', 'zip', '(', 'docs', ',', 'golds', ')', ':', 'if', 'isinstance', '(', 'doc', ',', 'basestring_', ')', ':', 'doc', '=', 'self', '.', 'make_doc', '(', 'doc', ')', 'if', 'not', 'isinstance', '(', 'gold', ',', 'GoldParse', ')', ':', 'gold', '=', 'GoldParse', '(', 'doc', ',', '*', '*', 'gold', ')', 'doc_objs', '.', 'append', '(', 'doc', ')', 'gold_objs', '.', 'append', '(', 'gold', ')', 'golds', '=', 'gold_objs', 'docs', '=', 'doc_objs', 'grads', '=', '{', '}', 'def', 'get_grads', '(', 'W', ',', 'dW', ',', 'key', '=', 'None', ')', ':', 'grads', '[', 'key', ']', '=', '(', 'W', ',', 'dW', ')', 'get_grads', '.', 'alpha', '=', 'sgd', '.', 'alpha', 'get_grads', '.', 'b1', '=', 'sgd', '.', 'b1', 'get_grads', '.', 'b2', '=', 'sgd', '.', 'b2', 'pipes', '=', 'list', '(', 'self', '.', 'pipeline', ')', 'random', '.', 'shuffle', '(', 'pipes', ')', 'if', 'component_cfg', 'is', 'None', ':', 'component_cfg', '=', '{', '}', 'for', 'name', ',', 'proc', 'in', 'pipes', ':', 'if', 'not', 'hasattr', '(', 'proc', ',', '"update"', ')', ':', 'continue', 'grads', '=', '{', '}', 'kwargs', '=', 'component_cfg', '.', 'get', '(', 'name', ',', '{', '}', ')', 'kwargs', '.', 'setdefault', '(', '"drop"', ',', 'drop', ')', 'proc', '.', 'update', '(', 'docs', ',', 'golds', ',', 'sgd', '=', 'get_grads', ',', 'losses', '=', 'losses', ',', '*', '*', 'kwargs', ')', 'for', 'key', ',', '(', 'W', ',', 'dW', ')', 'in', 'grads', '.', 'items', '(', ')', ':', 'sgd', '(', 'W', ',', 'dW', ',', 'key', '=', 'key', ')'] | Update the models in the pipeline.
docs (iterable): A batch of `Doc` objects.
golds (iterable): A batch of `GoldParse` objects.
drop (float): The droput rate.
sgd (callable): An optimizer.
RETURNS (dict): Results from the update.
DOCS: https://spacy.io/api/language#update | ['Update', 'the', 'models', 'in', 'the', 'pipeline', '.'] | train | https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/language.py#L408-L459 |
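A typical spaCy 2.x training step using `Language.update` as documented in the record above, passing plain dicts in place of `GoldParse` objects; the toy sentence and `ANIMAL` label are made up.

```python
import spacy

nlp = spacy.blank('en')
ner = nlp.create_pipe('ner')
nlp.add_pipe(ner)
ner.add_label('ANIMAL')

optimizer = nlp.begin_training()
losses = {}
nlp.update(['horses are fast'],
           [{'entities': [(0, 6, 'ANIMAL')]}],
           drop=0.2, sgd=optimizer, losses=losses)
print(losses)
```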
7,549 | sosreport/sos | sos/utilities.py | find | def find(file_pattern, top_dir, max_depth=None, path_pattern=None):
"""Generator function to find files recursively.
Usage::
for filename in find("*.properties", "/var/log/foobar"):
print filename
"""
if max_depth:
base_depth = os.path.dirname(top_dir).count(os.path.sep)
max_depth += base_depth
for path, dirlist, filelist in os.walk(top_dir):
if max_depth and path.count(os.path.sep) >= max_depth:
del dirlist[:]
if path_pattern and not fnmatch.fnmatch(path, path_pattern):
continue
for name in fnmatch.filter(filelist, file_pattern):
yield os.path.join(path, name) | python | def find(file_pattern, top_dir, max_depth=None, path_pattern=None):
"""Generator function to find files recursively.
Usage::
for filename in find("*.properties", "/var/log/foobar"):
print filename
"""
if max_depth:
base_depth = os.path.dirname(top_dir).count(os.path.sep)
max_depth += base_depth
for path, dirlist, filelist in os.walk(top_dir):
if max_depth and path.count(os.path.sep) >= max_depth:
del dirlist[:]
if path_pattern and not fnmatch.fnmatch(path, path_pattern):
continue
for name in fnmatch.filter(filelist, file_pattern):
yield os.path.join(path, name) | ['def', 'find', '(', 'file_pattern', ',', 'top_dir', ',', 'max_depth', '=', 'None', ',', 'path_pattern', '=', 'None', ')', ':', 'if', 'max_depth', ':', 'base_depth', '=', 'os', '.', 'path', '.', 'dirname', '(', 'top_dir', ')', '.', 'count', '(', 'os', '.', 'path', '.', 'sep', ')', 'max_depth', '+=', 'base_depth', 'for', 'path', ',', 'dirlist', ',', 'filelist', 'in', 'os', '.', 'walk', '(', 'top_dir', ')', ':', 'if', 'max_depth', 'and', 'path', '.', 'count', '(', 'os', '.', 'path', '.', 'sep', ')', '>=', 'max_depth', ':', 'del', 'dirlist', '[', ':', ']', 'if', 'path_pattern', 'and', 'not', 'fnmatch', '.', 'fnmatch', '(', 'path', ',', 'path_pattern', ')', ':', 'continue', 'for', 'name', 'in', 'fnmatch', '.', 'filter', '(', 'filelist', ',', 'file_pattern', ')', ':', 'yield', 'os', '.', 'path', '.', 'join', '(', 'path', ',', 'name', ')'] | Generator function to find files recursively.
Usage::
for filename in find("*.properties", "/var/log/foobar"):
print filename | ['Generator', 'function', 'to', 'find', 'files', 'recursively', '.', 'Usage', '::'] | train | https://github.com/sosreport/sos/blob/2ebc04da53dc871c8dd5243567afa4f8592dca29/sos/utilities.py#L65-L84 |
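An example call for the `find` generator above, extending the docstring usage with the optional depth limit; the glob and directory are placeholders.

```python
for filename in find('*.log', '/var/log', max_depth=2):
    print(filename)
```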
7,550 | urschrei/convertbng | convertbng/util.py | _void_array_to_list | def _void_array_to_list(restuple, _func, _args):
""" Convert the FFI result to Python data structures """
shape = (restuple.e.len, 1)
array_size = np.prod(shape)
mem_size = 8 * array_size
array_str_e = string_at(restuple.e.data, mem_size)
array_str_n = string_at(restuple.n.data, mem_size)
ls_e = np.frombuffer(array_str_e, float, array_size).tolist()
ls_n = np.frombuffer(array_str_n, float, array_size).tolist()
return ls_e, ls_n | python | def _void_array_to_list(restuple, _func, _args):
""" Convert the FFI result to Python data structures """
shape = (restuple.e.len, 1)
array_size = np.prod(shape)
mem_size = 8 * array_size
array_str_e = string_at(restuple.e.data, mem_size)
array_str_n = string_at(restuple.n.data, mem_size)
ls_e = np.frombuffer(array_str_e, float, array_size).tolist()
ls_n = np.frombuffer(array_str_n, float, array_size).tolist()
return ls_e, ls_n | ['def', '_void_array_to_list', '(', 'restuple', ',', '_func', ',', '_args', ')', ':', 'shape', '=', '(', 'restuple', '.', 'e', '.', 'len', ',', '1', ')', 'array_size', '=', 'np', '.', 'prod', '(', 'shape', ')', 'mem_size', '=', '8', '*', 'array_size', 'array_str_e', '=', 'string_at', '(', 'restuple', '.', 'e', '.', 'data', ',', 'mem_size', ')', 'array_str_n', '=', 'string_at', '(', 'restuple', '.', 'n', '.', 'data', ',', 'mem_size', ')', 'ls_e', '=', 'np', '.', 'frombuffer', '(', 'array_str_e', ',', 'float', ',', 'array_size', ')', '.', 'tolist', '(', ')', 'ls_n', '=', 'np', '.', 'frombuffer', '(', 'array_str_n', ',', 'float', ',', 'array_size', ')', '.', 'tolist', '(', ')', 'return', 'ls_e', ',', 'ls_n'] | Convert the FFI result to Python data structures | ['Convert', 'the', 'FFI', 'result', 'to', 'Python', 'data', 'structures'] | train | https://github.com/urschrei/convertbng/blob/b0f5ca8b4942a835a834aed4c1fdb4d827c72342/convertbng/util.py#L122-L134 |
7,551 | hannes-brt/hebel | hebel/layers/input_dropout.py | InputDropout.backprop | def backprop(self, input_data, df_output, cache=None):
""" Backpropagate through the hidden layer
**Parameters:**
input_data : ``GPUArray``
Inpute data to perform dropout on.
df_output : ``GPUArray``
Gradients with respect to the output of this layer
(received from the layer above).
cache : list of ``GPUArray``
Cache obtained from forward pass. If the cache is
provided, then the activations are not recalculated.
**Returns:**
gradients : empty tuple
Gradients are empty since this layer has no parameters.
df_input : ``GPUArray``
Gradients with respect to the input.
"""
if self.compute_input_gradients:
apply_dropout_mask(df_output, dropout_mask)
return tuple(), df_output | python | def backprop(self, input_data, df_output, cache=None):
""" Backpropagate through the hidden layer
**Parameters:**
input_data : ``GPUArray``
Inpute data to perform dropout on.
df_output : ``GPUArray``
Gradients with respect to the output of this layer
(received from the layer above).
cache : list of ``GPUArray``
Cache obtained from forward pass. If the cache is
provided, then the activations are not recalculated.
**Returns:**
gradients : empty tuple
Gradients are empty since this layer has no parameters.
df_input : ``GPUArray``
Gradients with respect to the input.
"""
if self.compute_input_gradients:
apply_dropout_mask(df_output, dropout_mask)
return tuple(), df_output | ['def', 'backprop', '(', 'self', ',', 'input_data', ',', 'df_output', ',', 'cache', '=', 'None', ')', ':', 'if', 'self', '.', 'compute_input_gradients', ':', 'apply_dropout_mask', '(', 'df_output', ',', 'dropout_mask', ')', 'return', 'tuple', '(', ')', ',', 'df_output'] | Backpropagate through the hidden layer
**Parameters:**
input_data : ``GPUArray``
Inpute data to perform dropout on.
df_output : ``GPUArray``
Gradients with respect to the output of this layer
(received from the layer above).
cache : list of ``GPUArray``
Cache obtained from forward pass. If the cache is
provided, then the activations are not recalculated.
**Returns:**
gradients : empty tuple
Gradients are empty since this layer has no parameters.
df_input : ``GPUArray``
Gradients with respect to the input. | ['Backpropagate', 'through', 'the', 'hidden', 'layer'] | train | https://github.com/hannes-brt/hebel/blob/1e2c3a9309c2646103901b26a55be4e312dd5005/hebel/layers/input_dropout.py#L91-L119 |
7,552 | ranaroussi/qtpylib | qtpylib/tools.py | convert_timezone | def convert_timezone(date_str, tz_from, tz_to="UTC", fmt=None):
""" get timezone as tz_offset """
tz_offset = datetime_to_timezone(
datetime.datetime.now(), tz=tz_from).strftime('%z')
tz_offset = tz_offset[:3] + ':' + tz_offset[3:]
date = parse_date(str(date_str) + tz_offset)
if tz_from != tz_to:
date = datetime_to_timezone(date, tz_to)
if isinstance(fmt, str):
return date.strftime(fmt)
return date | python | def convert_timezone(date_str, tz_from, tz_to="UTC", fmt=None):
""" get timezone as tz_offset """
tz_offset = datetime_to_timezone(
datetime.datetime.now(), tz=tz_from).strftime('%z')
tz_offset = tz_offset[:3] + ':' + tz_offset[3:]
date = parse_date(str(date_str) + tz_offset)
if tz_from != tz_to:
date = datetime_to_timezone(date, tz_to)
if isinstance(fmt, str):
return date.strftime(fmt)
return date | ['def', 'convert_timezone', '(', 'date_str', ',', 'tz_from', ',', 'tz_to', '=', '"UTC"', ',', 'fmt', '=', 'None', ')', ':', 'tz_offset', '=', 'datetime_to_timezone', '(', 'datetime', '.', 'datetime', '.', 'now', '(', ')', ',', 'tz', '=', 'tz_from', ')', '.', 'strftime', '(', "'%z'", ')', 'tz_offset', '=', 'tz_offset', '[', ':', '3', ']', '+', "':'", '+', 'tz_offset', '[', '3', ':', ']', 'date', '=', 'parse_date', '(', 'str', '(', 'date_str', ')', '+', 'tz_offset', ')', 'if', 'tz_from', '!=', 'tz_to', ':', 'date', '=', 'datetime_to_timezone', '(', 'date', ',', 'tz_to', ')', 'if', 'isinstance', '(', 'fmt', ',', 'str', ')', ':', 'return', 'date', '.', 'strftime', '(', 'fmt', ')', 'return', 'date'] | get timezone as tz_offset | ['get', 'timezone', 'as', 'tz_offset'] | train | https://github.com/ranaroussi/qtpylib/blob/0dbbc465fafd9cb9b0f4d10e1e07fae4e15032dd/qtpylib/tools.py#L487-L499 |
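An example call for `convert_timezone` from the record above, assuming pytz-style zone names; note the helper derives the offset from `datetime.now()`, so the exact result depends on the current DST state.

```python
converted = convert_timezone('2019-01-15 09:30:00', tz_from='US/Eastern',
                             tz_to='UTC', fmt='%Y-%m-%d %H:%M:%S')
print(converted)  # e.g. '2019-01-15 14:30:00' while US/Eastern sits at UTC-5
```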
7,553 | tamasgal/km3pipe | km3pipe/io/ch.py | CHPump.finish | def finish(self):
"""Clean up the JLigier controlhost connection"""
log.debug("Disconnecting from JLigier.")
self.client.socket.shutdown(socket.SHUT_RDWR)
self.client._disconnect() | python | def finish(self):
"""Clean up the JLigier controlhost connection"""
log.debug("Disconnecting from JLigier.")
self.client.socket.shutdown(socket.SHUT_RDWR)
self.client._disconnect() | ['def', 'finish', '(', 'self', ')', ':', 'log', '.', 'debug', '(', '"Disconnecting from JLigier."', ')', 'self', '.', 'client', '.', 'socket', '.', 'shutdown', '(', 'socket', '.', 'SHUT_RDWR', ')', 'self', '.', 'client', '.', '_disconnect', '(', ')'] | Clean up the JLigier controlhost connection | ['Clean', 'up', 'the', 'JLigier', 'controlhost', 'connection'] | train | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/io/ch.py#L176-L180 |
7,554 | blubberdiblub/eztemplate | eztemplate/engines/string_formatter_engine.py | FormatterWrapper.format_field | def format_field(self, value, format_spec):
"""When field missing, return original spec."""
if isinstance(value, MissingField):
if format_spec is not None:
value.format_spec = format_spec
return str(value)
return super(FormatterWrapper, self).format_field(value, format_spec) | python | def format_field(self, value, format_spec):
"""When field missing, return original spec."""
if isinstance(value, MissingField):
if format_spec is not None:
value.format_spec = format_spec
return str(value)
return super(FormatterWrapper, self).format_field(value, format_spec) | ['def', 'format_field', '(', 'self', ',', 'value', ',', 'format_spec', ')', ':', 'if', 'isinstance', '(', 'value', ',', 'MissingField', ')', ':', 'if', 'format_spec', 'is', 'not', 'None', ':', 'value', '.', 'format_spec', '=', 'format_spec', 'return', 'str', '(', 'value', ')', 'return', 'super', '(', 'FormatterWrapper', ',', 'self', ')', '.', 'format_field', '(', 'value', ',', 'format_spec', ')'] | When field missing, return original spec. | ['When', 'field', 'missing', 'return', 'original', 'spec', '.'] | train | https://github.com/blubberdiblub/eztemplate/blob/ab5b2b4987c045116d130fd83e216704b8edfb5d/eztemplate/engines/string_formatter_engine.py#L81-L88 |
7,555 | inveniosoftware/invenio-previewer | invenio_previewer/extensions/csv_dthreejs.py | preview | def preview(file):
"""Render appropiate template with embed flag."""
file_info = validate_csv(file)
return render_template(
'invenio_previewer/csv_bar.html',
file=file,
delimiter=file_info['delimiter'],
encoding=file_info['encoding'],
js_bundles=current_previewer.js_bundles + ['previewer_csv_js'],
css_bundles=current_previewer.css_bundles,
) | python | def preview(file):
"""Render appropiate template with embed flag."""
file_info = validate_csv(file)
return render_template(
'invenio_previewer/csv_bar.html',
file=file,
delimiter=file_info['delimiter'],
encoding=file_info['encoding'],
js_bundles=current_previewer.js_bundles + ['previewer_csv_js'],
css_bundles=current_previewer.css_bundles,
) | ['def', 'preview', '(', 'file', ')', ':', 'file_info', '=', 'validate_csv', '(', 'file', ')', 'return', 'render_template', '(', "'invenio_previewer/csv_bar.html'", ',', 'file', '=', 'file', ',', 'delimiter', '=', 'file_info', '[', "'delimiter'", ']', ',', 'encoding', '=', 'file_info', '[', "'encoding'", ']', ',', 'js_bundles', '=', 'current_previewer', '.', 'js_bundles', '+', '[', "'previewer_csv_js'", ']', ',', 'css_bundles', '=', 'current_previewer', '.', 'css_bundles', ',', ')'] | Render appropiate template with embed flag. | ['Render', 'appropiate', 'template', 'with', 'embed', 'flag', '.'] | train | https://github.com/inveniosoftware/invenio-previewer/blob/558fd22e0f29cc8cd7a6999abd4febcf6b248c49/invenio_previewer/extensions/csv_dthreejs.py#L54-L64 |
7,556 | bslatkin/dpxdt | dpxdt/client/capture_worker.py | register | def register(coordinator):
"""Registers this module as a worker with the given coordinator."""
if FLAGS.phantomjs_script:
utils.verify_binary('phantomjs_binary', ['--version'])
assert os.path.exists(FLAGS.phantomjs_script)
else:
utils.verify_binary('capture_binary', ['--version'])
assert FLAGS.capture_script
assert os.path.exists(FLAGS.capture_script)
assert FLAGS.capture_threads > 0
assert FLAGS.queue_server_prefix
item = queue_worker.RemoteQueueWorkflow(
constants.CAPTURE_QUEUE_NAME,
DoCaptureQueueWorkflow,
max_tasks=FLAGS.capture_threads,
wait_seconds=FLAGS.capture_wait_seconds)
item.root = True
coordinator.input_queue.put(item) | python | def register(coordinator):
"""Registers this module as a worker with the given coordinator."""
if FLAGS.phantomjs_script:
utils.verify_binary('phantomjs_binary', ['--version'])
assert os.path.exists(FLAGS.phantomjs_script)
else:
utils.verify_binary('capture_binary', ['--version'])
assert FLAGS.capture_script
assert os.path.exists(FLAGS.capture_script)
assert FLAGS.capture_threads > 0
assert FLAGS.queue_server_prefix
item = queue_worker.RemoteQueueWorkflow(
constants.CAPTURE_QUEUE_NAME,
DoCaptureQueueWorkflow,
max_tasks=FLAGS.capture_threads,
wait_seconds=FLAGS.capture_wait_seconds)
item.root = True
coordinator.input_queue.put(item) | ['def', 'register', '(', 'coordinator', ')', ':', 'if', 'FLAGS', '.', 'phantomjs_script', ':', 'utils', '.', 'verify_binary', '(', "'phantomjs_binary'", ',', '[', "'--version'", ']', ')', 'assert', 'os', '.', 'path', '.', 'exists', '(', 'FLAGS', '.', 'phantomjs_script', ')', 'else', ':', 'utils', '.', 'verify_binary', '(', "'capture_binary'", ',', '[', "'--version'", ']', ')', 'assert', 'FLAGS', '.', 'capture_script', 'assert', 'os', '.', 'path', '.', 'exists', '(', 'FLAGS', '.', 'capture_script', ')', 'assert', 'FLAGS', '.', 'capture_threads', '>', '0', 'assert', 'FLAGS', '.', 'queue_server_prefix', 'item', '=', 'queue_worker', '.', 'RemoteQueueWorkflow', '(', 'constants', '.', 'CAPTURE_QUEUE_NAME', ',', 'DoCaptureQueueWorkflow', ',', 'max_tasks', '=', 'FLAGS', '.', 'capture_threads', ',', 'wait_seconds', '=', 'FLAGS', '.', 'capture_wait_seconds', ')', 'item', '.', 'root', '=', 'True', 'coordinator', '.', 'input_queue', '.', 'put', '(', 'item', ')'] | Registers this module as a worker with the given coordinator. | ['Registers', 'this', 'module', 'as', 'a', 'worker', 'with', 'the', 'given', 'coordinator', '.'] | train | https://github.com/bslatkin/dpxdt/blob/9f860de1731021d99253670429e5f2157e1f6297/dpxdt/client/capture_worker.py#L207-L227 |
7,557 | QualiSystems/cloudshell-networking-devices | cloudshell/devices/standards/sdn/configuration_attributes_structure.py | GenericSDNResource.add_trunk_ports | def add_trunk_ports(self):
"""SDN Controller enable trunk ports
:rtype: list[tuple[str, str]]
"""
ports = self.attributes.get("{}Enable Full Trunk Ports".format(self.namespace_prefix), None)
return self._parse_ports(ports=ports) | python | def add_trunk_ports(self):
"""SDN Controller enable trunk ports
:rtype: list[tuple[str, str]]
"""
ports = self.attributes.get("{}Enable Full Trunk Ports".format(self.namespace_prefix), None)
return self._parse_ports(ports=ports) | ['def', 'add_trunk_ports', '(', 'self', ')', ':', 'ports', '=', 'self', '.', 'attributes', '.', 'get', '(', '"{}Enable Full Trunk Ports"', '.', 'format', '(', 'self', '.', 'namespace_prefix', ')', ',', 'None', ')', 'return', 'self', '.', '_parse_ports', '(', 'ports', '=', 'ports', ')'] | SDN Controller enable trunk ports
:rtype: list[tuple[str, str]] | ['SDN', 'Controller', 'enable', 'trunk', 'ports'] | train | https://github.com/QualiSystems/cloudshell-networking-devices/blob/009aab33edb30035b52fe10dbb91db61c95ba4d9/cloudshell/devices/standards/sdn/configuration_attributes_structure.py#L68-L74 |
7,558 | MacHu-GWU/uszipcode-project | uszipcode/pkg/sqlalchemy_mate/engine_creator.py | create_postgresql_psycopg2 | def create_postgresql_psycopg2(username, password, host, port, database, **kwargs): # pragma: no cover
"""
create an engine connected to a postgresql database using psycopg2.
"""
return create_engine(
_create_postgresql_psycopg2(username, password, host, port, database),
**kwargs
) | python | def create_postgresql_psycopg2(username, password, host, port, database, **kwargs): # pragma: no cover
"""
create an engine connected to a postgresql database using psycopg2.
"""
return create_engine(
_create_postgresql_psycopg2(username, password, host, port, database),
**kwargs
) | ['def', 'create_postgresql_psycopg2', '(', 'username', ',', 'password', ',', 'host', ',', 'port', ',', 'database', ',', '*', '*', 'kwargs', ')', ':', '# pragma: no cover', 'return', 'create_engine', '(', '_create_postgresql_psycopg2', '(', 'username', ',', 'password', ',', 'host', ',', 'port', ',', 'database', ')', ',', '*', '*', 'kwargs', ')'] | create an engine connected to a postgresql database using psycopg2. | ['create', 'an', 'engine', 'connected', 'to', 'a', 'postgresql', 'database', 'using', 'psycopg2', '.'] | train | https://github.com/MacHu-GWU/uszipcode-project/blob/96282b779a3efb422802de83c48ca284598ba952/uszipcode/pkg/sqlalchemy_mate/engine_creator.py#L78-L85 |
7,559 | praw-dev/prawtools | prawtools/stats.py | SubredditStats.publish_results | def publish_results(self, view, submitters, commenters):
"""Submit the results to the subreddit. Has no return value (None)."""
def timef(timestamp, date_only=False):
"""Return a suitable string representaation of the timestamp."""
dtime = datetime.fromtimestamp(timestamp)
if date_only:
retval = dtime.strftime('%Y-%m-%d')
else:
retval = dtime.strftime('%Y-%m-%d %H:%M PDT')
return retval
basic = self.basic_stats()
top_commenters = self.top_commenters(commenters)
top_comments = self.top_comments()
top_submissions = self.top_submissions()
# Decrease number of top submitters if body is too large.
body = None
while body is None or len(body) > 40000 and submitters > 0:
body = (basic + self.top_submitters(submitters) + top_commenters
+ top_submissions + top_comments + self.post_footer)
submitters -= 1
title = '{} {} {}posts from {} to {}'.format(
self.post_prefix, str(self.subreddit),
'top ' if view in TOP_VALUES else '', timef(self.min_date, True),
timef(self.max_date))
try: # Attempt to make the submission
return self.submit_subreddit.submit(title, selftext=body)
except Exception:
logger.exception('Failed to submit to {}'
.format(self.submit_subreddit))
self._save_report(title, body) | python | def publish_results(self, view, submitters, commenters):
"""Submit the results to the subreddit. Has no return value (None)."""
def timef(timestamp, date_only=False):
"""Return a suitable string representaation of the timestamp."""
dtime = datetime.fromtimestamp(timestamp)
if date_only:
retval = dtime.strftime('%Y-%m-%d')
else:
retval = dtime.strftime('%Y-%m-%d %H:%M PDT')
return retval
basic = self.basic_stats()
top_commenters = self.top_commenters(commenters)
top_comments = self.top_comments()
top_submissions = self.top_submissions()
# Decrease number of top submitters if body is too large.
body = None
while body is None or len(body) > 40000 and submitters > 0:
body = (basic + self.top_submitters(submitters) + top_commenters
+ top_submissions + top_comments + self.post_footer)
submitters -= 1
title = '{} {} {}posts from {} to {}'.format(
self.post_prefix, str(self.subreddit),
'top ' if view in TOP_VALUES else '', timef(self.min_date, True),
timef(self.max_date))
try: # Attempt to make the submission
return self.submit_subreddit.submit(title, selftext=body)
except Exception:
logger.exception('Failed to submit to {}'
.format(self.submit_subreddit))
self._save_report(title, body) | ['def', 'publish_results', '(', 'self', ',', 'view', ',', 'submitters', ',', 'commenters', ')', ':', 'def', 'timef', '(', 'timestamp', ',', 'date_only', '=', 'False', ')', ':', '"""Return a suitable string representaation of the timestamp."""', 'dtime', '=', 'datetime', '.', 'fromtimestamp', '(', 'timestamp', ')', 'if', 'date_only', ':', 'retval', '=', 'dtime', '.', 'strftime', '(', "'%Y-%m-%d'", ')', 'else', ':', 'retval', '=', 'dtime', '.', 'strftime', '(', "'%Y-%m-%d %H:%M PDT'", ')', 'return', 'retval', 'basic', '=', 'self', '.', 'basic_stats', '(', ')', 'top_commenters', '=', 'self', '.', 'top_commenters', '(', 'commenters', ')', 'top_comments', '=', 'self', '.', 'top_comments', '(', ')', 'top_submissions', '=', 'self', '.', 'top_submissions', '(', ')', '# Decrease number of top submitters if body is too large.', 'body', '=', 'None', 'while', 'body', 'is', 'None', 'or', 'len', '(', 'body', ')', '>', '40000', 'and', 'submitters', '>', '0', ':', 'body', '=', '(', 'basic', '+', 'self', '.', 'top_submitters', '(', 'submitters', ')', '+', 'top_commenters', '+', 'top_submissions', '+', 'top_comments', '+', 'self', '.', 'post_footer', ')', 'submitters', '-=', '1', 'title', '=', "'{} {} {}posts from {} to {}'", '.', 'format', '(', 'self', '.', 'post_prefix', ',', 'str', '(', 'self', '.', 'subreddit', ')', ',', "'top '", 'if', 'view', 'in', 'TOP_VALUES', 'else', "''", ',', 'timef', '(', 'self', '.', 'min_date', ',', 'True', ')', ',', 'timef', '(', 'self', '.', 'max_date', ')', ')', 'try', ':', '# Attempt to make the submission', 'return', 'self', '.', 'submit_subreddit', '.', 'submit', '(', 'title', ',', 'selftext', '=', 'body', ')', 'except', 'Exception', ':', 'logger', '.', 'exception', '(', "'Failed to submit to {}'", '.', 'format', '(', 'self', '.', 'submit_subreddit', ')', ')', 'self', '.', '_save_report', '(', 'title', ',', 'body', ')'] | Submit the results to the subreddit. Has no return value (None). | ['Submit', 'the', 'results', 'to', 'the', 'subreddit', '.', 'Has', 'no', 'return', 'value', '(', 'None', ')', '.'] | train | https://github.com/praw-dev/prawtools/blob/571d5c28c2222f6f8dbbca8c815b8da0a776ab85/prawtools/stats.py#L227-L260 |
7,560 | saltstack/salt | salt/modules/memcached.py | increment | def increment(key, delta=1, host=DEFAULT_HOST, port=DEFAULT_PORT):
'''
Increment the value of a key
CLI Example:
.. code-block:: bash
salt '*' memcached.increment <key>
salt '*' memcached.increment <key> 2
'''
conn = _connect(host, port)
_check_stats(conn)
cur = get(key)
if cur is None:
raise CommandExecutionError('Key \'{0}\' does not exist'.format(key))
elif not isinstance(cur, six.integer_types):
raise CommandExecutionError(
'Value for key \'{0}\' must be an integer to be '
'incremented'.format(key)
)
try:
return conn.incr(key, delta)
except ValueError:
raise SaltInvocationError('Delta value must be an integer') | python | def increment(key, delta=1, host=DEFAULT_HOST, port=DEFAULT_PORT):
'''
Increment the value of a key
CLI Example:
.. code-block:: bash
salt '*' memcached.increment <key>
salt '*' memcached.increment <key> 2
'''
conn = _connect(host, port)
_check_stats(conn)
cur = get(key)
if cur is None:
raise CommandExecutionError('Key \'{0}\' does not exist'.format(key))
elif not isinstance(cur, six.integer_types):
raise CommandExecutionError(
'Value for key \'{0}\' must be an integer to be '
'incremented'.format(key)
)
try:
return conn.incr(key, delta)
except ValueError:
raise SaltInvocationError('Delta value must be an integer') | ['def', 'increment', '(', 'key', ',', 'delta', '=', '1', ',', 'host', '=', 'DEFAULT_HOST', ',', 'port', '=', 'DEFAULT_PORT', ')', ':', 'conn', '=', '_connect', '(', 'host', ',', 'port', ')', '_check_stats', '(', 'conn', ')', 'cur', '=', 'get', '(', 'key', ')', 'if', 'cur', 'is', 'None', ':', 'raise', 'CommandExecutionError', '(', "'Key \\'{0}\\' does not exist'", '.', 'format', '(', 'key', ')', ')', 'elif', 'not', 'isinstance', '(', 'cur', ',', 'six', '.', 'integer_types', ')', ':', 'raise', 'CommandExecutionError', '(', "'Value for key \\'{0}\\' must be an integer to be '", "'incremented'", '.', 'format', '(', 'key', ')', ')', 'try', ':', 'return', 'conn', '.', 'incr', '(', 'key', ',', 'delta', ')', 'except', 'ValueError', ':', 'raise', 'SaltInvocationError', '(', "'Delta value must be an integer'", ')'] | Increment the value of a key
CLI Example:
.. code-block:: bash
salt '*' memcached.increment <key>
salt '*' memcached.increment <key> 2 | ['Increment', 'the', 'value', 'of', 'a', 'key'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/memcached.py#L213-L239 |
7,561 | bennylope/django-organizations | organizations/utils.py | model_field_attr | def model_field_attr(model, model_field, attr):
"""
Returns the specified attribute for the specified field on the model class.
"""
fields = dict([(field.name, field) for field in model._meta.fields])
return getattr(fields[model_field], attr) | python | def model_field_attr(model, model_field, attr):
"""
Returns the specified attribute for the specified field on the model class.
"""
fields = dict([(field.name, field) for field in model._meta.fields])
return getattr(fields[model_field], attr) | ['def', 'model_field_attr', '(', 'model', ',', 'model_field', ',', 'attr', ')', ':', 'fields', '=', 'dict', '(', '[', '(', 'field', '.', 'name', ',', 'field', ')', 'for', 'field', 'in', 'model', '.', '_meta', '.', 'fields', ']', ')', 'return', 'getattr', '(', 'fields', '[', 'model_field', ']', ',', 'attr', ')'] | Returns the specified attribute for the specified field on the model class. | ['Returns', 'the', 'specified', 'attribute', 'for', 'the', 'specified', 'field', 'on', 'the', 'model', 'class', '.'] | train | https://github.com/bennylope/django-organizations/blob/85f753a8f7a8f0f31636c9209fb69e7030a5c79a/organizations/utils.py#L115-L120 |
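As a quick illustration of the helper above, the sketch below reads a field attribute off Django's built-in `User` model. The minimal `settings.configure()` call and the expected `max_length` value are assumptions made for the example, not part of the package.

```python
import django
from django.conf import settings

settings.configure(
    INSTALLED_APPS=["django.contrib.contenttypes", "django.contrib.auth"],
)
django.setup()

from django.contrib.auth.models import User
from organizations.utils import model_field_attr

# Look up the max_length attribute of User.username (150 on recent Django versions).
print(model_field_attr(User, "username", "max_length"))
```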
7,562 | wummel/linkchecker | linkcheck/lock.py | get_lock | def get_lock (name, debug=False):
"""Get a new lock.
@param debug: if True, acquire() and release() will have debug messages
@ptype debug: boolean, default is False
@return: a lock object
@rtype: threading.Lock or DebugLock
"""
lock = threading.Lock()
# for thread debugging, use the DebugLock wrapper
if debug:
lock = DebugLock(lock, name)
return lock | python | def get_lock (name, debug=False):
"""Get a new lock.
@param debug: if True, acquire() and release() will have debug messages
@ptype debug: boolean, default is False
@return: a lock object
@rtype: threading.Lock or DebugLock
"""
lock = threading.Lock()
# for thread debugging, use the DebugLock wrapper
if debug:
lock = DebugLock(lock, name)
return lock | ['def', 'get_lock', '(', 'name', ',', 'debug', '=', 'False', ')', ':', 'lock', '=', 'threading', '.', 'Lock', '(', ')', '# for thread debugging, use the DebugLock wrapper', 'if', 'debug', ':', 'lock', '=', 'DebugLock', '(', 'lock', ',', 'name', ')', 'return', 'lock'] | Get a new lock.
@param debug: if True, acquire() and release() will have debug messages
@ptype debug: boolean, default is False
@return: a lock object
@rtype: threading.Lock or DebugLock | ['Get', 'a', 'new', 'lock', '.'] | train | https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/linkcheck/lock.py#L23-L34 |
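A small sketch of how the lock factory above might be used, following the plain acquire()/release() protocol the docstring mentions. The lock name is arbitrary, and linkchecker is a Python 2-era package, so treat the import as illustrative.

```python
from linkcheck.lock import get_lock

counter_lock = get_lock("counter", debug=True)  # DebugLock wrapper logs acquire()/release()

counter_lock.acquire()
try:
    pass  # critical section goes here
finally:
    counter_lock.release()
```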
7,563 | seequent/properties | properties/extras/uid.py | HasUID.serialize | def serialize(self, include_class=True, save_dynamic=False, **kwargs):
"""Serialize nested HasUID instances to a flat dictionary
**Parameters**:
* **include_class** - If True (the default), the name of the class
will also be saved to the serialized dictionary under key
:code:`'__class__'`
* **save_dynamic** - If True, dynamic properties are written to
the serialized dict (default: False).
* You may also specify a **registry** - This is the flat dictionary
where UID/HasUID pairs are stored. By default, no registry need
be provided; a new dictionary will be created.
* Any other keyword arguments will be passed through to the Property
serializers.
"""
registry = kwargs.pop('registry', None)
if registry is None:
registry = dict()
if not registry:
root = True
registry.update({'__root__': self.uid})
else:
root = False
key = self.uid
if key not in registry:
registry.update({key: None})
registry.update({key: super(HasUID, self).serialize(
registry=registry,
include_class=include_class,
save_dynamic=save_dynamic,
**kwargs
)})
if root:
return registry
return key | python | def serialize(self, include_class=True, save_dynamic=False, **kwargs):
"""Serialize nested HasUID instances to a flat dictionary
**Parameters**:
* **include_class** - If True (the default), the name of the class
will also be saved to the serialized dictionary under key
:code:`'__class__'`
* **save_dynamic** - If True, dynamic properties are written to
the serialized dict (default: False).
* You may also specify a **registry** - This is the flat dictionary
where UID/HasUID pairs are stored. By default, no registry need
be provided; a new dictionary will be created.
* Any other keyword arguments will be passed through to the Property
serializers.
"""
registry = kwargs.pop('registry', None)
if registry is None:
registry = dict()
if not registry:
root = True
registry.update({'__root__': self.uid})
else:
root = False
key = self.uid
if key not in registry:
registry.update({key: None})
registry.update({key: super(HasUID, self).serialize(
registry=registry,
include_class=include_class,
save_dynamic=save_dynamic,
**kwargs
)})
if root:
return registry
return key | ['def', 'serialize', '(', 'self', ',', 'include_class', '=', 'True', ',', 'save_dynamic', '=', 'False', ',', '*', '*', 'kwargs', ')', ':', 'registry', '=', 'kwargs', '.', 'pop', '(', "'registry'", ',', 'None', ')', 'if', 'registry', 'is', 'None', ':', 'registry', '=', 'dict', '(', ')', 'if', 'not', 'registry', ':', 'root', '=', 'True', 'registry', '.', 'update', '(', '{', "'__root__'", ':', 'self', '.', 'uid', '}', ')', 'else', ':', 'root', '=', 'False', 'key', '=', 'self', '.', 'uid', 'if', 'key', 'not', 'in', 'registry', ':', 'registry', '.', 'update', '(', '{', 'key', ':', 'None', '}', ')', 'registry', '.', 'update', '(', '{', 'key', ':', 'super', '(', 'HasUID', ',', 'self', ')', '.', 'serialize', '(', 'registry', '=', 'registry', ',', 'include_class', '=', 'include_class', ',', 'save_dynamic', '=', 'save_dynamic', ',', '*', '*', 'kwargs', ')', '}', ')', 'if', 'root', ':', 'return', 'registry', 'return', 'key'] | Serialize nested HasUID instances to a flat dictionary
**Parameters**:
* **include_class** - If True (the default), the name of the class
will also be saved to the serialized dictionary under key
:code:`'__class__'`
* **save_dynamic** - If True, dynamic properties are written to
the serialized dict (default: False).
* You may also specify a **registry** - This is the flat dictionary
where UID/HasUID pairs are stored. By default, no registry need
be provided; a new dictionary will be created.
* Any other keyword arguments will be passed through to the Property
serializers. | ['Serialize', 'nested', 'HasUID', 'instances', 'to', 'a', 'flat', 'dictionary'] | train | https://github.com/seequent/properties/blob/096b07012fff86b0a880c8c018320c3b512751b9/properties/extras/uid.py#L69-L104 |
7,564 | blue-yonder/tsfresh | tsfresh/feature_extraction/feature_calculators.py | agg_autocorrelation | def agg_autocorrelation(x, param):
r"""
Calculates the value of an aggregation function :math:`f_{agg}` (e.g. the variance or the mean) over the
autocorrelation :math:`R(l)` for different lags. The autocorrelation :math:`R(l)` for lag :math:`l` is defined as
.. math::
R(l) = \frac{1}{(n-l)\sigma^{2}} \sum_{t=1}^{n-l}(X_{t}-\mu )(X_{t+l}-\mu)
where :math:`X_i` are the values of the time series, :math:`n` its length. Finally, :math:`\sigma^2` and
:math:`\mu` are estimators for its variance and mean
(See `Estimation of the Autocorrelation function <http://en.wikipedia.org/wiki/Autocorrelation#Estimation>`_).
The :math:`R(l)` for different lags :math:`l` form a vector. This feature calculator applies the aggregation
function :math:`f_{agg}` to this vector and returns
.. math::
f_{agg} \left( R(1), \ldots, R(m)\right) \quad \text{for} \quad m = max(n, maxlag).
Here :math:`maxlag` is the second parameter passed to this function.
:param x: the time series to calculate the feature of
:type x: numpy.ndarray
:param param: contains dictionaries {"f_agg": x, "maxlag", n} with x str, the name of a numpy function
(e.g. "mean", "var", "std", "median"), its the name of the aggregator function that is applied to the
autocorrelations. Further, n is an int and the maximal number of lags to consider.
:type param: list
:return: the value of this feature
:return type: float
"""
# if the time series is longer than the following threshold, we use fft to calculate the acf
THRESHOLD_TO_USE_FFT = 1250
var = np.var(x)
n = len(x)
max_maxlag = max([config["maxlag"] for config in param])
if np.abs(var) < 10**-10 or n == 1:
a = [0] * len(x)
else:
a = acf(x, unbiased=True, fft=n > THRESHOLD_TO_USE_FFT, nlags=max_maxlag)[1:]
return [("f_agg_\"{}\"__maxlag_{}".format(config["f_agg"], config["maxlag"]),
getattr(np, config["f_agg"])(a[:int(config["maxlag"])])) for config in param] | python | def agg_autocorrelation(x, param):
r"""
Calculates the value of an aggregation function :math:`f_{agg}` (e.g. the variance or the mean) over the
autocorrelation :math:`R(l)` for different lags. The autocorrelation :math:`R(l)` for lag :math:`l` is defined as
.. math::
R(l) = \frac{1}{(n-l)\sigma^{2}} \sum_{t=1}^{n-l}(X_{t}-\mu )(X_{t+l}-\mu)
where :math:`X_i` are the values of the time series, :math:`n` its length. Finally, :math:`\sigma^2` and
:math:`\mu` are estimators for its variance and mean
(See `Estimation of the Autocorrelation function <http://en.wikipedia.org/wiki/Autocorrelation#Estimation>`_).
The :math:`R(l)` for different lags :math:`l` form a vector. This feature calculator applies the aggregation
function :math:`f_{agg}` to this vector and returns
.. math::
f_{agg} \left( R(1), \ldots, R(m)\right) \quad \text{for} \quad m = max(n, maxlag).
Here :math:`maxlag` is the second parameter passed to this function.
:param x: the time series to calculate the feature of
:type x: numpy.ndarray
:param param: contains dictionaries {"f_agg": x, "maxlag", n} with x str, the name of a numpy function
(e.g. "mean", "var", "std", "median"), its the name of the aggregator function that is applied to the
autocorrelations. Further, n is an int and the maximal number of lags to consider.
:type param: list
:return: the value of this feature
:return type: float
"""
# if the time series is longer than the following threshold, we use fft to calculate the acf
THRESHOLD_TO_USE_FFT = 1250
var = np.var(x)
n = len(x)
max_maxlag = max([config["maxlag"] for config in param])
if np.abs(var) < 10**-10 or n == 1:
a = [0] * len(x)
else:
a = acf(x, unbiased=True, fft=n > THRESHOLD_TO_USE_FFT, nlags=max_maxlag)[1:]
return [("f_agg_\"{}\"__maxlag_{}".format(config["f_agg"], config["maxlag"]),
getattr(np, config["f_agg"])(a[:int(config["maxlag"])])) for config in param] | ['def', 'agg_autocorrelation', '(', 'x', ',', 'param', ')', ':', '# if the time series is longer than the following threshold, we use fft to calculate the acf', 'THRESHOLD_TO_USE_FFT', '=', '1250', 'var', '=', 'np', '.', 'var', '(', 'x', ')', 'n', '=', 'len', '(', 'x', ')', 'max_maxlag', '=', 'max', '(', '[', 'config', '[', '"maxlag"', ']', 'for', 'config', 'in', 'param', ']', ')', 'if', 'np', '.', 'abs', '(', 'var', ')', '<', '10', '**', '-', '10', 'or', 'n', '==', '1', ':', 'a', '=', '[', '0', ']', '*', 'len', '(', 'x', ')', 'else', ':', 'a', '=', 'acf', '(', 'x', ',', 'unbiased', '=', 'True', ',', 'fft', '=', 'n', '>', 'THRESHOLD_TO_USE_FFT', ',', 'nlags', '=', 'max_maxlag', ')', '[', '1', ':', ']', 'return', '[', '(', '"f_agg_\\"{}\\"__maxlag_{}"', '.', 'format', '(', 'config', '[', '"f_agg"', ']', ',', 'config', '[', '"maxlag"', ']', ')', ',', 'getattr', '(', 'np', ',', 'config', '[', '"f_agg"', ']', ')', '(', 'a', '[', ':', 'int', '(', 'config', '[', '"maxlag"', ']', ')', ']', ')', ')', 'for', 'config', 'in', 'param', ']'] | r"""
Calculates the value of an aggregation function :math:`f_{agg}` (e.g. the variance or the mean) over the
autocorrelation :math:`R(l)` for different lags. The autocorrelation :math:`R(l)` for lag :math:`l` is defined as
.. math::
R(l) = \frac{1}{(n-l)\sigma^{2}} \sum_{t=1}^{n-l}(X_{t}-\mu )(X_{t+l}-\mu)
where :math:`X_i` are the values of the time series, :math:`n` its length. Finally, :math:`\sigma^2` and
:math:`\mu` are estimators for its variance and mean
(See `Estimation of the Autocorrelation function <http://en.wikipedia.org/wiki/Autocorrelation#Estimation>`_).
The :math:`R(l)` for different lags :math:`l` form a vector. This feature calculator applies the aggregation
function :math:`f_{agg}` to this vector and returns
.. math::
f_{agg} \left( R(1), \ldots, R(m)\right) \quad \text{for} \quad m = max(n, maxlag).
Here :math:`maxlag` is the second parameter passed to this function.
:param x: the time series to calculate the feature of
:type x: numpy.ndarray
:param param: contains dictionaries {"f_agg": x, "maxlag", n} with x str, the name of a numpy function
(e.g. "mean", "var", "std", "median"), its the name of the aggregator function that is applied to the
autocorrelations. Further, n is an int and the maximal number of lags to consider.
:type param: list
:return: the value of this feature
:return type: float | ['r', 'Calculates', 'the', 'value', 'of', 'an', 'aggregation', 'function', ':', 'math', ':', 'f_', '{', 'agg', '}', '(', 'e', '.', 'g', '.', 'the', 'variance', 'or', 'the', 'mean', ')', 'over', 'the', 'autocorrelation', ':', 'math', ':', 'R', '(', 'l', ')', 'for', 'different', 'lags', '.', 'The', 'autocorrelation', ':', 'math', ':', 'R', '(', 'l', ')', 'for', 'lag', ':', 'math', ':', 'l', 'is', 'defined', 'as'] | train | https://github.com/blue-yonder/tsfresh/blob/c72c9c574371cf7dd7d54e00a466792792e5d202/tsfresh/feature_extraction/feature_calculators.py#L324-L366 |
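To make the `param` format concrete, here is a short usage sketch of the calculator above. The input series is random data and the aggregator/lag choices are arbitrary.

```python
import numpy as np
from tsfresh.feature_extraction.feature_calculators import agg_autocorrelation

x = np.random.randn(500)
param = [
    {"f_agg": "mean", "maxlag": 40},
    {"f_agg": "var", "maxlag": 40},
]

for name, value in agg_autocorrelation(x, param):
    # yields e.g. ('f_agg_"mean"__maxlag_40', ...) and ('f_agg_"var"__maxlag_40', ...)
    print(name, value)
```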
7,565 | pjmark/NIMPA | niftypet/nimpa/prc/imio.py | list_dcm_datain | def list_dcm_datain(datain):
''' List all DICOM file paths in the datain dictionary of input data.
'''
if not isinstance(datain, dict):
raise ValueError('The input is not a dictionary!')
dcmlst = []
# list of mu-map DICOM files
if 'mumapDCM' in datain:
dcmump = os.listdir(datain['mumapDCM'])
# accept only *.dcm extensions
dcmump = [os.path.join(datain['mumapDCM'],d) for d in dcmump if d.endswith(dcmext)]
dcmlst += dcmump
if 'T1DCM' in datain:
dcmt1 = os.listdir(datain['T1DCM'])
# accept only *.dcm extensions
dcmt1 = [os.path.join(datain['T1DCM'],d) for d in dcmt1 if d.endswith(dcmext)]
dcmlst += dcmt1
if 'T2DCM' in datain:
dcmt2 = os.listdir(datain['T2DCM'])
# accept only *.dcm extensions
dcmt2 = [os.path.join(datain['T2DCM'],d) for d in dcmt2 if d.endswith(dcmext)]
dcmlst += dcmt2
if 'UTE1' in datain:
dcmute1 = os.listdir(datain['UTE1'])
# accept only *.dcm extensions
dcmute1 = [os.path.join(datain['UTE1'],d) for d in dcmute1 if d.endswith(dcmext)]
dcmlst += dcmute1
if 'UTE2' in datain:
dcmute2 = os.listdir(datain['UTE2'])
# accept only *.dcm extensions
dcmute2 = [os.path.join(datain['UTE2'],d) for d in dcmute2 if d.endswith(dcmext)]
dcmlst += dcmute2
#-list-mode data dcm
if 'lm_dcm' in datain:
dcmlst += [datain['lm_dcm']]
if 'lm_ima' in datain:
dcmlst += [datain['lm_ima']]
#-norm
if 'nrm_dcm' in datain:
dcmlst += [datain['nrm_dcm']]
if 'nrm_ima' in datain:
dcmlst += [datain['nrm_ima']]
return dcmlst | python | def list_dcm_datain(datain):
''' List all DICOM file paths in the datain dictionary of input data.
'''
if not isinstance(datain, dict):
raise ValueError('The input is not a dictionary!')
dcmlst = []
# list of mu-map DICOM files
if 'mumapDCM' in datain:
dcmump = os.listdir(datain['mumapDCM'])
# accept only *.dcm extensions
dcmump = [os.path.join(datain['mumapDCM'],d) for d in dcmump if d.endswith(dcmext)]
dcmlst += dcmump
if 'T1DCM' in datain:
dcmt1 = os.listdir(datain['T1DCM'])
# accept only *.dcm extensions
dcmt1 = [os.path.join(datain['T1DCM'],d) for d in dcmt1 if d.endswith(dcmext)]
dcmlst += dcmt1
if 'T2DCM' in datain:
dcmt2 = os.listdir(datain['T2DCM'])
# accept only *.dcm extensions
dcmt2 = [os.path.join(datain['T2DCM'],d) for d in dcmt2 if d.endswith(dcmext)]
dcmlst += dcmt2
if 'UTE1' in datain:
dcmute1 = os.listdir(datain['UTE1'])
# accept only *.dcm extensions
dcmute1 = [os.path.join(datain['UTE1'],d) for d in dcmute1 if d.endswith(dcmext)]
dcmlst += dcmute1
if 'UTE2' in datain:
dcmute2 = os.listdir(datain['UTE2'])
# accept only *.dcm extensions
dcmute2 = [os.path.join(datain['UTE2'],d) for d in dcmute2 if d.endswith(dcmext)]
dcmlst += dcmute2
#-list-mode data dcm
if 'lm_dcm' in datain:
dcmlst += [datain['lm_dcm']]
if 'lm_ima' in datain:
dcmlst += [datain['lm_ima']]
#-norm
if 'nrm_dcm' in datain:
dcmlst += [datain['nrm_dcm']]
if 'nrm_ima' in datain:
dcmlst += [datain['nrm_ima']]
return dcmlst | ['def', 'list_dcm_datain', '(', 'datain', ')', ':', 'if', 'not', 'isinstance', '(', 'datain', ',', 'dict', ')', ':', 'raise', 'ValueError', '(', "'The input is not a dictionary!'", ')', 'dcmlst', '=', '[', ']', '# list of mu-map DICOM files', 'if', "'mumapDCM'", 'in', 'datain', ':', 'dcmump', '=', 'os', '.', 'listdir', '(', 'datain', '[', "'mumapDCM'", ']', ')', '# accept only *.dcm extensions', 'dcmump', '=', '[', 'os', '.', 'path', '.', 'join', '(', 'datain', '[', "'mumapDCM'", ']', ',', 'd', ')', 'for', 'd', 'in', 'dcmump', 'if', 'd', '.', 'endswith', '(', 'dcmext', ')', ']', 'dcmlst', '+=', 'dcmump', 'if', "'T1DCM'", 'in', 'datain', ':', 'dcmt1', '=', 'os', '.', 'listdir', '(', 'datain', '[', "'T1DCM'", ']', ')', '# accept only *.dcm extensions', 'dcmt1', '=', '[', 'os', '.', 'path', '.', 'join', '(', 'datain', '[', "'T1DCM'", ']', ',', 'd', ')', 'for', 'd', 'in', 'dcmt1', 'if', 'd', '.', 'endswith', '(', 'dcmext', ')', ']', 'dcmlst', '+=', 'dcmt1', 'if', "'T2DCM'", 'in', 'datain', ':', 'dcmt2', '=', 'os', '.', 'listdir', '(', 'datain', '[', "'T2DCM'", ']', ')', '# accept only *.dcm extensions', 'dcmt2', '=', '[', 'os', '.', 'path', '.', 'join', '(', 'datain', '[', "'T2DCM'", ']', ',', 'd', ')', 'for', 'd', 'in', 'dcmt2', 'if', 'd', '.', 'endswith', '(', 'dcmext', ')', ']', 'dcmlst', '+=', 'dcmt2', 'if', "'UTE1'", 'in', 'datain', ':', 'dcmute1', '=', 'os', '.', 'listdir', '(', 'datain', '[', "'UTE1'", ']', ')', '# accept only *.dcm extensions', 'dcmute1', '=', '[', 'os', '.', 'path', '.', 'join', '(', 'datain', '[', "'UTE1'", ']', ',', 'd', ')', 'for', 'd', 'in', 'dcmute1', 'if', 'd', '.', 'endswith', '(', 'dcmext', ')', ']', 'dcmlst', '+=', 'dcmute1', 'if', "'UTE2'", 'in', 'datain', ':', 'dcmute2', '=', 'os', '.', 'listdir', '(', 'datain', '[', "'UTE2'", ']', ')', '# accept only *.dcm extensions', 'dcmute2', '=', '[', 'os', '.', 'path', '.', 'join', '(', 'datain', '[', "'UTE2'", ']', ',', 'd', ')', 'for', 'd', 'in', 'dcmute2', 'if', 'd', '.', 'endswith', '(', 'dcmext', ')', ']', 'dcmlst', '+=', 'dcmute2', '#-list-mode data dcm', 'if', "'lm_dcm'", 'in', 'datain', ':', 'dcmlst', '+=', '[', 'datain', '[', "'lm_dcm'", ']', ']', 'if', "'lm_ima'", 'in', 'datain', ':', 'dcmlst', '+=', '[', 'datain', '[', "'lm_ima'", ']', ']', '#-norm', 'if', "'nrm_dcm'", 'in', 'datain', ':', 'dcmlst', '+=', '[', 'datain', '[', "'nrm_dcm'", ']', ']', 'if', "'nrm_ima'", 'in', 'datain', ':', 'dcmlst', '+=', '[', 'datain', '[', "'nrm_ima'", ']', ']', 'return', 'dcmlst'] | List all DICOM file paths in the datain dictionary of input data. | ['List', 'all', 'DICOM', 'file', 'paths', 'in', 'the', 'datain', 'dictionary', 'of', 'input', 'data', '.'] | train | https://github.com/pjmark/NIMPA/blob/3f4231fed2934a1d92e4cd8e9e153b0118e29d86/niftypet/nimpa/prc/imio.py#L358-L411 |
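The sketch below shows the kind of `datain` dictionary the function above expects; the directory layout and file names are invented for the example, NIMPA is assumed to be importable, and `dcmext` is whatever extension filter the module defines.

```python
import os
import tempfile
from niftypet.nimpa.prc.imio import list_dcm_datain

root = tempfile.mkdtemp()
mumap_dir = os.path.join(root, "mumapDCM")
os.makedirs(mumap_dir)
open(os.path.join(mumap_dir, "slice_000.dcm"), "w").close()

datain = {
    "mumapDCM": mumap_dir,                         # directory of mu-map DICOM slices
    "lm_dcm": os.path.join(root, "listmode.dcm"),  # single list-mode DICOM path
}
print(list_dcm_datain(datain))
```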
7,566 | crdoconnor/strictyaml | strictyaml/representation.py | YAML.data | def data(self):
"""
Returns raw data representation of the document or document segment.
Mappings are rendered as ordered dicts, sequences as lists and scalar values
as whatever the validator returns (int, string, etc.).
If no validators are used, scalar values are always returned as strings.
"""
if isinstance(self._value, CommentedMap):
mapping = OrderedDict()
for key, value in self._value.items():
mapping[key.data] = value.data
return mapping
elif isinstance(self._value, CommentedSeq):
return [item.data for item in self._value]
else:
return self._value | python | def data(self):
"""
Returns raw data representation of the document or document segment.
Mappings are rendered as ordered dicts, sequences as lists and scalar values
as whatever the validator returns (int, string, etc.).
If no validators are used, scalar values are always returned as strings.
"""
if isinstance(self._value, CommentedMap):
mapping = OrderedDict()
for key, value in self._value.items():
mapping[key.data] = value.data
return mapping
elif isinstance(self._value, CommentedSeq):
return [item.data for item in self._value]
else:
return self._value | ['def', 'data', '(', 'self', ')', ':', 'if', 'isinstance', '(', 'self', '.', '_value', ',', 'CommentedMap', ')', ':', 'mapping', '=', 'OrderedDict', '(', ')', 'for', 'key', ',', 'value', 'in', 'self', '.', '_value', '.', 'items', '(', ')', ':', 'mapping', '[', 'key', '.', 'data', ']', '=', 'value', '.', 'data', 'return', 'mapping', 'elif', 'isinstance', '(', 'self', '.', '_value', ',', 'CommentedSeq', ')', ':', 'return', '[', 'item', '.', 'data', 'for', 'item', 'in', 'self', '.', '_value', ']', 'else', ':', 'return', 'self', '.', '_value'] | Returns raw data representation of the document or document segment.
Mappings are rendered as ordered dicts, sequences as lists and scalar values
as whatever the validator returns (int, string, etc.).
If no validators are used, scalar values are always returned as strings. | ['Returns', 'raw', 'data', 'representation', 'of', 'the', 'document', 'or', 'document', 'segment', '.'] | train | https://github.com/crdoconnor/strictyaml/blob/efdac7f89e81679fc95686288cd32b9563fde609/strictyaml/representation.py#L102-L119 |
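A short sketch of the property above in action: without a schema every scalar comes back as a string, which is the behaviour the docstring describes.

```python
from strictyaml import load

doc = load(
    "project: demo\n"
    "tags:\n"
    "- fast\n"
    "- small\n"
)

print(doc.data)
# roughly: OrderedDict([('project', 'demo'), ('tags', ['fast', 'small'])])
```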
7,567 | cs50/check50 | check50/flask.py | app.raw_content | def raw_content(self, output=None, str_output=None):
"""Searches for `output` regex match within content of page, regardless of mimetype."""
return self._search_page(output, str_output, self.response.data, lambda regex, content: regex.search(content.decode())) | python | def raw_content(self, output=None, str_output=None):
"""Searches for `output` regex match within content of page, regardless of mimetype."""
return self._search_page(output, str_output, self.response.data, lambda regex, content: regex.search(content.decode())) | ['def', 'raw_content', '(', 'self', ',', 'output', '=', 'None', ',', 'str_output', '=', 'None', ')', ':', 'return', 'self', '.', '_search_page', '(', 'output', ',', 'str_output', ',', 'self', '.', 'response', '.', 'data', ',', 'lambda', 'regex', ',', 'content', ':', 'regex', '.', 'search', '(', 'content', '.', 'decode', '(', ')', ')', ')'] | Searches for `output` regex match within content of page, regardless of mimetype. | ['Searches', 'for', 'output', 'regex', 'match', 'within', 'content', 'of', 'page', 'regardless', 'of', 'mimetype', '.'] | train | https://github.com/cs50/check50/blob/42c1f0c36baa6a24f69742d74551a9ea7a5ceb33/check50/flask.py#L116-L118 |
7,568 | chaoss/grimoirelab-manuscripts | manuscripts/report.py | Report.sec_project_community | def sec_project_community(self, project=None):
"""
Generate the data for the Communication section in a Project report
:return:
"""
def create_csv(metric1, csv_labels, file_label):
esfilters = None
csv_labels = csv_labels.replace("_", "") # LaTeX not supports "_"
if project != self.GLOBAL_PROJECT:
esfilters = {"project": project}
data_path = os.path.join(self.data_dir, "data")
file_name = os.path.join(data_path, file_label + "_" + project + ".csv")
logger.debug("CSV file %s generation in progress", file_name)
m1 = metric1(self.es_url, self.get_metric_index(metric1),
esfilters=esfilters, start=self.end_prev_month, end=self.end)
top = m1.get_list()
csv = csv_labels + '\n'
for i in range(0, len(top['value'])):
if i > self.TOP_MAX:
break
csv += top[metric1.FIELD_NAME][i] + "," + self.str_val(top['value'][i])
csv += "\n"
with open(file_name, "w") as f:
f.write(csv)
logger.debug("CSV file %s was generated", file_name)
logger.info("Community data for: %s", project)
author = self.config['project_community']['author_metrics'][0]
csv_labels = 'labels,' + author.id
file_label = author.ds.name + "_" + author.id
title_label = author.name + " per " + self.interval
self.__create_csv_eps(author, None, csv_labels, file_label, title_label,
project)
"""
Main developers
"""
metric = self.config['project_community']['people_top_metrics'][0]
# TODO: Commits must be extracted from metric
csv_labels = author.id + ",commits"
file_label = author.ds.name + "_top_" + author.id
create_csv(metric, csv_labels, file_label)
"""
Main organizations
"""
orgs = self.config['project_community']['orgs_top_metrics'][0]
# TODO: Commits must be extracted from metric
csv_labels = orgs.id + ",commits"
file_label = orgs.ds.name + "_top_" + orgs.id
create_csv(orgs, csv_labels, file_label) | python | def sec_project_community(self, project=None):
"""
Generate the data for the Communication section in a Project report
:return:
"""
def create_csv(metric1, csv_labels, file_label):
esfilters = None
csv_labels = csv_labels.replace("_", "") # LaTeX not supports "_"
if project != self.GLOBAL_PROJECT:
esfilters = {"project": project}
data_path = os.path.join(self.data_dir, "data")
file_name = os.path.join(data_path, file_label + "_" + project + ".csv")
logger.debug("CSV file %s generation in progress", file_name)
m1 = metric1(self.es_url, self.get_metric_index(metric1),
esfilters=esfilters, start=self.end_prev_month, end=self.end)
top = m1.get_list()
csv = csv_labels + '\n'
for i in range(0, len(top['value'])):
if i > self.TOP_MAX:
break
csv += top[metric1.FIELD_NAME][i] + "," + self.str_val(top['value'][i])
csv += "\n"
with open(file_name, "w") as f:
f.write(csv)
logger.debug("CSV file %s was generated", file_name)
logger.info("Community data for: %s", project)
author = self.config['project_community']['author_metrics'][0]
csv_labels = 'labels,' + author.id
file_label = author.ds.name + "_" + author.id
title_label = author.name + " per " + self.interval
self.__create_csv_eps(author, None, csv_labels, file_label, title_label,
project)
"""
Main developers
"""
metric = self.config['project_community']['people_top_metrics'][0]
# TODO: Commits must be extracted from metric
csv_labels = author.id + ",commits"
file_label = author.ds.name + "_top_" + author.id
create_csv(metric, csv_labels, file_label)
"""
Main organizations
"""
orgs = self.config['project_community']['orgs_top_metrics'][0]
# TODO: Commits must be extracted from metric
csv_labels = orgs.id + ",commits"
file_label = orgs.ds.name + "_top_" + orgs.id
create_csv(orgs, csv_labels, file_label) | ['def', 'sec_project_community', '(', 'self', ',', 'project', '=', 'None', ')', ':', 'def', 'create_csv', '(', 'metric1', ',', 'csv_labels', ',', 'file_label', ')', ':', 'esfilters', '=', 'None', 'csv_labels', '=', 'csv_labels', '.', 'replace', '(', '"_"', ',', '""', ')', '# LaTeX not supports "_"', 'if', 'project', '!=', 'self', '.', 'GLOBAL_PROJECT', ':', 'esfilters', '=', '{', '"project"', ':', 'project', '}', 'data_path', '=', 'os', '.', 'path', '.', 'join', '(', 'self', '.', 'data_dir', ',', '"data"', ')', 'file_name', '=', 'os', '.', 'path', '.', 'join', '(', 'data_path', ',', 'file_label', '+', '"_"', '+', 'project', '+', '".csv"', ')', 'logger', '.', 'debug', '(', '"CSV file %s generation in progress"', ',', 'file_name', ')', 'm1', '=', 'metric1', '(', 'self', '.', 'es_url', ',', 'self', '.', 'get_metric_index', '(', 'metric1', ')', ',', 'esfilters', '=', 'esfilters', ',', 'start', '=', 'self', '.', 'end_prev_month', ',', 'end', '=', 'self', '.', 'end', ')', 'top', '=', 'm1', '.', 'get_list', '(', ')', 'csv', '=', 'csv_labels', '+', "'\\n'", 'for', 'i', 'in', 'range', '(', '0', ',', 'len', '(', 'top', '[', "'value'", ']', ')', ')', ':', 'if', 'i', '>', 'self', '.', 'TOP_MAX', ':', 'break', 'csv', '+=', 'top', '[', 'metric1', '.', 'FIELD_NAME', ']', '[', 'i', ']', '+', '","', '+', 'self', '.', 'str_val', '(', 'top', '[', "'value'", ']', '[', 'i', ']', ')', 'csv', '+=', '"\\n"', 'with', 'open', '(', 'file_name', ',', '"w"', ')', 'as', 'f', ':', 'f', '.', 'write', '(', 'csv', ')', 'logger', '.', 'debug', '(', '"CSV file %s was generated"', ',', 'file_name', ')', 'logger', '.', 'info', '(', '"Community data for: %s"', ',', 'project', ')', 'author', '=', 'self', '.', 'config', '[', "'project_community'", ']', '[', "'author_metrics'", ']', '[', '0', ']', 'csv_labels', '=', "'labels,'", '+', 'author', '.', 'id', 'file_label', '=', 'author', '.', 'ds', '.', 'name', '+', '"_"', '+', 'author', '.', 'id', 'title_label', '=', 'author', '.', 'name', '+', '" per "', '+', 'self', '.', 'interval', 'self', '.', '__create_csv_eps', '(', 'author', ',', 'None', ',', 'csv_labels', ',', 'file_label', ',', 'title_label', ',', 'project', ')', '"""\n Main developers\n\n """', 'metric', '=', 'self', '.', 'config', '[', "'project_community'", ']', '[', "'people_top_metrics'", ']', '[', '0', ']', '# TODO: Commits must be extracted from metric', 'csv_labels', '=', 'author', '.', 'id', '+', '",commits"', 'file_label', '=', 'author', '.', 'ds', '.', 'name', '+', '"_top_"', '+', 'author', '.', 'id', 'create_csv', '(', 'metric', ',', 'csv_labels', ',', 'file_label', ')', '"""\n Main organizations\n\n """', 'orgs', '=', 'self', '.', 'config', '[', "'project_community'", ']', '[', "'orgs_top_metrics'", ']', '[', '0', ']', '# TODO: Commits must be extracted from metric', 'csv_labels', '=', 'orgs', '.', 'id', '+', '",commits"', 'file_label', '=', 'orgs', '.', 'ds', '.', 'name', '+', '"_top_"', '+', 'orgs', '.', 'id', 'create_csv', '(', 'orgs', ',', 'csv_labels', ',', 'file_label', ')'] | Generate the data for the Communication section in a Project report
:return: | ['Generate', 'the', 'data', 'for', 'the', 'Communication', 'section', 'in', 'a', 'Project', 'report', ':', 'return', ':'] | train | https://github.com/chaoss/grimoirelab-manuscripts/blob/94a3ad4f11bfbcd6c5190e01cb5d3e47a5187cd9/manuscripts/report.py#L556-L616 |
7,569 | markovmodel/PyEMMA | pyemma/datasets/potentials.py | AsymmetricDoubleWell.sample | def sample(self, x0, nsteps, nskip=1):
r"""generate nsteps sample points"""
x = np.zeros(shape=(nsteps + 1,))
x[0] = x0
for t in range(nsteps):
q = x[t]
for s in range(nskip):
q = self.step(q)
x[t + 1] = q
return x | python | def sample(self, x0, nsteps, nskip=1):
r"""generate nsteps sample points"""
x = np.zeros(shape=(nsteps + 1,))
x[0] = x0
for t in range(nsteps):
q = x[t]
for s in range(nskip):
q = self.step(q)
x[t + 1] = q
return x | ['def', 'sample', '(', 'self', ',', 'x0', ',', 'nsteps', ',', 'nskip', '=', '1', ')', ':', 'x', '=', 'np', '.', 'zeros', '(', 'shape', '=', '(', 'nsteps', '+', '1', ',', ')', ')', 'x', '[', '0', ']', '=', 'x0', 'for', 't', 'in', 'range', '(', 'nsteps', ')', ':', 'q', '=', 'x', '[', 't', ']', 'for', 's', 'in', 'range', '(', 'nskip', ')', ':', 'q', '=', 'self', '.', 'step', '(', 'q', ')', 'x', '[', 't', '+', '1', ']', '=', 'q', 'return', 'x'] | r"""generate nsteps sample points | ['r', 'generate', 'nsteps', 'sample', 'points'] | train | https://github.com/markovmodel/PyEMMA/blob/5c3124398217de05ba5ce9c8fb01519222481ab8/pyemma/datasets/potentials.py#L111-L120 |
7,570 | santoshphilip/eppy | eppy/loops.py | extractfields | def extractfields(data, commdct, objkey, fieldlists):
"""get all the objects of objkey.
fieldlists will have a fieldlist for each of those objects.
return the contents of those fields"""
# TODO : this assumes that the field list identical for
# each instance of the object. This is not true.
# So we should have a field list for each instance of the object
# and map them with a zip
objindex = data.dtls.index(objkey)
objcomm = commdct[objindex]
objfields = []
# get the field names of that object
for dct in objcomm[0:]:
try:
thefieldcomms = dct['field']
objfields.append(thefieldcomms[0])
except KeyError as err:
objfields.append(None)
fieldindexes = []
for fieldlist in fieldlists:
fieldindex = []
for item in fieldlist:
if isinstance(item, int):
fieldindex.append(item)
else:
fieldindex.append(objfields.index(item) + 0)
# the index starts at 1, not at 0
fieldindexes.append(fieldindex)
theobjects = data.dt[objkey]
fieldcontents = []
for theobject, fieldindex in zip(theobjects, fieldindexes):
innerlst = []
for item in fieldindex:
try:
innerlst.append(theobject[item])
except IndexError as err:
break
fieldcontents.append(innerlst)
# fieldcontents.append([theobject[item] for item in fieldindex])
return fieldcontents | python | def extractfields(data, commdct, objkey, fieldlists):
"""get all the objects of objkey.
fieldlists will have a fieldlist for each of those objects.
return the contents of those fields"""
# TODO : this assumes that the field list identical for
# each instance of the object. This is not true.
# So we should have a field list for each instance of the object
# and map them with a zip
objindex = data.dtls.index(objkey)
objcomm = commdct[objindex]
objfields = []
# get the field names of that object
for dct in objcomm[0:]:
try:
thefieldcomms = dct['field']
objfields.append(thefieldcomms[0])
except KeyError as err:
objfields.append(None)
fieldindexes = []
for fieldlist in fieldlists:
fieldindex = []
for item in fieldlist:
if isinstance(item, int):
fieldindex.append(item)
else:
fieldindex.append(objfields.index(item) + 0)
# the index starts at 1, not at 0
fieldindexes.append(fieldindex)
theobjects = data.dt[objkey]
fieldcontents = []
for theobject, fieldindex in zip(theobjects, fieldindexes):
innerlst = []
for item in fieldindex:
try:
innerlst.append(theobject[item])
except IndexError as err:
break
fieldcontents.append(innerlst)
# fieldcontents.append([theobject[item] for item in fieldindex])
return fieldcontents | ['def', 'extractfields', '(', 'data', ',', 'commdct', ',', 'objkey', ',', 'fieldlists', ')', ':', '# TODO : this assumes that the field list identical for', '# each instance of the object. This is not true.', '# So we should have a field list for each instance of the object', '# and map them with a zip', 'objindex', '=', 'data', '.', 'dtls', '.', 'index', '(', 'objkey', ')', 'objcomm', '=', 'commdct', '[', 'objindex', ']', 'objfields', '=', '[', ']', '# get the field names of that object', 'for', 'dct', 'in', 'objcomm', '[', '0', ':', ']', ':', 'try', ':', 'thefieldcomms', '=', 'dct', '[', "'field'", ']', 'objfields', '.', 'append', '(', 'thefieldcomms', '[', '0', ']', ')', 'except', 'KeyError', 'as', 'err', ':', 'objfields', '.', 'append', '(', 'None', ')', 'fieldindexes', '=', '[', ']', 'for', 'fieldlist', 'in', 'fieldlists', ':', 'fieldindex', '=', '[', ']', 'for', 'item', 'in', 'fieldlist', ':', 'if', 'isinstance', '(', 'item', ',', 'int', ')', ':', 'fieldindex', '.', 'append', '(', 'item', ')', 'else', ':', 'fieldindex', '.', 'append', '(', 'objfields', '.', 'index', '(', 'item', ')', '+', '0', ')', '# the index starts at 1, not at 0', 'fieldindexes', '.', 'append', '(', 'fieldindex', ')', 'theobjects', '=', 'data', '.', 'dt', '[', 'objkey', ']', 'fieldcontents', '=', '[', ']', 'for', 'theobject', ',', 'fieldindex', 'in', 'zip', '(', 'theobjects', ',', 'fieldindexes', ')', ':', 'innerlst', '=', '[', ']', 'for', 'item', 'in', 'fieldindex', ':', 'try', ':', 'innerlst', '.', 'append', '(', 'theobject', '[', 'item', ']', ')', 'except', 'IndexError', 'as', 'err', ':', 'break', 'fieldcontents', '.', 'append', '(', 'innerlst', ')', '# fieldcontents.append([theobject[item] for item in fieldindex])', 'return', 'fieldcontents'] | get all the objects of objkey.
fieldlists will have a fieldlist for each of those objects.
return the contents of those fields | ['get', 'all', 'the', 'objects', 'of', 'objkey', '.', 'fieldlists', 'will', 'have', 'a', 'fieldlist', 'for', 'each', 'of', 'those', 'objects', '.', 'return', 'the', 'contents', 'of', 'those', 'fields'] | train | https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/loops.py#L21-L60 |
7,571 | koszullab/instaGRAAL | instagraal/leastsqbound.py | external2internal | def external2internal(xe, bounds):
""" Convert a series of external variables to internal variables"""
xi = np.empty_like(xe)
for i, (v, bound) in enumerate(zip(xe, bounds)):
a = bound[0] # minimum
b = bound[1] # maximum
if a == None and b == None: # No constraints
xi[i] = v
elif b == None: # only min
xi[i] = np.sqrt((v - a + 1.) ** 2. - 1)
elif a == None: # only max
xi[i] = np.sqrt((b - v + 1.) ** 2. - 1)
else: # both min and max
xi[i] = np.arcsin((2. * (v - a) / (b - a)) - 1.)
return xi | python | def external2internal(xe, bounds):
""" Convert a series of external variables to internal variables"""
xi = np.empty_like(xe)
for i, (v, bound) in enumerate(zip(xe, bounds)):
a = bound[0] # minimum
b = bound[1] # maximum
if a == None and b == None: # No constraints
xi[i] = v
elif b == None: # only min
xi[i] = np.sqrt((v - a + 1.) ** 2. - 1)
elif a == None: # only max
xi[i] = np.sqrt((b - v + 1.) ** 2. - 1)
else: # both min and max
xi[i] = np.arcsin((2. * (v - a) / (b - a)) - 1.)
return xi | ['def', 'external2internal', '(', 'xe', ',', 'bounds', ')', ':', 'xi', '=', 'np', '.', 'empty_like', '(', 'xe', ')', 'for', 'i', ',', '(', 'v', ',', 'bound', ')', 'in', 'enumerate', '(', 'zip', '(', 'xe', ',', 'bounds', ')', ')', ':', 'a', '=', 'bound', '[', '0', ']', '# minimum', 'b', '=', 'bound', '[', '1', ']', '# maximum', 'if', 'a', '==', 'None', 'and', 'b', '==', 'None', ':', '# No constraints', 'xi', '[', 'i', ']', '=', 'v', 'elif', 'b', '==', 'None', ':', '# only min', 'xi', '[', 'i', ']', '=', 'np', '.', 'sqrt', '(', '(', 'v', '-', 'a', '+', '1.', ')', '**', '2.', '-', '1', ')', 'elif', 'a', '==', 'None', ':', '# only max', 'xi', '[', 'i', ']', '=', 'np', '.', 'sqrt', '(', '(', 'b', '-', 'v', '+', '1.', ')', '**', '2.', '-', '1', ')', 'else', ':', '# both min and max', 'xi', '[', 'i', ']', '=', 'np', '.', 'arcsin', '(', '(', '2.', '*', '(', 'v', '-', 'a', ')', '/', '(', 'b', '-', 'a', ')', ')', '-', '1.', ')', 'return', 'xi'] | Convert a series of external variables to internal variables | ['Convert', 'a', 'series', 'of', 'external', 'variables', 'to', 'internal', 'variables'] | train | https://github.com/koszullab/instaGRAAL/blob/1c02ca838e57d8178eec79f223644b2acd0153dd/instagraal/leastsqbound.py#L81-L103 |
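A small numeric sketch of the transform above, exercising the four cases (unbounded, lower bound only, upper bound only, both). The bounds and starting values are arbitrary, and the import path simply follows the file path shown in the record.

```python
import numpy as np
from instagraal.leastsqbound import external2internal

#            no bounds      min only      max only     min and max
bounds = [(None, None), (0.0, None), (None, 5.0), (0.0, 1.0)]
xe = np.array([2.5, 3.0, 4.0, 0.25])

xi = external2internal(xe, bounds)
print(xi)
# the unbounded value passes through unchanged; the others are remapped so an
# optimizer can search an unconstrained internal space
```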
7,572 | CRS-support/ftw | ftw/http.py | HttpResponse.parse_content_encoding | def parse_content_encoding(self, response_headers, response_data):
"""
Parses a response that contains Content-Encoding to retrieve
response_data
"""
if response_headers['content-encoding'] == 'gzip':
buf = StringIO.StringIO(response_data)
zipbuf = gzip.GzipFile(fileobj=buf)
response_data = zipbuf.read()
elif response_headers['content-encoding'] == 'deflate':
data = StringIO.StringIO(zlib.decompress(response_data))
response_data = data.read()
else:
raise errors.TestError(
'Received unknown Content-Encoding',
{
'content-encoding':
str(response_headers['content-encoding']),
'function': 'http.HttpResponse.parse_content_encoding'
})
return response_data | python | def parse_content_encoding(self, response_headers, response_data):
"""
Parses a response that contains Content-Encoding to retrieve
response_data
"""
if response_headers['content-encoding'] == 'gzip':
buf = StringIO.StringIO(response_data)
zipbuf = gzip.GzipFile(fileobj=buf)
response_data = zipbuf.read()
elif response_headers['content-encoding'] == 'deflate':
data = StringIO.StringIO(zlib.decompress(response_data))
response_data = data.read()
else:
raise errors.TestError(
'Received unknown Content-Encoding',
{
'content-encoding':
str(response_headers['content-encoding']),
'function': 'http.HttpResponse.parse_content_encoding'
})
return response_data | ['def', 'parse_content_encoding', '(', 'self', ',', 'response_headers', ',', 'response_data', ')', ':', 'if', 'response_headers', '[', "'content-encoding'", ']', '==', "'gzip'", ':', 'buf', '=', 'StringIO', '.', 'StringIO', '(', 'response_data', ')', 'zipbuf', '=', 'gzip', '.', 'GzipFile', '(', 'fileobj', '=', 'buf', ')', 'response_data', '=', 'zipbuf', '.', 'read', '(', ')', 'elif', 'response_headers', '[', "'content-encoding'", ']', '==', "'deflate'", ':', 'data', '=', 'StringIO', '.', 'StringIO', '(', 'zlib', '.', 'decompress', '(', 'response_data', ')', ')', 'response_data', '=', 'data', '.', 'read', '(', ')', 'else', ':', 'raise', 'errors', '.', 'TestError', '(', "'Received unknown Content-Encoding'", ',', '{', "'content-encoding'", ':', 'str', '(', 'response_headers', '[', "'content-encoding'", ']', ')', ',', "'function'", ':', "'http.HttpResponse.parse_content_encoding'", '}', ')', 'return', 'response_data'] | Parses a response that contains Content-Encoding to retrieve
response_data | ['Parses', 'a', 'response', 'that', 'contains', 'Content', '-', 'Encoding', 'to', 'retrieve', 'response_data'] | train | https://github.com/CRS-support/ftw/blob/1bbfd9b702e7e65532c1fd52bc82960556cefae5/ftw/http.py#L41-L61 |
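As a standalone illustration of what the method above does (not the ftw API itself), this Python 3 sketch performs the same gzip and deflate decoding on hand-compressed response bodies.

```python
import gzip
import io
import zlib

body = b"<html>hello</html>"

# 'gzip' branch: wrap the raw bytes in a file object and let GzipFile inflate them
gz_response = gzip.compress(body)
assert gzip.GzipFile(fileobj=io.BytesIO(gz_response)).read() == body

# 'deflate' branch: plain zlib decompression
deflate_response = zlib.compress(body)
assert zlib.decompress(deflate_response) == body
```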
7,573 | projectatomic/atomic-reactor | atomic_reactor/plugins/pre_pull_base_image.py | PullBaseImagePlugin._pull_and_tag_image | def _pull_and_tag_image(self, image, build_json, nonce):
"""Docker pull the image and tag it uniquely for use by this build"""
image = image.copy()
first_library_exc = None
for _ in range(20):
# retry until pull and tag is successful or definitively fails.
# should never require 20 retries but there's a race condition at work.
# just in case something goes wildly wrong, limit to 20 so it terminates.
try:
self.tasker.pull_image(image, insecure=self.parent_registry_insecure,
dockercfg_path=self.parent_registry_dockercfg_path)
self.workflow.pulled_base_images.add(image.to_str())
except RetryGeneratorException as exc:
# getting here means the pull itself failed. we may want to retry if the
# image being pulled lacks a namespace, like e.g. "rhel7". we cannot count
# on the registry mapping this into the docker standard "library/rhel7" so
# need to retry with that.
if first_library_exc:
# we already tried and failed; report the first failure.
raise first_library_exc
if image.namespace:
# already namespaced, do not retry with "library/", just fail.
raise
self.log.info("'%s' not found", image.to_str())
image.namespace = 'library'
self.log.info("trying '%s'", image.to_str())
first_library_exc = exc # report first failure if retry also fails
continue
# Attempt to tag it using a unique ID. We might have to retry
# if another build with the same parent image is finishing up
# and removing images it pulled.
# Use the OpenShift build name as the unique ID
unique_id = build_json['metadata']['name']
new_image = ImageName(repo=unique_id, tag=nonce)
try:
self.log.info("tagging pulled image")
response = self.tasker.tag_image(image, new_image)
self.workflow.pulled_base_images.add(response)
self.log.debug("image '%s' is available as '%s'", image, new_image)
return new_image
except docker.errors.NotFound:
# If we get here, some other build raced us to remove
# the parent image, and that build won.
# Retry the pull immediately.
self.log.info("re-pulling removed image")
continue
# Failed to tag it after 20 tries
self.log.error("giving up trying to pull image")
raise RuntimeError("too many attempts to pull and tag image") | python | def _pull_and_tag_image(self, image, build_json, nonce):
"""Docker pull the image and tag it uniquely for use by this build"""
image = image.copy()
first_library_exc = None
for _ in range(20):
# retry until pull and tag is successful or definitively fails.
# should never require 20 retries but there's a race condition at work.
# just in case something goes wildly wrong, limit to 20 so it terminates.
try:
self.tasker.pull_image(image, insecure=self.parent_registry_insecure,
dockercfg_path=self.parent_registry_dockercfg_path)
self.workflow.pulled_base_images.add(image.to_str())
except RetryGeneratorException as exc:
# getting here means the pull itself failed. we may want to retry if the
# image being pulled lacks a namespace, like e.g. "rhel7". we cannot count
# on the registry mapping this into the docker standard "library/rhel7" so
# need to retry with that.
if first_library_exc:
# we already tried and failed; report the first failure.
raise first_library_exc
if image.namespace:
# already namespaced, do not retry with "library/", just fail.
raise
self.log.info("'%s' not found", image.to_str())
image.namespace = 'library'
self.log.info("trying '%s'", image.to_str())
first_library_exc = exc # report first failure if retry also fails
continue
# Attempt to tag it using a unique ID. We might have to retry
# if another build with the same parent image is finishing up
# and removing images it pulled.
# Use the OpenShift build name as the unique ID
unique_id = build_json['metadata']['name']
new_image = ImageName(repo=unique_id, tag=nonce)
try:
self.log.info("tagging pulled image")
response = self.tasker.tag_image(image, new_image)
self.workflow.pulled_base_images.add(response)
self.log.debug("image '%s' is available as '%s'", image, new_image)
return new_image
except docker.errors.NotFound:
# If we get here, some other build raced us to remove
# the parent image, and that build won.
# Retry the pull immediately.
self.log.info("re-pulling removed image")
continue
# Failed to tag it after 20 tries
self.log.error("giving up trying to pull image")
raise RuntimeError("too many attempts to pull and tag image") | ['def', '_pull_and_tag_image', '(', 'self', ',', 'image', ',', 'build_json', ',', 'nonce', ')', ':', 'image', '=', 'image', '.', 'copy', '(', ')', 'first_library_exc', '=', 'None', 'for', '_', 'in', 'range', '(', '20', ')', ':', '# retry until pull and tag is successful or definitively fails.', "# should never require 20 retries but there's a race condition at work.", '# just in case something goes wildly wrong, limit to 20 so it terminates.', 'try', ':', 'self', '.', 'tasker', '.', 'pull_image', '(', 'image', ',', 'insecure', '=', 'self', '.', 'parent_registry_insecure', ',', 'dockercfg_path', '=', 'self', '.', 'parent_registry_dockercfg_path', ')', 'self', '.', 'workflow', '.', 'pulled_base_images', '.', 'add', '(', 'image', '.', 'to_str', '(', ')', ')', 'except', 'RetryGeneratorException', 'as', 'exc', ':', '# getting here means the pull itself failed. we may want to retry if the', '# image being pulled lacks a namespace, like e.g. "rhel7". we cannot count', '# on the registry mapping this into the docker standard "library/rhel7" so', '# need to retry with that.', 'if', 'first_library_exc', ':', '# we already tried and failed; report the first failure.', 'raise', 'first_library_exc', 'if', 'image', '.', 'namespace', ':', '# already namespaced, do not retry with "library/", just fail.', 'raise', 'self', '.', 'log', '.', 'info', '(', '"\'%s\' not found"', ',', 'image', '.', 'to_str', '(', ')', ')', 'image', '.', 'namespace', '=', "'library'", 'self', '.', 'log', '.', 'info', '(', '"trying \'%s\'"', ',', 'image', '.', 'to_str', '(', ')', ')', 'first_library_exc', '=', 'exc', '# report first failure if retry also fails', 'continue', '# Attempt to tag it using a unique ID. We might have to retry', '# if another build with the same parent image is finishing up', '# and removing images it pulled.', '# Use the OpenShift build name as the unique ID', 'unique_id', '=', 'build_json', '[', "'metadata'", ']', '[', "'name'", ']', 'new_image', '=', 'ImageName', '(', 'repo', '=', 'unique_id', ',', 'tag', '=', 'nonce', ')', 'try', ':', 'self', '.', 'log', '.', 'info', '(', '"tagging pulled image"', ')', 'response', '=', 'self', '.', 'tasker', '.', 'tag_image', '(', 'image', ',', 'new_image', ')', 'self', '.', 'workflow', '.', 'pulled_base_images', '.', 'add', '(', 'response', ')', 'self', '.', 'log', '.', 'debug', '(', '"image \'%s\' is available as \'%s\'"', ',', 'image', ',', 'new_image', ')', 'return', 'new_image', 'except', 'docker', '.', 'errors', '.', 'NotFound', ':', '# If we get here, some other build raced us to remove', '# the parent image, and that build won.', '# Retry the pull immediately.', 'self', '.', 'log', '.', 'info', '(', '"re-pulling removed image"', ')', 'continue', '# Failed to tag it after 20 tries', 'self', '.', 'log', '.', 'error', '(', '"giving up trying to pull image"', ')', 'raise', 'RuntimeError', '(', '"too many attempts to pull and tag image"', ')'] | Docker pull the image and tag it uniquely for use by this build | ['Docker', 'pull', 'the', 'image', 'and', 'tag', 'it', 'uniquely', 'for', 'use', 'by', 'this', 'build'] | train | https://github.com/projectatomic/atomic-reactor/blob/fd31c01b964097210bf169960d051e5f04019a80/atomic_reactor/plugins/pre_pull_base_image.py#L250-L303 |
7,574 | TurboGears/gearbox | gearbox/commandmanager.py | CommandManager.find_command | def find_command(self, argv):
"""Given an argument list, find a command and
return the processor and any remaining arguments.
"""
search_args = argv[:]
name = ''
while search_args:
if search_args[0].startswith('-'):
name = '%s %s' % (name, search_args[0])
raise ValueError('Invalid command %r' % name)
next_val = search_args.pop(0)
name = '%s %s' % (name, next_val) if name else next_val
if name in self.commands:
cmd_ep = self.commands[name]
if hasattr(cmd_ep, 'resolve'):
cmd_factory = cmd_ep.resolve()
else:
# NOTE(dhellmann): Some fake classes don't take
# require as an argument. Yay?
arg_spec = inspect.getargspec(cmd_ep.load)
if 'require' in arg_spec[0]:
cmd_factory = cmd_ep.load(require=False)
else:
cmd_factory = cmd_ep.load()
return (cmd_factory, name, search_args)
else:
raise ValueError('Unknown command %r' % next(iter(argv), '')) | python | def find_command(self, argv):
"""Given an argument list, find a command and
return the processor and any remaining arguments.
"""
search_args = argv[:]
name = ''
while search_args:
if search_args[0].startswith('-'):
name = '%s %s' % (name, search_args[0])
raise ValueError('Invalid command %r' % name)
next_val = search_args.pop(0)
name = '%s %s' % (name, next_val) if name else next_val
if name in self.commands:
cmd_ep = self.commands[name]
if hasattr(cmd_ep, 'resolve'):
cmd_factory = cmd_ep.resolve()
else:
# NOTE(dhellmann): Some fake classes don't take
# require as an argument. Yay?
arg_spec = inspect.getargspec(cmd_ep.load)
if 'require' in arg_spec[0]:
cmd_factory = cmd_ep.load(require=False)
else:
cmd_factory = cmd_ep.load()
return (cmd_factory, name, search_args)
else:
raise ValueError('Unknown command %r' % next(iter(argv), '')) | ['def', 'find_command', '(', 'self', ',', 'argv', ')', ':', 'search_args', '=', 'argv', '[', ':', ']', 'name', '=', "''", 'while', 'search_args', ':', 'if', 'search_args', '[', '0', ']', '.', 'startswith', '(', "'-'", ')', ':', 'name', '=', "'%s %s'", '%', '(', 'name', ',', 'search_args', '[', '0', ']', ')', 'raise', 'ValueError', '(', "'Invalid command %r'", '%', 'name', ')', 'next_val', '=', 'search_args', '.', 'pop', '(', '0', ')', 'name', '=', "'%s %s'", '%', '(', 'name', ',', 'next_val', ')', 'if', 'name', 'else', 'next_val', 'if', 'name', 'in', 'self', '.', 'commands', ':', 'cmd_ep', '=', 'self', '.', 'commands', '[', 'name', ']', 'if', 'hasattr', '(', 'cmd_ep', ',', "'resolve'", ')', ':', 'cmd_factory', '=', 'cmd_ep', '.', 'resolve', '(', ')', 'else', ':', "# NOTE(dhellmann): Some fake classes don't take", '# require as an argument. Yay?', 'arg_spec', '=', 'inspect', '.', 'getargspec', '(', 'cmd_ep', '.', 'load', ')', 'if', "'require'", 'in', 'arg_spec', '[', '0', ']', ':', 'cmd_factory', '=', 'cmd_ep', '.', 'load', '(', 'require', '=', 'False', ')', 'else', ':', 'cmd_factory', '=', 'cmd_ep', '.', 'load', '(', ')', 'return', '(', 'cmd_factory', ',', 'name', ',', 'search_args', ')', 'else', ':', 'raise', 'ValueError', '(', "'Unknown command %r'", '%', 'next', '(', 'iter', '(', 'argv', ')', ',', "''", ')', ')'] | Given an argument list, find a command and
return the processor and any remaining arguments. | ['Given', 'an', 'argument', 'list', 'find', 'a', 'command', 'and', 'return', 'the', 'processor', 'and', 'any', 'remaining', 'arguments', '.'] | train | https://github.com/TurboGears/gearbox/blob/df496ab28050ce6a4cc4c502488f5c5812f2baff/gearbox/commandmanager.py#L63-L89 |
7,575 | gem/oq-engine | openquake/risklib/asset.py | TagCollection.get_tag | def get_tag(self, tagname, tagidx):
"""
:returns: the tag associated to the given tagname and tag index
"""
return '%s=%s' % (tagname, decode(getattr(self, tagname)[tagidx])) | python | def get_tag(self, tagname, tagidx):
"""
:returns: the tag associated to the given tagname and tag index
"""
return '%s=%s' % (tagname, decode(getattr(self, tagname)[tagidx])) | ['def', 'get_tag', '(', 'self', ',', 'tagname', ',', 'tagidx', ')', ':', 'return', "'%s=%s'", '%', '(', 'tagname', ',', 'decode', '(', 'getattr', '(', 'self', ',', 'tagname', ')', '[', 'tagidx', ']', ')', ')'] | :returns: the tag associated to the given tagname and tag index | [':', 'returns', ':', 'the', 'tag', 'associated', 'to', 'the', 'given', 'tagname', 'and', 'tag', 'index'] | train | https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/risklib/asset.py#L332-L336 |
7,576 | QUANTAXIS/QUANTAXIS | QUANTAXIS/QAFetch/QAQuery_Advance.py | QA_fetch_future_min_adv | def QA_fetch_future_min_adv(
code,
start, end=None,
frequence='1min',
if_drop_index=True,
collections=DATABASE.future_min):
'''
'获取股票分钟线'
:param code:
:param start:
:param end:
:param frequence:
:param if_drop_index:
:param collections:
:return:
'''
if frequence in ['1min', '1m']:
frequence = '1min'
elif frequence in ['5min', '5m']:
frequence = '5min'
elif frequence in ['15min', '15m']:
frequence = '15min'
elif frequence in ['30min', '30m']:
frequence = '30min'
elif frequence in ['60min', '60m']:
frequence = '60min'
# __data = [] 没有使用
end = start if end is None else end
if len(start) == 10:
start = '{} 00:00:00'.format(start)
if len(end) == 10:
end = '{} 15:00:00'.format(end)
# 🛠 todo 报告错误 如果开始时间 在 结束时间之后
# if start == end:
# 🛠 todo 如果相等,根据 frequence 获取开始时间的 时间段 QA_fetch_index_min_adv, 不支持start end是相等的
#print("QA Error QA_fetch_index_min_adv parameter code=%s , start=%s, end=%s is equal, should have time span! " % (code, start, end))
# return None
res = QA_fetch_future_min(
code, start, end, format='pd', frequence=frequence)
if res is None:
print("QA Error QA_fetch_future_min_adv parameter code=%s start=%s end=%s frequence=%s call QA_fetch_future_min return None" % (
code, start, end, frequence))
else:
res_reset_index = res.set_index(
['datetime', 'code'], drop=if_drop_index)
# if res_reset_index is None:
# print("QA Error QA_fetch_index_min_adv set index 'date, code' return None")
return QA_DataStruct_Future_min(res_reset_index) | python | def QA_fetch_future_min_adv(
code,
start, end=None,
frequence='1min',
if_drop_index=True,
collections=DATABASE.future_min):
'''
'获取股票分钟线'
:param code:
:param start:
:param end:
:param frequence:
:param if_drop_index:
:param collections:
:return:
'''
if frequence in ['1min', '1m']:
frequence = '1min'
elif frequence in ['5min', '5m']:
frequence = '5min'
elif frequence in ['15min', '15m']:
frequence = '15min'
elif frequence in ['30min', '30m']:
frequence = '30min'
elif frequence in ['60min', '60m']:
frequence = '60min'
# __data = [] 没有使用
end = start if end is None else end
if len(start) == 10:
start = '{} 00:00:00'.format(start)
if len(end) == 10:
end = '{} 15:00:00'.format(end)
# 🛠 todo 报告错误 如果开始时间 在 结束时间之后
# if start == end:
# 🛠 todo 如果相等,根据 frequence 获取开始时间的 时间段 QA_fetch_index_min_adv, 不支持start end是相等的
#print("QA Error QA_fetch_index_min_adv parameter code=%s , start=%s, end=%s is equal, should have time span! " % (code, start, end))
# return None
res = QA_fetch_future_min(
code, start, end, format='pd', frequence=frequence)
if res is None:
print("QA Error QA_fetch_future_min_adv parameter code=%s start=%s end=%s frequence=%s call QA_fetch_future_min return None" % (
code, start, end, frequence))
else:
res_reset_index = res.set_index(
['datetime', 'code'], drop=if_drop_index)
# if res_reset_index is None:
# print("QA Error QA_fetch_index_min_adv set index 'date, code' return None")
return QA_DataStruct_Future_min(res_reset_index) | ['def', 'QA_fetch_future_min_adv', '(', 'code', ',', 'start', ',', 'end', '=', 'None', ',', 'frequence', '=', "'1min'", ',', 'if_drop_index', '=', 'True', ',', 'collections', '=', 'DATABASE', '.', 'future_min', ')', ':', 'if', 'frequence', 'in', '[', "'1min'", ',', "'1m'", ']', ':', 'frequence', '=', "'1min'", 'elif', 'frequence', 'in', '[', "'5min'", ',', "'5m'", ']', ':', 'frequence', '=', "'5min'", 'elif', 'frequence', 'in', '[', "'15min'", ',', "'15m'", ']', ':', 'frequence', '=', "'15min'", 'elif', 'frequence', 'in', '[', "'30min'", ',', "'30m'", ']', ':', 'frequence', '=', "'30min'", 'elif', 'frequence', 'in', '[', "'60min'", ',', "'60m'", ']', ':', 'frequence', '=', "'60min'", '# __data = [] 没有使用', 'end', '=', 'start', 'if', 'end', 'is', 'None', 'else', 'end', 'if', 'len', '(', 'start', ')', '==', '10', ':', 'start', '=', "'{} 00:00:00'", '.', 'format', '(', 'start', ')', 'if', 'len', '(', 'end', ')', '==', '10', ':', 'end', '=', "'{} 15:00:00'", '.', 'format', '(', 'end', ')', '# 🛠 todo 报告错误 如果开始时间 在 结束时间之后', '# if start == end:', '# 🛠 todo 如果相等,根据 frequence 获取开始时间的 时间段 QA_fetch_index_min_adv, 不支持start end是相等的', '#print("QA Error QA_fetch_index_min_adv parameter code=%s , start=%s, end=%s is equal, should have time span! " % (code, start, end))', '# return None', 'res', '=', 'QA_fetch_future_min', '(', 'code', ',', 'start', ',', 'end', ',', 'format', '=', "'pd'", ',', 'frequence', '=', 'frequence', ')', 'if', 'res', 'is', 'None', ':', 'print', '(', '"QA Error QA_fetch_future_min_adv parameter code=%s start=%s end=%s frequence=%s call QA_fetch_future_min return None"', '%', '(', 'code', ',', 'start', ',', 'end', ',', 'frequence', ')', ')', 'else', ':', 'res_reset_index', '=', 'res', '.', 'set_index', '(', '[', "'datetime'", ',', "'code'", ']', ',', 'drop', '=', 'if_drop_index', ')', '# if res_reset_index is None:', '# print("QA Error QA_fetch_index_min_adv set index \'date, code\' return None")', 'return', 'QA_DataStruct_Future_min', '(', 'res_reset_index', ')'] | '获取股票分钟线'
:param code:
:param start:
:param end:
:param frequence:
:param if_drop_index:
:param collections:
:return: | ['获取股票分钟线', ':', 'param', 'code', ':', ':', 'param', 'start', ':', ':', 'param', 'end', ':', ':', 'param', 'frequence', ':', ':', 'param', 'if_drop_index', ':', ':', 'param', 'collections', ':', ':', 'return', ':'] | train | https://github.com/QUANTAXIS/QUANTAXIS/blob/bb1fe424e4108b62a1f712b81a05cf829297a5c0/QUANTAXIS/QAFetch/QAQuery_Advance.py#L378-L430 |
7,577 | hatemile/hatemile-for-python | hatemile/implementation/css.py | AccessibleCSSImplementation._speak_as_literal_punctuation_inherit | def _speak_as_literal_punctuation_inherit(self, element):
"""
Speak the punctuation for elements and descendants.
:param element: The element.
:type element: hatemile.util.html.htmldomelement.HTMLDOMElement
"""
self._reverse_speak_as(element, 'literal-punctuation')
self._reverse_speak_as(element, 'no-punctuation')
self._isolate_text_node(element)
self._visit(element, self._speak_as_literal_punctuation) | python | def _speak_as_literal_punctuation_inherit(self, element):
"""
Speak the punctuation for elements and descendants.
:param element: The element.
:type element: hatemile.util.html.htmldomelement.HTMLDOMElement
"""
self._reverse_speak_as(element, 'literal-punctuation')
self._reverse_speak_as(element, 'no-punctuation')
self._isolate_text_node(element)
self._visit(element, self._speak_as_literal_punctuation) | ['def', '_speak_as_literal_punctuation_inherit', '(', 'self', ',', 'element', ')', ':', 'self', '.', '_reverse_speak_as', '(', 'element', ',', "'literal-punctuation'", ')', 'self', '.', '_reverse_speak_as', '(', 'element', ',', "'no-punctuation'", ')', 'self', '.', '_isolate_text_node', '(', 'element', ')', 'self', '.', '_visit', '(', 'element', ',', 'self', '.', '_speak_as_literal_punctuation', ')'] | Speak the punctuation for elements and descendants.
:param element: The element.
:type element: hatemile.util.html.htmldomelement.HTMLDOMElement | ['Speak', 'the', 'punctuation', 'for', 'elements', 'and', 'descendants', '.'] | train | https://github.com/hatemile/hatemile-for-python/blob/1e914f9aa09f6f8d78282af131311546ecba9fb8/hatemile/implementation/css.py#L816-L829 |
7,578 | googleapis/google-cloud-python | dataproc/google/cloud/dataproc_v1beta2/gapic/cluster_controller_client.py | ClusterControllerClient.list_clusters | def list_clusters(
self,
project_id,
region,
filter_=None,
page_size=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Lists all regions/{region}/clusters in a project.
Example:
>>> from google.cloud import dataproc_v1beta2
>>>
>>> client = dataproc_v1beta2.ClusterControllerClient()
>>>
>>> # TODO: Initialize `project_id`:
>>> project_id = ''
>>>
>>> # TODO: Initialize `region`:
>>> region = ''
>>>
>>> # Iterate over all results
>>> for element in client.list_clusters(project_id, region):
... # process element
... pass
>>>
>>>
>>> # Alternatively:
>>>
>>> # Iterate over results one page at a time
>>> for page in client.list_clusters(project_id, region).pages:
... for element in page:
... # process element
... pass
Args:
project_id (str): Required. The ID of the Google Cloud Platform project that the cluster
belongs to.
region (str): Required. The Cloud Dataproc region in which to handle the request.
filter_ (str): Optional. A filter constraining the clusters to list. Filters are
case-sensitive and have the following syntax:
field = value [AND [field = value]] ...
where **field** is one of ``status.state``, ``clusterName``, or
``labels.[KEY]``, and ``[KEY]`` is a label key. **value** can be ``*``
to match all values. ``status.state`` can be one of the following:
``ACTIVE``, ``INACTIVE``, ``CREATING``, ``RUNNING``, ``ERROR``,
``DELETING``, or ``UPDATING``. ``ACTIVE`` contains the ``CREATING``,
``UPDATING``, and ``RUNNING`` states. ``INACTIVE`` contains the
``DELETING`` and ``ERROR`` states. ``clusterName`` is the name of the
cluster provided at creation time. Only the logical ``AND`` operator is
supported; space-separated items are treated as having an implicit
``AND`` operator.
Example filter:
status.state = ACTIVE AND clusterName = mycluster AND labels.env =
staging AND labels.starred = \*
page_size (int): The maximum number of resources contained in the
underlying API response. If page streaming is performed per-
resource, this parameter does not affect the return value. If page
streaming is performed per-page, this determines the maximum number
of resources in a page.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.gax.PageIterator` instance. By default, this
is an iterable of :class:`~google.cloud.dataproc_v1beta2.types.Cluster` instances.
This object can also be configured to iterate over the pages
of the response through the `options` parameter.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "list_clusters" not in self._inner_api_calls:
self._inner_api_calls[
"list_clusters"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.list_clusters,
default_retry=self._method_configs["ListClusters"].retry,
default_timeout=self._method_configs["ListClusters"].timeout,
client_info=self._client_info,
)
request = clusters_pb2.ListClustersRequest(
project_id=project_id, region=region, filter=filter_, page_size=page_size
)
iterator = google.api_core.page_iterator.GRPCIterator(
client=None,
method=functools.partial(
self._inner_api_calls["list_clusters"],
retry=retry,
timeout=timeout,
metadata=metadata,
),
request=request,
items_field="clusters",
request_token_field="page_token",
response_token_field="next_page_token",
)
return iterator | python | def list_clusters(
self,
project_id,
region,
filter_=None,
page_size=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Lists all regions/{region}/clusters in a project.
Example:
>>> from google.cloud import dataproc_v1beta2
>>>
>>> client = dataproc_v1beta2.ClusterControllerClient()
>>>
>>> # TODO: Initialize `project_id`:
>>> project_id = ''
>>>
>>> # TODO: Initialize `region`:
>>> region = ''
>>>
>>> # Iterate over all results
>>> for element in client.list_clusters(project_id, region):
... # process element
... pass
>>>
>>>
>>> # Alternatively:
>>>
>>> # Iterate over results one page at a time
>>> for page in client.list_clusters(project_id, region).pages:
... for element in page:
... # process element
... pass
Args:
project_id (str): Required. The ID of the Google Cloud Platform project that the cluster
belongs to.
region (str): Required. The Cloud Dataproc region in which to handle the request.
filter_ (str): Optional. A filter constraining the clusters to list. Filters are
case-sensitive and have the following syntax:
field = value [AND [field = value]] ...
where **field** is one of ``status.state``, ``clusterName``, or
``labels.[KEY]``, and ``[KEY]`` is a label key. **value** can be ``*``
to match all values. ``status.state`` can be one of the following:
``ACTIVE``, ``INACTIVE``, ``CREATING``, ``RUNNING``, ``ERROR``,
``DELETING``, or ``UPDATING``. ``ACTIVE`` contains the ``CREATING``,
``UPDATING``, and ``RUNNING`` states. ``INACTIVE`` contains the
``DELETING`` and ``ERROR`` states. ``clusterName`` is the name of the
cluster provided at creation time. Only the logical ``AND`` operator is
supported; space-separated items are treated as having an implicit
``AND`` operator.
Example filter:
status.state = ACTIVE AND clusterName = mycluster AND labels.env =
staging AND labels.starred = \*
page_size (int): The maximum number of resources contained in the
underlying API response. If page streaming is performed per-
resource, this parameter does not affect the return value. If page
streaming is performed per-page, this determines the maximum number
of resources in a page.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.gax.PageIterator` instance. By default, this
is an iterable of :class:`~google.cloud.dataproc_v1beta2.types.Cluster` instances.
This object can also be configured to iterate over the pages
of the response through the `options` parameter.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "list_clusters" not in self._inner_api_calls:
self._inner_api_calls[
"list_clusters"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.list_clusters,
default_retry=self._method_configs["ListClusters"].retry,
default_timeout=self._method_configs["ListClusters"].timeout,
client_info=self._client_info,
)
request = clusters_pb2.ListClustersRequest(
project_id=project_id, region=region, filter=filter_, page_size=page_size
)
iterator = google.api_core.page_iterator.GRPCIterator(
client=None,
method=functools.partial(
self._inner_api_calls["list_clusters"],
retry=retry,
timeout=timeout,
metadata=metadata,
),
request=request,
items_field="clusters",
request_token_field="page_token",
response_token_field="next_page_token",
)
return iterator | ['def', 'list_clusters', '(', 'self', ',', 'project_id', ',', 'region', ',', 'filter_', '=', 'None', ',', 'page_size', '=', 'None', ',', 'retry', '=', 'google', '.', 'api_core', '.', 'gapic_v1', '.', 'method', '.', 'DEFAULT', ',', 'timeout', '=', 'google', '.', 'api_core', '.', 'gapic_v1', '.', 'method', '.', 'DEFAULT', ',', 'metadata', '=', 'None', ',', ')', ':', '# Wrap the transport method to add retry and timeout logic.', 'if', '"list_clusters"', 'not', 'in', 'self', '.', '_inner_api_calls', ':', 'self', '.', '_inner_api_calls', '[', '"list_clusters"', ']', '=', 'google', '.', 'api_core', '.', 'gapic_v1', '.', 'method', '.', 'wrap_method', '(', 'self', '.', 'transport', '.', 'list_clusters', ',', 'default_retry', '=', 'self', '.', '_method_configs', '[', '"ListClusters"', ']', '.', 'retry', ',', 'default_timeout', '=', 'self', '.', '_method_configs', '[', '"ListClusters"', ']', '.', 'timeout', ',', 'client_info', '=', 'self', '.', '_client_info', ',', ')', 'request', '=', 'clusters_pb2', '.', 'ListClustersRequest', '(', 'project_id', '=', 'project_id', ',', 'region', '=', 'region', ',', 'filter', '=', 'filter_', ',', 'page_size', '=', 'page_size', ')', 'iterator', '=', 'google', '.', 'api_core', '.', 'page_iterator', '.', 'GRPCIterator', '(', 'client', '=', 'None', ',', 'method', '=', 'functools', '.', 'partial', '(', 'self', '.', '_inner_api_calls', '[', '"list_clusters"', ']', ',', 'retry', '=', 'retry', ',', 'timeout', '=', 'timeout', ',', 'metadata', '=', 'metadata', ',', ')', ',', 'request', '=', 'request', ',', 'items_field', '=', '"clusters"', ',', 'request_token_field', '=', '"page_token"', ',', 'response_token_field', '=', '"next_page_token"', ',', ')', 'return', 'iterator'] | Lists all regions/{region}/clusters in a project.
Example:
>>> from google.cloud import dataproc_v1beta2
>>>
>>> client = dataproc_v1beta2.ClusterControllerClient()
>>>
>>> # TODO: Initialize `project_id`:
>>> project_id = ''
>>>
>>> # TODO: Initialize `region`:
>>> region = ''
>>>
>>> # Iterate over all results
>>> for element in client.list_clusters(project_id, region):
... # process element
... pass
>>>
>>>
>>> # Alternatively:
>>>
>>> # Iterate over results one page at a time
>>> for page in client.list_clusters(project_id, region).pages:
... for element in page:
... # process element
... pass
Args:
project_id (str): Required. The ID of the Google Cloud Platform project that the cluster
belongs to.
region (str): Required. The Cloud Dataproc region in which to handle the request.
filter_ (str): Optional. A filter constraining the clusters to list. Filters are
case-sensitive and have the following syntax:
field = value [AND [field = value]] ...
where **field** is one of ``status.state``, ``clusterName``, or
``labels.[KEY]``, and ``[KEY]`` is a label key. **value** can be ``*``
to match all values. ``status.state`` can be one of the following:
``ACTIVE``, ``INACTIVE``, ``CREATING``, ``RUNNING``, ``ERROR``,
``DELETING``, or ``UPDATING``. ``ACTIVE`` contains the ``CREATING``,
``UPDATING``, and ``RUNNING`` states. ``INACTIVE`` contains the
``DELETING`` and ``ERROR`` states. ``clusterName`` is the name of the
cluster provided at creation time. Only the logical ``AND`` operator is
supported; space-separated items are treated as having an implicit
``AND`` operator.
Example filter:
status.state = ACTIVE AND clusterName = mycluster AND labels.env =
staging AND labels.starred = \*
page_size (int): The maximum number of resources contained in the
underlying API response. If page streaming is performed per-
resource, this parameter does not affect the return value. If page
streaming is performed per-page, this determines the maximum number
of resources in a page.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.gax.PageIterator` instance. By default, this
is an iterable of :class:`~google.cloud.dataproc_v1beta2.types.Cluster` instances.
This object can also be configured to iterate over the pages
of the response through the `options` parameter.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid. | ['Lists', 'all', 'regions', '/', '{', 'region', '}', '/', 'clusters', 'in', 'a', 'project', '.'] | train | https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/dataproc/google/cloud/dataproc_v1beta2/gapic/cluster_controller_client.py#L820-L936 |
7,579 | bsolomon1124/pyfinance | pyfinance/ols.py | RollingOLS._predicted | def _predicted(self):
"""The predicted values of y ('yhat')."""
return np.squeeze(
np.matmul(self.xwins, np.expand_dims(self.solution, axis=-1))
) | python | def _predicted(self):
"""The predicted values of y ('yhat')."""
return np.squeeze(
np.matmul(self.xwins, np.expand_dims(self.solution, axis=-1))
) | ['def', '_predicted', '(', 'self', ')', ':', 'return', 'np', '.', 'squeeze', '(', 'np', '.', 'matmul', '(', 'self', '.', 'xwins', ',', 'np', '.', 'expand_dims', '(', 'self', '.', 'solution', ',', 'axis', '=', '-', '1', ')', ')', ')'] | The predicted values of y ('yhat'). | ['The', 'predicted', 'values', 'of', 'y', '(', 'yhat', ')', '.'] | train | https://github.com/bsolomon1124/pyfinance/blob/c95925209a809b4e648e79cbeaf7711d8e5ff1a6/pyfinance/ols.py#L430-L434 |
7,580 | maxcountryman/flask-login | flask_login/login_manager.py | LoginManager.unauthorized | def unauthorized(self):
'''
This is called when the user is required to log in. If you register a
callback with :meth:`LoginManager.unauthorized_handler`, then it will
be called. Otherwise, it will take the following actions:
- Flash :attr:`LoginManager.login_message` to the user.
- If the app is using blueprints find the login view for
the current blueprint using `blueprint_login_views`. If the app
is not using blueprints or the login view for the current
blueprint is not specified use the value of `login_view`.
- Redirect the user to the login view. (The page they were
attempting to access will be passed in the ``next`` query
string variable, so you can redirect there if present instead
of the homepage. Alternatively, it will be added to the session
as ``next`` if USE_SESSION_FOR_NEXT is set.)
If :attr:`LoginManager.login_view` is not defined, then it will simply
        raise an HTTP 401 (Unauthorized) error instead.
This should be returned from a view or before/after_request function,
otherwise the redirect will have no effect.
'''
user_unauthorized.send(current_app._get_current_object())
if self.unauthorized_callback:
return self.unauthorized_callback()
if request.blueprint in self.blueprint_login_views:
login_view = self.blueprint_login_views[request.blueprint]
else:
login_view = self.login_view
if not login_view:
abort(401)
if self.login_message:
if self.localize_callback is not None:
flash(self.localize_callback(self.login_message),
category=self.login_message_category)
else:
flash(self.login_message, category=self.login_message_category)
config = current_app.config
if config.get('USE_SESSION_FOR_NEXT', USE_SESSION_FOR_NEXT):
login_url = expand_login_view(login_view)
session['_id'] = self._session_identifier_generator()
session['next'] = make_next_param(login_url, request.url)
redirect_url = make_login_url(login_view)
else:
redirect_url = make_login_url(login_view, next_url=request.url)
return redirect(redirect_url) | python | def unauthorized(self):
'''
This is called when the user is required to log in. If you register a
callback with :meth:`LoginManager.unauthorized_handler`, then it will
be called. Otherwise, it will take the following actions:
- Flash :attr:`LoginManager.login_message` to the user.
- If the app is using blueprints find the login view for
the current blueprint using `blueprint_login_views`. If the app
is not using blueprints or the login view for the current
blueprint is not specified use the value of `login_view`.
- Redirect the user to the login view. (The page they were
attempting to access will be passed in the ``next`` query
string variable, so you can redirect there if present instead
of the homepage. Alternatively, it will be added to the session
as ``next`` if USE_SESSION_FOR_NEXT is set.)
If :attr:`LoginManager.login_view` is not defined, then it will simply
        raise an HTTP 401 (Unauthorized) error instead.
This should be returned from a view or before/after_request function,
otherwise the redirect will have no effect.
'''
user_unauthorized.send(current_app._get_current_object())
if self.unauthorized_callback:
return self.unauthorized_callback()
if request.blueprint in self.blueprint_login_views:
login_view = self.blueprint_login_views[request.blueprint]
else:
login_view = self.login_view
if not login_view:
abort(401)
if self.login_message:
if self.localize_callback is not None:
flash(self.localize_callback(self.login_message),
category=self.login_message_category)
else:
flash(self.login_message, category=self.login_message_category)
config = current_app.config
if config.get('USE_SESSION_FOR_NEXT', USE_SESSION_FOR_NEXT):
login_url = expand_login_view(login_view)
session['_id'] = self._session_identifier_generator()
session['next'] = make_next_param(login_url, request.url)
redirect_url = make_login_url(login_view)
else:
redirect_url = make_login_url(login_view, next_url=request.url)
return redirect(redirect_url) | ['def', 'unauthorized', '(', 'self', ')', ':', 'user_unauthorized', '.', 'send', '(', 'current_app', '.', '_get_current_object', '(', ')', ')', 'if', 'self', '.', 'unauthorized_callback', ':', 'return', 'self', '.', 'unauthorized_callback', '(', ')', 'if', 'request', '.', 'blueprint', 'in', 'self', '.', 'blueprint_login_views', ':', 'login_view', '=', 'self', '.', 'blueprint_login_views', '[', 'request', '.', 'blueprint', ']', 'else', ':', 'login_view', '=', 'self', '.', 'login_view', 'if', 'not', 'login_view', ':', 'abort', '(', '401', ')', 'if', 'self', '.', 'login_message', ':', 'if', 'self', '.', 'localize_callback', 'is', 'not', 'None', ':', 'flash', '(', 'self', '.', 'localize_callback', '(', 'self', '.', 'login_message', ')', ',', 'category', '=', 'self', '.', 'login_message_category', ')', 'else', ':', 'flash', '(', 'self', '.', 'login_message', ',', 'category', '=', 'self', '.', 'login_message_category', ')', 'config', '=', 'current_app', '.', 'config', 'if', 'config', '.', 'get', '(', "'USE_SESSION_FOR_NEXT'", ',', 'USE_SESSION_FOR_NEXT', ')', ':', 'login_url', '=', 'expand_login_view', '(', 'login_view', ')', 'session', '[', "'_id'", ']', '=', 'self', '.', '_session_identifier_generator', '(', ')', 'session', '[', "'next'", ']', '=', 'make_next_param', '(', 'login_url', ',', 'request', '.', 'url', ')', 'redirect_url', '=', 'make_login_url', '(', 'login_view', ')', 'else', ':', 'redirect_url', '=', 'make_login_url', '(', 'login_view', ',', 'next_url', '=', 'request', '.', 'url', ')', 'return', 'redirect', '(', 'redirect_url', ')'] | This is called when the user is required to log in. If you register a
callback with :meth:`LoginManager.unauthorized_handler`, then it will
be called. Otherwise, it will take the following actions:
- Flash :attr:`LoginManager.login_message` to the user.
- If the app is using blueprints find the login view for
the current blueprint using `blueprint_login_views`. If the app
is not using blueprints or the login view for the current
blueprint is not specified use the value of `login_view`.
- Redirect the user to the login view. (The page they were
attempting to access will be passed in the ``next`` query
string variable, so you can redirect there if present instead
of the homepage. Alternatively, it will be added to the session
as ``next`` if USE_SESSION_FOR_NEXT is set.)
If :attr:`LoginManager.login_view` is not defined, then it will simply
        raise an HTTP 401 (Unauthorized) error instead.
This should be returned from a view or before/after_request function,
otherwise the redirect will have no effect. | ['This', 'is', 'called', 'when', 'the', 'user', 'is', 'required', 'to', 'log', 'in', '.', 'If', 'you', 'register', 'a', 'callback', 'with', ':', 'meth', ':', 'LoginManager', '.', 'unauthorized_handler', 'then', 'it', 'will', 'be', 'called', '.', 'Otherwise', 'it', 'will', 'take', 'the', 'following', 'actions', ':'] | train | https://github.com/maxcountryman/flask-login/blob/d22f80d166ee260d44e0d2d9ea973b784ef3621b/flask_login/login_manager.py#L122-L176 |
7,581 | Alir3z4/django-databrowse | django_databrowse/sites.py | DatabrowseSite.unregister | def unregister(self, *model_list):
"""
Unregisters the given model(s).
If a model isn't already registered, this will raise NotRegistered.
"""
for model in model_list:
if model not in self.registry:
raise NotRegistered('The model %s is not registered' %
model.__name__)
del self.registry[model] | python | def unregister(self, *model_list):
"""
Unregisters the given model(s).
If a model isn't already registered, this will raise NotRegistered.
"""
for model in model_list:
if model not in self.registry:
raise NotRegistered('The model %s is not registered' %
model.__name__)
del self.registry[model] | ['def', 'unregister', '(', 'self', ',', '*', 'model_list', ')', ':', 'for', 'model', 'in', 'model_list', ':', 'if', 'model', 'not', 'in', 'self', '.', 'registry', ':', 'raise', 'NotRegistered', '(', "'The model %s is not registered'", '%', 'model', '.', '__name__', ')', 'del', 'self', '.', 'registry', '[', 'model', ']'] | Unregisters the given model(s).
If a model isn't already registered, this will raise NotRegistered. | ['Unregisters', 'the', 'given', 'model', '(', 's', ')', '.'] | train | https://github.com/Alir3z4/django-databrowse/blob/4469495cd47a0da506ddf4e8cc752c2f453e0339/django_databrowse/sites.py#L138-L148 |
7,582 | houluy/chessboard | chessboard/__init__.py | Chessboard.set_pos | def set_pos(self, pos, check=False):
'''Set a chess'''
self.validate_pos(pos)
x, y = pos
user = self.get_player()
self.history[self._game_round] = copy.deepcopy(self.pos)
self.pos[x][y] = user
pos_str = self._cal_key(pos)
self._pos_dict[pos_str] = user
self._user_pos_dict[user].append(pos)
self._game_round += 1
if check:
winning = self.check_win_by_step(x, y, user)
return winning
else:
return (x, y) | python | def set_pos(self, pos, check=False):
'''Set a chess'''
self.validate_pos(pos)
x, y = pos
user = self.get_player()
self.history[self._game_round] = copy.deepcopy(self.pos)
self.pos[x][y] = user
pos_str = self._cal_key(pos)
self._pos_dict[pos_str] = user
self._user_pos_dict[user].append(pos)
self._game_round += 1
if check:
winning = self.check_win_by_step(x, y, user)
return winning
else:
return (x, y) | ['def', 'set_pos', '(', 'self', ',', 'pos', ',', 'check', '=', 'False', ')', ':', 'self', '.', 'validate_pos', '(', 'pos', ')', 'x', ',', 'y', '=', 'pos', 'user', '=', 'self', '.', 'get_player', '(', ')', 'self', '.', 'history', '[', 'self', '.', '_game_round', ']', '=', 'copy', '.', 'deepcopy', '(', 'self', '.', 'pos', ')', 'self', '.', 'pos', '[', 'x', ']', '[', 'y', ']', '=', 'user', 'pos_str', '=', 'self', '.', '_cal_key', '(', 'pos', ')', 'self', '.', '_pos_dict', '[', 'pos_str', ']', '=', 'user', 'self', '.', '_user_pos_dict', '[', 'user', ']', '.', 'append', '(', 'pos', ')', 'self', '.', '_game_round', '+=', '1', 'if', 'check', ':', 'winning', '=', 'self', '.', 'check_win_by_step', '(', 'x', ',', 'y', ',', 'user', ')', 'return', 'winning', 'else', ':', 'return', '(', 'x', ',', 'y', ')'] | Set a chess | ['Set', 'a', 'chess'] | train | https://github.com/houluy/chessboard/blob/b834819d93d71b492f27780a58dfbb3a107d7e85/chessboard/__init__.py#L235-L250 |
7,583 | agile4you/bottle-neck | bottle_neck/cbv.py | plugin_method | def plugin_method(*plugin_names):
"""Plugin Method decorator.
Signs a web handler function with the plugins to be applied as attributes.
Args:
plugin_names (list): A list of plugin callable names
Returns:
A wrapped handler callable.
Examples:
>>> @plugin_method('json', 'bill')
... def method():
... return "Hello!"
...
>>> print method.json
True
>>> print method.bill
True
"""
def wrapper(callable_obj):
for plugin_name in plugin_names:
if not hasattr(callable_obj, plugin_name):
setattr(callable_obj, plugin_name, True)
return callable_obj
return wrapper | python | def plugin_method(*plugin_names):
"""Plugin Method decorator.
Signs a web handler function with the plugins to be applied as attributes.
Args:
plugin_names (list): A list of plugin callable names
Returns:
A wrapped handler callable.
Examples:
>>> @plugin_method('json', 'bill')
... def method():
... return "Hello!"
...
>>> print method.json
True
>>> print method.bill
True
"""
def wrapper(callable_obj):
for plugin_name in plugin_names:
if not hasattr(callable_obj, plugin_name):
setattr(callable_obj, plugin_name, True)
return callable_obj
return wrapper | ['def', 'plugin_method', '(', '*', 'plugin_names', ')', ':', 'def', 'wrapper', '(', 'callable_obj', ')', ':', 'for', 'plugin_name', 'in', 'plugin_names', ':', 'if', 'not', 'hasattr', '(', 'callable_obj', ',', 'plugin_name', ')', ':', 'setattr', '(', 'callable_obj', ',', 'plugin_name', ',', 'True', ')', 'return', 'callable_obj', 'return', 'wrapper'] | Plugin Method decorator.
Signs a web handler function with the plugins to be applied as attributes.
Args:
plugin_names (list): A list of plugin callable names
Returns:
A wrapped handler callable.
Examples:
>>> @plugin_method('json', 'bill')
... def method():
... return "Hello!"
...
>>> print method.json
True
>>> print method.bill
True | ['Plugin', 'Method', 'decorator', '.', 'Signs', 'a', 'web', 'handler', 'function', 'with', 'the', 'plugins', 'to', 'be', 'applied', 'as', 'attributes', '.'] | train | https://github.com/agile4you/bottle-neck/blob/ebc670a4b178255473d68e9b4122ba04e38f4810/bottle_neck/cbv.py#L112-L138 |
7,584 | sorgerlab/indra | rest_api/api.py | cwms_process_text | def cwms_process_text():
"""Process text with CWMS and return INDRA Statements."""
if request.method == 'OPTIONS':
return {}
response = request.body.read().decode('utf-8')
body = json.loads(response)
text = body.get('text')
cp = cwms.process_text(text)
return _stmts_from_proc(cp) | python | def cwms_process_text():
"""Process text with CWMS and return INDRA Statements."""
if request.method == 'OPTIONS':
return {}
response = request.body.read().decode('utf-8')
body = json.loads(response)
text = body.get('text')
cp = cwms.process_text(text)
return _stmts_from_proc(cp) | ['def', 'cwms_process_text', '(', ')', ':', 'if', 'request', '.', 'method', '==', "'OPTIONS'", ':', 'return', '{', '}', 'response', '=', 'request', '.', 'body', '.', 'read', '(', ')', '.', 'decode', '(', "'utf-8'", ')', 'body', '=', 'json', '.', 'loads', '(', 'response', ')', 'text', '=', 'body', '.', 'get', '(', "'text'", ')', 'cp', '=', 'cwms', '.', 'process_text', '(', 'text', ')', 'return', '_stmts_from_proc', '(', 'cp', ')'] | Process text with CWMS and return INDRA Statements. | ['Process', 'text', 'with', 'CWMS', 'and', 'return', 'INDRA', 'Statements', '.'] | train | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/rest_api/api.py#L246-L254 |
7,585 | lappis-unb/salic-ml | tasks.py | update_data | def update_data(ctx, models=True, pickles=False, f=False):
"""
Updates local django db projects and pickle files using salic database from
MinC
Pickles are saved in /data/raw/ from sql queries in /data/scripts/
Models are created from /data/scripts/models/
"""
if pickles:
save_sql_to_files(f)
if models:
if f:
manage(ctx, 'create_models_from_sql --force True', env={})
else:
manage(ctx, 'create_models_from_sql', env={}) | python | def update_data(ctx, models=True, pickles=False, f=False):
"""
Updates local django db projects and pickle files using salic database from
MinC
Pickles are saved in /data/raw/ from sql queries in /data/scripts/
Models are created from /data/scripts/models/
"""
if pickles:
save_sql_to_files(f)
if models:
if f:
manage(ctx, 'create_models_from_sql --force True', env={})
else:
manage(ctx, 'create_models_from_sql', env={}) | ['def', 'update_data', '(', 'ctx', ',', 'models', '=', 'True', ',', 'pickles', '=', 'False', ',', 'f', '=', 'False', ')', ':', 'if', 'pickles', ':', 'save_sql_to_files', '(', 'f', ')', 'if', 'models', ':', 'if', 'f', ':', 'manage', '(', 'ctx', ',', "'create_models_from_sql --force True'", ',', 'env', '=', '{', '}', ')', 'else', ':', 'manage', '(', 'ctx', ',', "'create_models_from_sql'", ',', 'env', '=', '{', '}', ')'] | Updates local django db projects and pickle files using salic database from
MinC
Pickles are saved in /data/raw/ from sql queries in /data/scripts/
Models are created from /data/scripts/models/ | ['Updates', 'local', 'django', 'db', 'projects', 'and', 'pickle', 'files', 'using', 'salic', 'database', 'from', 'MinC', 'Pickles', 'are', 'saved', 'in', '/', 'data', '/', 'raw', '/', 'from', 'sql', 'queries', 'in', '/', 'data', '/', 'scripts', '/', 'Models', 'are', 'created', 'from', '/', 'data', '/', 'scripts', '/', 'models', '/'] | train | https://github.com/lappis-unb/salic-ml/blob/1b3ebc4f8067740999897ccffd9892dc94482a93/tasks.py#L72-L85 |
7,586 | thomasdelaet/python-velbus | velbus/module.py | Module.on_message | def on_message(self, message):
"""
Process received message
"""
if message.address != self._address:
return
if isinstance(message, velbus.ChannelNamePart1Message) or isinstance(message, velbus.ChannelNamePart1Message2):
self._process_channel_name_message(1, message)
elif isinstance(message, velbus.ChannelNamePart2Message) or isinstance(message, velbus.ChannelNamePart2Message2):
self._process_channel_name_message(2, message)
elif isinstance(message, velbus.ChannelNamePart3Message) or isinstance(message, velbus.ChannelNamePart3Message2):
self._process_channel_name_message(3, message)
elif isinstance(message, velbus.ModuleTypeMessage):
self._process_module_type_message(message)
else:
self._on_message(message) | python | def on_message(self, message):
"""
Process received message
"""
if message.address != self._address:
return
if isinstance(message, velbus.ChannelNamePart1Message) or isinstance(message, velbus.ChannelNamePart1Message2):
self._process_channel_name_message(1, message)
elif isinstance(message, velbus.ChannelNamePart2Message) or isinstance(message, velbus.ChannelNamePart2Message2):
self._process_channel_name_message(2, message)
elif isinstance(message, velbus.ChannelNamePart3Message) or isinstance(message, velbus.ChannelNamePart3Message2):
self._process_channel_name_message(3, message)
elif isinstance(message, velbus.ModuleTypeMessage):
self._process_module_type_message(message)
else:
self._on_message(message) | ['def', 'on_message', '(', 'self', ',', 'message', ')', ':', 'if', 'message', '.', 'address', '!=', 'self', '.', '_address', ':', 'return', 'if', 'isinstance', '(', 'message', ',', 'velbus', '.', 'ChannelNamePart1Message', ')', 'or', 'isinstance', '(', 'message', ',', 'velbus', '.', 'ChannelNamePart1Message2', ')', ':', 'self', '.', '_process_channel_name_message', '(', '1', ',', 'message', ')', 'elif', 'isinstance', '(', 'message', ',', 'velbus', '.', 'ChannelNamePart2Message', ')', 'or', 'isinstance', '(', 'message', ',', 'velbus', '.', 'ChannelNamePart2Message2', ')', ':', 'self', '.', '_process_channel_name_message', '(', '2', ',', 'message', ')', 'elif', 'isinstance', '(', 'message', ',', 'velbus', '.', 'ChannelNamePart3Message', ')', 'or', 'isinstance', '(', 'message', ',', 'velbus', '.', 'ChannelNamePart3Message2', ')', ':', 'self', '.', '_process_channel_name_message', '(', '3', ',', 'message', ')', 'elif', 'isinstance', '(', 'message', ',', 'velbus', '.', 'ModuleTypeMessage', ')', ':', 'self', '.', '_process_module_type_message', '(', 'message', ')', 'else', ':', 'self', '.', '_on_message', '(', 'message', ')'] | Process received message | ['Process', 'received', 'message'] | train | https://github.com/thomasdelaet/python-velbus/blob/af2f8af43f1a24bf854eff9f3126fd7b5c41b3dd/velbus/module.py#L65-L80 |
7,587 | JensAstrup/pyOutlook | pyOutlook/core/message.py | Message.move_to | def move_to(self, folder):
"""Moves the email to the folder specified by the folder parameter.
Args:
folder: A string containing the folder ID the message should be moved to, or a Folder instance
"""
if isinstance(folder, Folder):
self.move_to(folder.id)
else:
self._move_to(folder) | python | def move_to(self, folder):
"""Moves the email to the folder specified by the folder parameter.
Args:
folder: A string containing the folder ID the message should be moved to, or a Folder instance
"""
if isinstance(folder, Folder):
self.move_to(folder.id)
else:
self._move_to(folder) | ['def', 'move_to', '(', 'self', ',', 'folder', ')', ':', 'if', 'isinstance', '(', 'folder', ',', 'Folder', ')', ':', 'self', '.', 'move_to', '(', 'folder', '.', 'id', ')', 'else', ':', 'self', '.', '_move_to', '(', 'folder', ')'] | Moves the email to the folder specified by the folder parameter.
Args:
folder: A string containing the folder ID the message should be moved to, or a Folder instance | ['Moves', 'the', 'email', 'to', 'the', 'folder', 'specified', 'by', 'the', 'folder', 'parameter', '.'] | train | https://github.com/JensAstrup/pyOutlook/blob/f4ca9d4a8629c0a41f78102ce84fab702a841167/pyOutlook/core/message.py#L397-L407 |
7,588 | rkhleics/wagtailmenus | wagtailmenus/models/menus.py | Menu.render_to_template | def render_to_template(self):
"""
Render the current menu instance to a template and return a string
"""
context_data = self.get_context_data()
template = self.get_template()
context_data['current_template'] = template.template.name
return template.render(context_data) | python | def render_to_template(self):
"""
Render the current menu instance to a template and return a string
"""
context_data = self.get_context_data()
template = self.get_template()
context_data['current_template'] = template.template.name
return template.render(context_data) | ['def', 'render_to_template', '(', 'self', ')', ':', 'context_data', '=', 'self', '.', 'get_context_data', '(', ')', 'template', '=', 'self', '.', 'get_template', '(', ')', 'context_data', '[', "'current_template'", ']', '=', 'template', '.', 'template', '.', 'name', 'return', 'template', '.', 'render', '(', 'context_data', ')'] | Render the current menu instance to a template and return a string | ['Render', 'the', 'current', 'menu', 'instance', 'to', 'a', 'template', 'and', 'return', 'a', 'string'] | train | https://github.com/rkhleics/wagtailmenus/blob/a41f240bed0d362e0d4dd4ef04a230f2b1827a93/wagtailmenus/models/menus.py#L222-L230 |
7,589 | linkedin/luminol | src/luminol/algorithms/anomaly_detector_algorithms/default_detector.py | DefaultDetector._set_scores | def _set_scores(self):
"""
Set anomaly scores using a weighted sum.
"""
anom_scores_ema = self.exp_avg_detector.run()
anom_scores_deri = self.derivative_detector.run()
anom_scores = {}
for timestamp in anom_scores_ema.timestamps:
# Compute a weighted anomaly score.
anom_scores[timestamp] = max(anom_scores_ema[timestamp],
anom_scores_ema[timestamp] * DEFAULT_DETECTOR_EMA_WEIGHT + anom_scores_deri[timestamp] * (1 - DEFAULT_DETECTOR_EMA_WEIGHT))
# If ema score is significant enough, take the bigger one of the weighted score and deri score.
if anom_scores_ema[timestamp] > DEFAULT_DETECTOR_EMA_SIGNIFICANT:
anom_scores[timestamp] = max(anom_scores[timestamp], anom_scores_deri[timestamp])
self.anom_scores = TimeSeries(self._denoise_scores(anom_scores)) | python | def _set_scores(self):
"""
Set anomaly scores using a weighted sum.
"""
anom_scores_ema = self.exp_avg_detector.run()
anom_scores_deri = self.derivative_detector.run()
anom_scores = {}
for timestamp in anom_scores_ema.timestamps:
# Compute a weighted anomaly score.
anom_scores[timestamp] = max(anom_scores_ema[timestamp],
anom_scores_ema[timestamp] * DEFAULT_DETECTOR_EMA_WEIGHT + anom_scores_deri[timestamp] * (1 - DEFAULT_DETECTOR_EMA_WEIGHT))
# If ema score is significant enough, take the bigger one of the weighted score and deri score.
if anom_scores_ema[timestamp] > DEFAULT_DETECTOR_EMA_SIGNIFICANT:
anom_scores[timestamp] = max(anom_scores[timestamp], anom_scores_deri[timestamp])
self.anom_scores = TimeSeries(self._denoise_scores(anom_scores)) | ['def', '_set_scores', '(', 'self', ')', ':', 'anom_scores_ema', '=', 'self', '.', 'exp_avg_detector', '.', 'run', '(', ')', 'anom_scores_deri', '=', 'self', '.', 'derivative_detector', '.', 'run', '(', ')', 'anom_scores', '=', '{', '}', 'for', 'timestamp', 'in', 'anom_scores_ema', '.', 'timestamps', ':', '# Compute a weighted anomaly score.', 'anom_scores', '[', 'timestamp', ']', '=', 'max', '(', 'anom_scores_ema', '[', 'timestamp', ']', ',', 'anom_scores_ema', '[', 'timestamp', ']', '*', 'DEFAULT_DETECTOR_EMA_WEIGHT', '+', 'anom_scores_deri', '[', 'timestamp', ']', '*', '(', '1', '-', 'DEFAULT_DETECTOR_EMA_WEIGHT', ')', ')', '# If ema score is significant enough, take the bigger one of the weighted score and deri score.', 'if', 'anom_scores_ema', '[', 'timestamp', ']', '>', 'DEFAULT_DETECTOR_EMA_SIGNIFICANT', ':', 'anom_scores', '[', 'timestamp', ']', '=', 'max', '(', 'anom_scores', '[', 'timestamp', ']', ',', 'anom_scores_deri', '[', 'timestamp', ']', ')', 'self', '.', 'anom_scores', '=', 'TimeSeries', '(', 'self', '.', '_denoise_scores', '(', 'anom_scores', ')', ')'] | Set anomaly scores using a weighted sum. | ['Set', 'anomaly', 'scores', 'using', 'a', 'weighted', 'sum', '.'] | train | https://github.com/linkedin/luminol/blob/42e4ab969b774ff98f902d064cb041556017f635/src/luminol/algorithms/anomaly_detector_algorithms/default_detector.py#L35-L49 |
7,590 | prthkms/alex | alex/duckduckgo.py | parse_result | def parse_result(result):
"""parse_result(json result) -- print the web query according to the type
of result from duckduckgo.
"""
if(result['Type'] == 'D'):
print """There is more than one answer for this. Try making your query\
more specific. For example, if you want to learn about apple the company\
and not apple the fruit, try something like apple inc or apple computers.
"""
elif(result['Type'] == 'A'):
print result['AbstractText']
print '\nResults from DuckDuckGo'
elif(result['Type'] == 'C'):
for entry in result['RelatedTopics']:
print entry['Text']
print "\n"
else:
print "I do not know how to process this query at the moment." | python | def parse_result(result):
"""parse_result(json result) -- print the web query according to the type
of result from duckduckgo.
"""
if(result['Type'] == 'D'):
print """There is more than one answer for this. Try making your query\
more specific. For example, if you want to learn about apple the company\
and not apple the fruit, try something like apple inc or apple computers.
"""
elif(result['Type'] == 'A'):
print result['AbstractText']
print '\nResults from DuckDuckGo'
elif(result['Type'] == 'C'):
for entry in result['RelatedTopics']:
print entry['Text']
print "\n"
else:
print "I do not know how to process this query at the moment." | ['def', 'parse_result', '(', 'result', ')', ':', 'if', '(', 'result', '[', "'Type'", ']', '==', "'D'", ')', ':', 'print', '"""There is more than one answer for this. Try making your query\\\n\t\tmore specific. For example, if you want to learn about apple the company\\\n\t\tand not apple the fruit, try something like apple inc or apple computers. \n\t\t"""', 'elif', '(', 'result', '[', "'Type'", ']', '==', "'A'", ')', ':', 'print', 'result', '[', "'AbstractText'", ']', 'print', "'\\nResults from DuckDuckGo'", 'elif', '(', 'result', '[', "'Type'", ']', '==', "'C'", ')', ':', 'for', 'entry', 'in', 'result', '[', "'RelatedTopics'", ']', ':', 'print', 'entry', '[', "'Text'", ']', 'print', '"\\n"', 'else', ':', 'print', '"I do not know how to process this query at the moment."'] | parse_result(json result) -- print the web query according to the type
of result from duckduckgo. | ['parse_result', '(', 'json', 'result', ')', '--', 'print', 'the', 'web', 'query', 'according', 'to', 'the', 'type', 'of', 'result', 'from', 'duckduckgo', '.'] | train | https://github.com/prthkms/alex/blob/79d3167c877e94cc07db0aab55a35857fac67ef7/alex/duckduckgo.py#L8-L28 |
7,591 | saltstack/salt | salt/modules/file.py | blockreplace | def blockreplace(path,
marker_start='#-- start managed zone --',
marker_end='#-- end managed zone --',
content='',
append_if_not_found=False,
prepend_if_not_found=False,
backup='.bak',
dry_run=False,
show_changes=True,
append_newline=False,
insert_before_match=None,
insert_after_match=None):
'''
.. versionadded:: 2014.1.0
Replace content of a text block in a file, delimited by line markers
A block of content delimited by comments can help you manage several lines
entries without worrying about old entries removal.
.. note::
This function will store two copies of the file in-memory (the original
version and the edited version) in order to detect changes and only
edit the targeted file if necessary.
path
Filesystem path to the file to be edited
marker_start
The line content identifying a line as the start of the content block.
Note that the whole line containing this marker will be considered, so
whitespace or extra content before or after the marker is included in
final output
marker_end
The line content identifying the end of the content block. As of
versions 2017.7.5 and 2018.3.1, everything up to the text matching the
marker will be replaced, so it's important to ensure that your marker
includes the beginning of the text you wish to replace.
content
The content to be used between the two lines identified by marker_start
        and marker_end.
append_if_not_found : False
If markers are not found and set to ``True`` then, the markers and
content will be appended to the file.
prepend_if_not_found : False
If markers are not found and set to ``True`` then, the markers and
content will be prepended to the file.
insert_before_match
If markers are not found, this parameter can be set to a regex which will
insert the block before the first found occurrence in the file.
.. versionadded:: Neon
insert_after_match
If markers are not found, this parameter can be set to a regex which will
insert the block after the first found occurrence in the file.
.. versionadded:: Neon
backup
The file extension to use for a backup of the file if any edit is made.
Set to ``False`` to skip making a backup.
dry_run : False
If ``True``, do not make any edits to the file and simply return the
changes that *would* be made.
show_changes : True
Controls how changes are presented. If ``True``, this function will
return a unified diff of the changes made. If False, then it will
return a boolean (``True`` if any changes were made, otherwise
``False``).
append_newline : False
Controls whether or not a newline is appended to the content block. If
the value of this argument is ``True`` then a newline will be added to
the content block. If it is ``False``, then a newline will *not* be
added to the content block. If it is ``None`` then a newline will only
be added to the content block if it does not already end in a newline.
.. versionadded:: 2016.3.4
.. versionchanged:: 2017.7.5,2018.3.1
New behavior added when value is ``None``.
.. versionchanged:: 2019.2.0
The default value of this argument will change to ``None`` to match
the behavior of the :py:func:`file.blockreplace state
<salt.states.file.blockreplace>`
CLI Example:
.. code-block:: bash
salt '*' file.blockreplace /etc/hosts '#-- start managed zone foobar : DO NOT EDIT --' \\
'#-- end managed zone foobar --' $'10.0.1.1 foo.foobar\\n10.0.1.2 bar.foobar' True
'''
exclusive_params = [append_if_not_found, prepend_if_not_found, bool(insert_before_match), bool(insert_after_match)]
if sum(exclusive_params) > 1:
raise SaltInvocationError(
'Only one of append_if_not_found, prepend_if_not_found,'
' insert_before_match, and insert_after_match is permitted'
)
path = os.path.expanduser(path)
if not os.path.exists(path):
raise SaltInvocationError('File not found: {0}'.format(path))
try:
file_encoding = __utils__['files.get_encoding'](path)
except CommandExecutionError:
file_encoding = None
if __utils__['files.is_binary'](path):
if not file_encoding:
raise SaltInvocationError(
'Cannot perform string replacements on a binary file: {0}'
.format(path)
)
if insert_before_match or insert_after_match:
if insert_before_match:
if not isinstance(insert_before_match, six.string_types):
raise CommandExecutionError(
'RegEx expected in insert_before_match parameter.'
)
elif insert_after_match:
if not isinstance(insert_after_match, six.string_types):
raise CommandExecutionError(
'RegEx expected in insert_after_match parameter.'
)
if append_newline is None and not content.endswith((os.linesep, '\n')):
append_newline = True
# Split the content into a list of lines, removing newline characters. To
# ensure that we handle both Windows and POSIX newlines, first split on
# Windows newlines, and then split on POSIX newlines.
split_content = []
for win_line in content.split('\r\n'):
for content_line in win_line.split('\n'):
split_content.append(content_line)
line_count = len(split_content)
has_changes = False
orig_file = []
new_file = []
in_block = False
block_found = False
linesep = None
def _add_content(linesep, lines=None, include_marker_start=True,
end_line=None):
if lines is None:
lines = []
include_marker_start = True
if end_line is None:
end_line = marker_end
end_line = end_line.rstrip('\r\n') + linesep
if include_marker_start:
lines.append(marker_start + linesep)
if split_content:
for index, content_line in enumerate(split_content, 1):
if index != line_count:
lines.append(content_line + linesep)
else:
# We're on the last line of the content block
if append_newline:
lines.append(content_line + linesep)
lines.append(end_line)
else:
lines.append(content_line + end_line)
else:
lines.append(end_line)
return lines
# We do not use in-place editing to avoid file attrs modifications when
# no changes are required and to avoid any file access on a partially
# written file.
try:
fi_file = io.open(path, mode='r', encoding=file_encoding, newline='')
for line in fi_file:
write_line_to_new_file = True
if linesep is None:
# Auto-detect line separator
if line.endswith('\r\n'):
linesep = '\r\n'
elif line.endswith('\n'):
linesep = '\n'
else:
# No newline(s) in file, fall back to system's linesep
linesep = os.linesep
if marker_start in line:
# We've entered the content block
in_block = True
else:
if in_block:
# We're not going to write the lines from the old file to
# the new file until we have exited the block.
write_line_to_new_file = False
marker_end_pos = line.find(marker_end)
if marker_end_pos != -1:
# End of block detected
in_block = False
# We've found and exited the block
block_found = True
_add_content(linesep, lines=new_file,
include_marker_start=False,
end_line=line[marker_end_pos:])
# Save the line from the original file
orig_file.append(line)
if write_line_to_new_file:
new_file.append(line)
except (IOError, OSError) as exc:
raise CommandExecutionError(
'Failed to read from {0}: {1}'.format(path, exc)
)
finally:
if linesep is None:
# If the file was empty, we will not have set linesep yet. Assume
# the system's line separator. This is needed for when we
# prepend/append later on.
linesep = os.linesep
try:
fi_file.close()
except Exception:
pass
if in_block:
# unterminated block => bad, always fail
raise CommandExecutionError(
'Unterminated marked block. End of file reached before marker_end.'
)
if not block_found:
if prepend_if_not_found:
# add the markers and content at the beginning of file
prepended_content = _add_content(linesep)
prepended_content.extend(new_file)
new_file = prepended_content
block_found = True
elif append_if_not_found:
# Make sure we have a newline at the end of the file
if new_file:
if not new_file[-1].endswith(linesep):
new_file[-1] += linesep
# add the markers and content at the end of file
_add_content(linesep, lines=new_file)
block_found = True
elif insert_before_match or insert_after_match:
match_regex = insert_before_match or insert_after_match
match_idx = [i for i, item in enumerate(orig_file) if re.search(match_regex, item)]
if match_idx:
match_idx = match_idx[0]
for line in _add_content(linesep):
if insert_after_match:
match_idx += 1
new_file.insert(match_idx, line)
if insert_before_match:
match_idx += 1
block_found = True
if not block_found:
raise CommandExecutionError(
'Cannot edit marked block. Markers were not found in file.'
)
diff = __utils__['stringutils.get_diff'](orig_file, new_file)
has_changes = diff is not ''
if has_changes and not dry_run:
# changes detected
# backup file attrs
perms = {}
perms['user'] = get_user(path)
perms['group'] = get_group(path)
perms['mode'] = salt.utils.files.normalize_mode(get_mode(path))
# backup old content
if backup is not False:
backup_path = '{0}{1}'.format(path, backup)
shutil.copy2(path, backup_path)
# copy2 does not preserve ownership
check_perms(backup_path,
None,
perms['user'],
perms['group'],
perms['mode'])
# write new content in the file while avoiding partial reads
try:
fh_ = salt.utils.atomicfile.atomic_open(path, 'wb')
for line in new_file:
fh_.write(salt.utils.stringutils.to_bytes(line, encoding=file_encoding))
finally:
fh_.close()
# this may have overwritten file attrs
check_perms(path,
None,
perms['user'],
perms['group'],
perms['mode'])
if show_changes:
return diff
return has_changes | python | def blockreplace(path,
marker_start='#-- start managed zone --',
marker_end='#-- end managed zone --',
content='',
append_if_not_found=False,
prepend_if_not_found=False,
backup='.bak',
dry_run=False,
show_changes=True,
append_newline=False,
insert_before_match=None,
insert_after_match=None):
'''
.. versionadded:: 2014.1.0
Replace content of a text block in a file, delimited by line markers
A block of content delimited by comments can help you manage several lines
entries without worrying about old entries removal.
.. note::
This function will store two copies of the file in-memory (the original
version and the edited version) in order to detect changes and only
edit the targeted file if necessary.
path
Filesystem path to the file to be edited
marker_start
The line content identifying a line as the start of the content block.
Note that the whole line containing this marker will be considered, so
whitespace or extra content before or after the marker is included in
final output
marker_end
The line content identifying the end of the content block. As of
versions 2017.7.5 and 2018.3.1, everything up to the text matching the
marker will be replaced, so it's important to ensure that your marker
includes the beginning of the text you wish to replace.
content
The content to be used between the two lines identified by marker_start
and marker_stop.
append_if_not_found : False
If markers are not found and set to ``True`` then, the markers and
content will be appended to the file.
prepend_if_not_found : False
If markers are not found and set to ``True`` then, the markers and
content will be prepended to the file.
insert_before_match
If markers are not found, this parameter can be set to a regex which will
insert the block before the first found occurrence in the file.
.. versionadded:: Neon
insert_after_match
If markers are not found, this parameter can be set to a regex which will
insert the block after the first found occurrence in the file.
.. versionadded:: Neon
backup
The file extension to use for a backup of the file if any edit is made.
Set to ``False`` to skip making a backup.
dry_run : False
If ``True``, do not make any edits to the file and simply return the
changes that *would* be made.
show_changes : True
Controls how changes are presented. If ``True``, this function will
return a unified diff of the changes made. If False, then it will
return a boolean (``True`` if any changes were made, otherwise
``False``).
append_newline : False
Controls whether or not a newline is appended to the content block. If
the value of this argument is ``True`` then a newline will be added to
the content block. If it is ``False``, then a newline will *not* be
added to the content block. If it is ``None`` then a newline will only
be added to the content block if it does not already end in a newline.
.. versionadded:: 2016.3.4
.. versionchanged:: 2017.7.5,2018.3.1
New behavior added when value is ``None``.
.. versionchanged:: 2019.2.0
The default value of this argument will change to ``None`` to match
the behavior of the :py:func:`file.blockreplace state
<salt.states.file.blockreplace>`
CLI Example:
.. code-block:: bash
salt '*' file.blockreplace /etc/hosts '#-- start managed zone foobar : DO NOT EDIT --' \\
'#-- end managed zone foobar --' $'10.0.1.1 foo.foobar\\n10.0.1.2 bar.foobar' True
'''
exclusive_params = [append_if_not_found, prepend_if_not_found, bool(insert_before_match), bool(insert_after_match)]
if sum(exclusive_params) > 1:
raise SaltInvocationError(
'Only one of append_if_not_found, prepend_if_not_found,'
' insert_before_match, and insert_after_match is permitted'
)
path = os.path.expanduser(path)
if not os.path.exists(path):
raise SaltInvocationError('File not found: {0}'.format(path))
try:
file_encoding = __utils__['files.get_encoding'](path)
except CommandExecutionError:
file_encoding = None
if __utils__['files.is_binary'](path):
if not file_encoding:
raise SaltInvocationError(
'Cannot perform string replacements on a binary file: {0}'
.format(path)
)
if insert_before_match or insert_after_match:
if insert_before_match:
if not isinstance(insert_before_match, six.string_types):
raise CommandExecutionError(
'RegEx expected in insert_before_match parameter.'
)
elif insert_after_match:
if not isinstance(insert_after_match, six.string_types):
raise CommandExecutionError(
'RegEx expected in insert_after_match parameter.'
)
if append_newline is None and not content.endswith((os.linesep, '\n')):
append_newline = True
# Split the content into a list of lines, removing newline characters. To
# ensure that we handle both Windows and POSIX newlines, first split on
# Windows newlines, and then split on POSIX newlines.
split_content = []
for win_line in content.split('\r\n'):
for content_line in win_line.split('\n'):
split_content.append(content_line)
line_count = len(split_content)
has_changes = False
orig_file = []
new_file = []
in_block = False
block_found = False
linesep = None
def _add_content(linesep, lines=None, include_marker_start=True,
end_line=None):
if lines is None:
lines = []
include_marker_start = True
if end_line is None:
end_line = marker_end
end_line = end_line.rstrip('\r\n') + linesep
if include_marker_start:
lines.append(marker_start + linesep)
if split_content:
for index, content_line in enumerate(split_content, 1):
if index != line_count:
lines.append(content_line + linesep)
else:
# We're on the last line of the content block
if append_newline:
lines.append(content_line + linesep)
lines.append(end_line)
else:
lines.append(content_line + end_line)
else:
lines.append(end_line)
return lines
# We do not use in-place editing to avoid file attrs modifications when
# no changes are required and to avoid any file access on a partially
# written file.
try:
fi_file = io.open(path, mode='r', encoding=file_encoding, newline='')
for line in fi_file:
write_line_to_new_file = True
if linesep is None:
# Auto-detect line separator
if line.endswith('\r\n'):
linesep = '\r\n'
elif line.endswith('\n'):
linesep = '\n'
else:
# No newline(s) in file, fall back to system's linesep
linesep = os.linesep
if marker_start in line:
# We've entered the content block
in_block = True
else:
if in_block:
# We're not going to write the lines from the old file to
# the new file until we have exited the block.
write_line_to_new_file = False
marker_end_pos = line.find(marker_end)
if marker_end_pos != -1:
# End of block detected
in_block = False
# We've found and exited the block
block_found = True
_add_content(linesep, lines=new_file,
include_marker_start=False,
end_line=line[marker_end_pos:])
# Save the line from the original file
orig_file.append(line)
if write_line_to_new_file:
new_file.append(line)
except (IOError, OSError) as exc:
raise CommandExecutionError(
'Failed to read from {0}: {1}'.format(path, exc)
)
finally:
if linesep is None:
# If the file was empty, we will not have set linesep yet. Assume
# the system's line separator. This is needed for when we
# prepend/append later on.
linesep = os.linesep
try:
fi_file.close()
except Exception:
pass
if in_block:
# unterminated block => bad, always fail
raise CommandExecutionError(
'Unterminated marked block. End of file reached before marker_end.'
)
if not block_found:
if prepend_if_not_found:
# add the markers and content at the beginning of file
prepended_content = _add_content(linesep)
prepended_content.extend(new_file)
new_file = prepended_content
block_found = True
elif append_if_not_found:
# Make sure we have a newline at the end of the file
if new_file:
if not new_file[-1].endswith(linesep):
new_file[-1] += linesep
# add the markers and content at the end of file
_add_content(linesep, lines=new_file)
block_found = True
elif insert_before_match or insert_after_match:
match_regex = insert_before_match or insert_after_match
match_idx = [i for i, item in enumerate(orig_file) if re.search(match_regex, item)]
if match_idx:
match_idx = match_idx[0]
for line in _add_content(linesep):
if insert_after_match:
match_idx += 1
new_file.insert(match_idx, line)
if insert_before_match:
match_idx += 1
block_found = True
if not block_found:
raise CommandExecutionError(
'Cannot edit marked block. Markers were not found in file.'
)
diff = __utils__['stringutils.get_diff'](orig_file, new_file)
has_changes = diff is not ''
if has_changes and not dry_run:
# changes detected
# backup file attrs
perms = {}
perms['user'] = get_user(path)
perms['group'] = get_group(path)
perms['mode'] = salt.utils.files.normalize_mode(get_mode(path))
# backup old content
if backup is not False:
backup_path = '{0}{1}'.format(path, backup)
shutil.copy2(path, backup_path)
# copy2 does not preserve ownership
check_perms(backup_path,
None,
perms['user'],
perms['group'],
perms['mode'])
# write new content in the file while avoiding partial reads
try:
fh_ = salt.utils.atomicfile.atomic_open(path, 'wb')
for line in new_file:
fh_.write(salt.utils.stringutils.to_bytes(line, encoding=file_encoding))
finally:
fh_.close()
# this may have overwritten file attrs
check_perms(path,
None,
perms['user'],
perms['group'],
perms['mode'])
if show_changes:
return diff
return has_changes | ['def', 'blockreplace', '(', 'path', ',', 'marker_start', '=', "'#-- start managed zone --'", ',', 'marker_end', '=', "'#-- end managed zone --'", ',', 'content', '=', "''", ',', 'append_if_not_found', '=', 'False', ',', 'prepend_if_not_found', '=', 'False', ',', 'backup', '=', "'.bak'", ',', 'dry_run', '=', 'False', ',', 'show_changes', '=', 'True', ',', 'append_newline', '=', 'False', ',', 'insert_before_match', '=', 'None', ',', 'insert_after_match', '=', 'None', ')', ':', 'exclusive_params', '=', '[', 'append_if_not_found', ',', 'prepend_if_not_found', ',', 'bool', '(', 'insert_before_match', ')', ',', 'bool', '(', 'insert_after_match', ')', ']', 'if', 'sum', '(', 'exclusive_params', ')', '>', '1', ':', 'raise', 'SaltInvocationError', '(', "'Only one of append_if_not_found, prepend_if_not_found,'", "' insert_before_match, and insert_after_match is permitted'", ')', 'path', '=', 'os', '.', 'path', '.', 'expanduser', '(', 'path', ')', 'if', 'not', 'os', '.', 'path', '.', 'exists', '(', 'path', ')', ':', 'raise', 'SaltInvocationError', '(', "'File not found: {0}'", '.', 'format', '(', 'path', ')', ')', 'try', ':', 'file_encoding', '=', '__utils__', '[', "'files.get_encoding'", ']', '(', 'path', ')', 'except', 'CommandExecutionError', ':', 'file_encoding', '=', 'None', 'if', '__utils__', '[', "'files.is_binary'", ']', '(', 'path', ')', ':', 'if', 'not', 'file_encoding', ':', 'raise', 'SaltInvocationError', '(', "'Cannot perform string replacements on a binary file: {0}'", '.', 'format', '(', 'path', ')', ')', 'if', 'insert_before_match', 'or', 'insert_after_match', ':', 'if', 'insert_before_match', ':', 'if', 'not', 'isinstance', '(', 'insert_before_match', ',', 'six', '.', 'string_types', ')', ':', 'raise', 'CommandExecutionError', '(', "'RegEx expected in insert_before_match parameter.'", ')', 'elif', 'insert_after_match', ':', 'if', 'not', 'isinstance', '(', 'insert_after_match', ',', 'six', '.', 'string_types', ')', ':', 'raise', 'CommandExecutionError', '(', "'RegEx expected in insert_after_match parameter.'", ')', 'if', 'append_newline', 'is', 'None', 'and', 'not', 'content', '.', 'endswith', '(', '(', 'os', '.', 'linesep', ',', "'\\n'", ')', ')', ':', 'append_newline', '=', 'True', '# Split the content into a list of lines, removing newline characters. 
To', '# ensure that we handle both Windows and POSIX newlines, first split on', '# Windows newlines, and then split on POSIX newlines.', 'split_content', '=', '[', ']', 'for', 'win_line', 'in', 'content', '.', 'split', '(', "'\\r\\n'", ')', ':', 'for', 'content_line', 'in', 'win_line', '.', 'split', '(', "'\\n'", ')', ':', 'split_content', '.', 'append', '(', 'content_line', ')', 'line_count', '=', 'len', '(', 'split_content', ')', 'has_changes', '=', 'False', 'orig_file', '=', '[', ']', 'new_file', '=', '[', ']', 'in_block', '=', 'False', 'block_found', '=', 'False', 'linesep', '=', 'None', 'def', '_add_content', '(', 'linesep', ',', 'lines', '=', 'None', ',', 'include_marker_start', '=', 'True', ',', 'end_line', '=', 'None', ')', ':', 'if', 'lines', 'is', 'None', ':', 'lines', '=', '[', ']', 'include_marker_start', '=', 'True', 'if', 'end_line', 'is', 'None', ':', 'end_line', '=', 'marker_end', 'end_line', '=', 'end_line', '.', 'rstrip', '(', "'\\r\\n'", ')', '+', 'linesep', 'if', 'include_marker_start', ':', 'lines', '.', 'append', '(', 'marker_start', '+', 'linesep', ')', 'if', 'split_content', ':', 'for', 'index', ',', 'content_line', 'in', 'enumerate', '(', 'split_content', ',', '1', ')', ':', 'if', 'index', '!=', 'line_count', ':', 'lines', '.', 'append', '(', 'content_line', '+', 'linesep', ')', 'else', ':', "# We're on the last line of the content block", 'if', 'append_newline', ':', 'lines', '.', 'append', '(', 'content_line', '+', 'linesep', ')', 'lines', '.', 'append', '(', 'end_line', ')', 'else', ':', 'lines', '.', 'append', '(', 'content_line', '+', 'end_line', ')', 'else', ':', 'lines', '.', 'append', '(', 'end_line', ')', 'return', 'lines', '# We do not use in-place editing to avoid file attrs modifications when', '# no changes are required and to avoid any file access on a partially', '# written file.', 'try', ':', 'fi_file', '=', 'io', '.', 'open', '(', 'path', ',', 'mode', '=', "'r'", ',', 'encoding', '=', 'file_encoding', ',', 'newline', '=', "''", ')', 'for', 'line', 'in', 'fi_file', ':', 'write_line_to_new_file', '=', 'True', 'if', 'linesep', 'is', 'None', ':', '# Auto-detect line separator', 'if', 'line', '.', 'endswith', '(', "'\\r\\n'", ')', ':', 'linesep', '=', "'\\r\\n'", 'elif', 'line', '.', 'endswith', '(', "'\\n'", ')', ':', 'linesep', '=', "'\\n'", 'else', ':', "# No newline(s) in file, fall back to system's linesep", 'linesep', '=', 'os', '.', 'linesep', 'if', 'marker_start', 'in', 'line', ':', "# We've entered the content block", 'in_block', '=', 'True', 'else', ':', 'if', 'in_block', ':', "# We're not going to write the lines from the old file to", '# the new file until we have exited the block.', 'write_line_to_new_file', '=', 'False', 'marker_end_pos', '=', 'line', '.', 'find', '(', 'marker_end', ')', 'if', 'marker_end_pos', '!=', '-', '1', ':', '# End of block detected', 'in_block', '=', 'False', "# We've found and exited the block", 'block_found', '=', 'True', '_add_content', '(', 'linesep', ',', 'lines', '=', 'new_file', ',', 'include_marker_start', '=', 'False', ',', 'end_line', '=', 'line', '[', 'marker_end_pos', ':', ']', ')', '# Save the line from the original file', 'orig_file', '.', 'append', '(', 'line', ')', 'if', 'write_line_to_new_file', ':', 'new_file', '.', 'append', '(', 'line', ')', 'except', '(', 'IOError', ',', 'OSError', ')', 'as', 'exc', ':', 'raise', 'CommandExecutionError', '(', "'Failed to read from {0}: {1}'", '.', 'format', '(', 'path', ',', 'exc', ')', ')', 'finally', ':', 'if', 'linesep', 'is', 'None', ':', '# If the file was 
empty, we will not have set linesep yet. Assume', "# the system's line separator. This is needed for when we", '# prepend/append later on.', 'linesep', '=', 'os', '.', 'linesep', 'try', ':', 'fi_file', '.', 'close', '(', ')', 'except', 'Exception', ':', 'pass', 'if', 'in_block', ':', '# unterminated block => bad, always fail', 'raise', 'CommandExecutionError', '(', "'Unterminated marked block. End of file reached before marker_end.'", ')', 'if', 'not', 'block_found', ':', 'if', 'prepend_if_not_found', ':', '# add the markers and content at the beginning of file', 'prepended_content', '=', '_add_content', '(', 'linesep', ')', 'prepended_content', '.', 'extend', '(', 'new_file', ')', 'new_file', '=', 'prepended_content', 'block_found', '=', 'True', 'elif', 'append_if_not_found', ':', '# Make sure we have a newline at the end of the file', 'if', 'new_file', ':', 'if', 'not', 'new_file', '[', '-', '1', ']', '.', 'endswith', '(', 'linesep', ')', ':', 'new_file', '[', '-', '1', ']', '+=', 'linesep', '# add the markers and content at the end of file', '_add_content', '(', 'linesep', ',', 'lines', '=', 'new_file', ')', 'block_found', '=', 'True', 'elif', 'insert_before_match', 'or', 'insert_after_match', ':', 'match_regex', '=', 'insert_before_match', 'or', 'insert_after_match', 'match_idx', '=', '[', 'i', 'for', 'i', ',', 'item', 'in', 'enumerate', '(', 'orig_file', ')', 'if', 're', '.', 'search', '(', 'match_regex', ',', 'item', ')', ']', 'if', 'match_idx', ':', 'match_idx', '=', 'match_idx', '[', '0', ']', 'for', 'line', 'in', '_add_content', '(', 'linesep', ')', ':', 'if', 'insert_after_match', ':', 'match_idx', '+=', '1', 'new_file', '.', 'insert', '(', 'match_idx', ',', 'line', ')', 'if', 'insert_before_match', ':', 'match_idx', '+=', '1', 'block_found', '=', 'True', 'if', 'not', 'block_found', ':', 'raise', 'CommandExecutionError', '(', "'Cannot edit marked block. Markers were not found in file.'", ')', 'diff', '=', '__utils__', '[', "'stringutils.get_diff'", ']', '(', 'orig_file', ',', 'new_file', ')', 'has_changes', '=', 'diff', 'is', 'not', "''", 'if', 'has_changes', 'and', 'not', 'dry_run', ':', '# changes detected', '# backup file attrs', 'perms', '=', '{', '}', 'perms', '[', "'user'", ']', '=', 'get_user', '(', 'path', ')', 'perms', '[', "'group'", ']', '=', 'get_group', '(', 'path', ')', 'perms', '[', "'mode'", ']', '=', 'salt', '.', 'utils', '.', 'files', '.', 'normalize_mode', '(', 'get_mode', '(', 'path', ')', ')', '# backup old content', 'if', 'backup', 'is', 'not', 'False', ':', 'backup_path', '=', "'{0}{1}'", '.', 'format', '(', 'path', ',', 'backup', ')', 'shutil', '.', 'copy2', '(', 'path', ',', 'backup_path', ')', '# copy2 does not preserve ownership', 'check_perms', '(', 'backup_path', ',', 'None', ',', 'perms', '[', "'user'", ']', ',', 'perms', '[', "'group'", ']', ',', 'perms', '[', "'mode'", ']', ')', '# write new content in the file while avoiding partial reads', 'try', ':', 'fh_', '=', 'salt', '.', 'utils', '.', 'atomicfile', '.', 'atomic_open', '(', 'path', ',', "'wb'", ')', 'for', 'line', 'in', 'new_file', ':', 'fh_', '.', 'write', '(', 'salt', '.', 'utils', '.', 'stringutils', '.', 'to_bytes', '(', 'line', ',', 'encoding', '=', 'file_encoding', ')', ')', 'finally', ':', 'fh_', '.', 'close', '(', ')', '# this may have overwritten file attrs', 'check_perms', '(', 'path', ',', 'None', ',', 'perms', '[', "'user'", ']', ',', 'perms', '[', "'group'", ']', ',', 'perms', '[', "'mode'", ']', ')', 'if', 'show_changes', ':', 'return', 'diff', 'return', 'has_changes'] | .. 
versionadded:: 2014.1.0
Replace content of a text block in a file, delimited by line markers
A block of content delimited by comments can help you manage several lines
entries without worrying about old entries removal.
.. note::
This function will store two copies of the file in-memory (the original
version and the edited version) in order to detect changes and only
edit the targeted file if necessary.
path
Filesystem path to the file to be edited
marker_start
The line content identifying a line as the start of the content block.
Note that the whole line containing this marker will be considered, so
whitespace or extra content before or after the marker is included in
final output
marker_end
The line content identifying the end of the content block. As of
versions 2017.7.5 and 2018.3.1, everything up to the text matching the
marker will be replaced, so it's important to ensure that your marker
includes the beginning of the text you wish to replace.
content
The content to be used between the two lines identified by marker_start
and marker_stop.
append_if_not_found : False
If markers are not found and set to ``True`` then, the markers and
content will be appended to the file.
prepend_if_not_found : False
If markers are not found and set to ``True`` then, the markers and
content will be prepended to the file.
insert_before_match
If markers are not found, this parameter can be set to a regex which will
insert the block before the first found occurrence in the file.
.. versionadded:: Neon
insert_after_match
If markers are not found, this parameter can be set to a regex which will
insert the block after the first found occurrence in the file.
.. versionadded:: Neon
backup
The file extension to use for a backup of the file if any edit is made.
Set to ``False`` to skip making a backup.
dry_run : False
If ``True``, do not make any edits to the file and simply return the
changes that *would* be made.
show_changes : True
Controls how changes are presented. If ``True``, this function will
return a unified diff of the changes made. If False, then it will
return a boolean (``True`` if any changes were made, otherwise
``False``).
append_newline : False
Controls whether or not a newline is appended to the content block. If
the value of this argument is ``True`` then a newline will be added to
the content block. If it is ``False``, then a newline will *not* be
added to the content block. If it is ``None`` then a newline will only
be added to the content block if it does not already end in a newline.
.. versionadded:: 2016.3.4
.. versionchanged:: 2017.7.5,2018.3.1
New behavior added when value is ``None``.
.. versionchanged:: 2019.2.0
The default value of this argument will change to ``None`` to match
the behavior of the :py:func:`file.blockreplace state
<salt.states.file.blockreplace>`
CLI Example:
.. code-block:: bash
salt '*' file.blockreplace /etc/hosts '#-- start managed zone foobar : DO NOT EDIT --' \\
'#-- end managed zone foobar --' $'10.0.1.1 foo.foobar\\n10.0.1.2 bar.foobar' True | ['..', 'versionadded', '::', '2014', '.', '1', '.', '0'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/file.py#L2453-L2776 |
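The `file.blockreplace` record above implements marker-delimited block editing with newline detection, backups and atomic writes. Below is a minimal, self-contained Python sketch of just the marker-scan idea it describes (illustrative only; it omits insert_before/after handling, backups and permission restoration, and the sample hosts lines are placeholders):

def replace_block(lines, marker_start, marker_end, content_lines):
    # Rebuild the file, swapping everything between the markers for content_lines.
    out, in_block, found = [], False, False
    for line in lines:
        if marker_start in line:
            in_block, found = True, True
            out.append(line)
            out.extend(content_lines)
        elif in_block:
            # Drop the old block lines until the end marker is reached.
            if marker_end in line:
                in_block = False
                out.append(line)
        else:
            out.append(line)
    if in_block:
        raise ValueError('Unterminated marked block')
    if not found:
        raise ValueError('Markers were not found in file')
    return out

hosts = ['127.0.0.1 localhost\n',
         '#-- start managed zone foobar : DO NOT EDIT --\n',
         '10.0.0.9 stale.foobar\n',
         '#-- end managed zone foobar --\n']
print(''.join(replace_block(hosts,
                            '#-- start managed zone foobar',
                            '#-- end managed zone foobar',
                            ['10.0.1.1 foo.foobar\n', '10.0.1.2 bar.foobar\n'])))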
7,592 | Huong-nt/flask-rak | flask_rak/core.py | RAK.intent | def intent(self, intent_name):
"""Decorator routes an Rogo IntentRequest.
Functions decorated as an intent are registered as the view function for the Intent's URL,
and provide the backend responses to give your Skill its functionality.
@ask.intent('WeatherIntent')
def weather(city):
return statement('I predict great weather for {}'.format(city))
Arguments:
intent_name {str} -- Name of the intent request to be mapped to the decorated function
"""
def decorator(f):
self._intent_view_funcs[intent_name] = f
@wraps(f)
def wrapper(*args, **kw):
self._flask_view_func(*args, **kw)
return f
return decorator | python | def intent(self, intent_name):
"""Decorator routes an Rogo IntentRequest.
Functions decorated as an intent are registered as the view function for the Intent's URL,
and provide the backend responses to give your Skill its functionality.
@ask.intent('WeatherIntent')
def weather(city):
return statement('I predict great weather for {}'.format(city))
Arguments:
intent_name {str} -- Name of the intent request to be mapped to the decorated function
"""
def decorator(f):
self._intent_view_funcs[intent_name] = f
@wraps(f)
def wrapper(*args, **kw):
self._flask_view_func(*args, **kw)
return f
return decorator | ['def', 'intent', '(', 'self', ',', 'intent_name', ')', ':', 'def', 'decorator', '(', 'f', ')', ':', 'self', '.', '_intent_view_funcs', '[', 'intent_name', ']', '=', 'f', '@', 'wraps', '(', 'f', ')', 'def', 'wrapper', '(', '*', 'args', ',', '*', '*', 'kw', ')', ':', 'self', '.', '_flask_view_func', '(', '*', 'args', ',', '*', '*', 'kw', ')', 'return', 'f', 'return', 'decorator'] | Decorator routes an Rogo IntentRequest.
Functions decorated as an intent are registered as the view function for the Intent's URL,
and provide the backend responses to give your Skill its functionality.
@ask.intent('WeatherIntent')
def weather(city):
return statement('I predict great weather for {}'.format(city))
Arguments:
intent_name {str} -- Name of the intent request to be mapped to the decorated function | ['Decorator', 'routes', 'an', 'Rogo', 'IntentRequest', '.', 'Functions', 'decorated', 'as', 'an', 'intent', 'are', 'registered', 'as', 'the', 'view', 'function', 'for', 'the', 'Intent', 's', 'URL', 'and', 'provide', 'the', 'backend', 'responses', 'to', 'give', 'your', 'Skill', 'its', 'functionality', '.'] | train | https://github.com/Huong-nt/flask-rak/blob/ffe16b0fc3d49e83c1d220c445ce14632219f69d/flask_rak/core.py#L155-L172 |
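A usage sketch for the `RAK.intent` decorator in the record above, assuming flask-rak is wired up like its flask-ask counterpart; the `statement` import, constructor route and skill layout are assumptions rather than details from the record:

from flask import Flask
from flask_rak import RAK, statement   # 'statement' import path is an assumption

app = Flask(__name__)
rak = RAK(app, '/webhook')             # route is illustrative

@rak.intent('WeatherIntent')
def weather():
    # The decorator stores this handler in rak._intent_view_funcs['WeatherIntent'].
    return statement('I predict great weather today')

if __name__ == '__main__':
    app.run(debug=True)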
7,593 | matthias-k/cyipopt | setup.py | pkgconfig | def pkgconfig(*packages, **kw):
"""Based on http://code.activestate.com/recipes/502261-python-distutils-pkg-config/#c2"""
flag_map = {'-I': 'include_dirs', '-L': 'library_dirs', '-l': 'libraries'}
output = sp.Popen(["pkg-config", "--libs", "--cflags"] + list(packages),
stdout=sp.PIPE).communicate()[0]
if six.PY3:
output = output.decode('utf8')
for token in output.split():
if token[:2] in flag_map:
kw.setdefault(flag_map.get(token[:2]), []).append(token[2:])
else:
kw.setdefault('extra_compile_args', []).append(token)
kw['include_dirs'] += [np.get_include()]
return kw | python | def pkgconfig(*packages, **kw):
"""Based on http://code.activestate.com/recipes/502261-python-distutils-pkg-config/#c2"""
flag_map = {'-I': 'include_dirs', '-L': 'library_dirs', '-l': 'libraries'}
output = sp.Popen(["pkg-config", "--libs", "--cflags"] + list(packages),
stdout=sp.PIPE).communicate()[0]
if six.PY3:
output = output.decode('utf8')
for token in output.split():
if token[:2] in flag_map:
kw.setdefault(flag_map.get(token[:2]), []).append(token[2:])
else:
kw.setdefault('extra_compile_args', []).append(token)
kw['include_dirs'] += [np.get_include()]
return kw | ['def', 'pkgconfig', '(', '*', 'packages', ',', '*', '*', 'kw', ')', ':', 'flag_map', '=', '{', "'-I'", ':', "'include_dirs'", ',', "'-L'", ':', "'library_dirs'", ',', "'-l'", ':', "'libraries'", '}', 'output', '=', 'sp', '.', 'Popen', '(', '[', '"pkg-config"', ',', '"--libs"', ',', '"--cflags"', ']', '+', 'list', '(', 'packages', ')', ',', 'stdout', '=', 'sp', '.', 'PIPE', ')', '.', 'communicate', '(', ')', '[', '0', ']', 'if', 'six', '.', 'PY3', ':', 'output', '=', 'output', '.', 'decode', '(', "'utf8'", ')', 'for', 'token', 'in', 'output', '.', 'split', '(', ')', ':', 'if', 'token', '[', ':', '2', ']', 'in', 'flag_map', ':', 'kw', '.', 'setdefault', '(', 'flag_map', '.', 'get', '(', 'token', '[', ':', '2', ']', ')', ',', '[', ']', ')', '.', 'append', '(', 'token', '[', '2', ':', ']', ')', 'else', ':', 'kw', '.', 'setdefault', '(', "'extra_compile_args'", ',', '[', ']', ')', '.', 'append', '(', 'token', ')', 'kw', '[', "'include_dirs'", ']', '+=', '[', 'np', '.', 'get_include', '(', ')', ']', 'return', 'kw'] | Based on http://code.activestate.com/recipes/502261-python-distutils-pkg-config/#c2 | ['Based', 'on', 'http', ':', '//', 'code', '.', 'activestate', '.', 'com', '/', 'recipes', '/', '502261', '-', 'python', '-', 'distutils', '-', 'pkg', '-', 'config', '/', '#c2'] | train | https://github.com/matthias-k/cyipopt/blob/ed03f54de2e0b8c8ba4c0aa18ab9ab6c8846bc19/setup.py#L48-L64 |
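A usage sketch for the `pkgconfig` helper above, feeding the collected flags into a setuptools Extension. The `ipopt` pkg-config name and source path are assumptions; note the helper expects pkg-config to emit at least one `-I` flag so that `include_dirs` already exists when NumPy's headers are appended:

from setuptools import setup, Extension

ext = Extension('cyipopt',
                sources=['src/cyipopt.pyx'],   # illustrative path
                **pkgconfig('ipopt'))          # merged -I/-L/-l flags plus NumPy include dir

setup(name='cyipopt', ext_modules=[ext])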
7,594 | sighingnow/parsec.py | src/parsec/__init__.py | one_of | def one_of(s):
'''Parser a char from specified string.'''
@Parser
def one_of_parser(text, index=0):
if index < len(text) and text[index] in s:
return Value.success(index + 1, text[index])
else:
return Value.failure(index, 'one of {}'.format(s))
return one_of_parser | python | def one_of(s):
'''Parser a char from specified string.'''
@Parser
def one_of_parser(text, index=0):
if index < len(text) and text[index] in s:
return Value.success(index + 1, text[index])
else:
return Value.failure(index, 'one of {}'.format(s))
return one_of_parser | ['def', 'one_of', '(', 's', ')', ':', '@', 'Parser', 'def', 'one_of_parser', '(', 'text', ',', 'index', '=', '0', ')', ':', 'if', 'index', '<', 'len', '(', 'text', ')', 'and', 'text', '[', 'index', ']', 'in', 's', ':', 'return', 'Value', '.', 'success', '(', 'index', '+', '1', ',', 'text', '[', 'index', ']', ')', 'else', ':', 'return', 'Value', '.', 'failure', '(', 'index', ',', "'one of {}'", '.', 'format', '(', 's', ')', ')', 'return', 'one_of_parser'] | Parser a char from specified string. | ['Parser', 'a', 'char', 'from', 'specified', 'string', '.'] | train | https://github.com/sighingnow/parsec.py/blob/ed50e1e259142757470b925f8d20dfe5ad223af0/src/parsec/__init__.py#L567-L575 |
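A usage sketch for `one_of` above: it consumes exactly one character when the head of the input is in the given set, and fails otherwise. The `.parse` method and `ParseError` follow parsec.py's public API; treat them as assumptions here:

from parsec import one_of, ParseError

sign = one_of('+-')
print(sign.parse('+123'))      # '+', only the first character is consumed

try:
    sign.parse('x123')
except ParseError as err:
    print('failed:', err)      # reports that one of "+-" was expected at index 0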
7,595 | koszullab/metaTOR | metator/scripts/figures.py | draw_sparse_matrix | def draw_sparse_matrix(
array_filename,
output_image,
vmax=DEFAULT_SATURATION_THRESHOLD,
max_size_matrix=DEFAULT_MAX_SIZE_MATRIX,
):
"""Draw a quick preview of a sparse matrix with automated
binning and normalization.
"""
matrix = np.loadtxt(array_filename, dtype=np.int32, skiprows=1)
try:
row, col, data = matrix.T
except ValueError:
row, col, data = matrix
size = max(np.amax(row), np.amax(col)) + 1
S = sparse.coo_matrix((data, (row, col)), shape=(size, size))
if max_size_matrix <= 0:
binning = 1
else:
binning = (size // max_size_matrix) + 1
binned_S = hcs.bin_sparse(S, subsampling_factor=binning)
dense_S = binned_S.todense()
dense_S = dense_S + dense_S.T - np.diag(np.diag(dense_S))
normed_S = hcs.normalize_dense(dense_S)
spaceless_pdf_plot_maker(normed_S, output_image, vmax=vmax) | python | def draw_sparse_matrix(
array_filename,
output_image,
vmax=DEFAULT_SATURATION_THRESHOLD,
max_size_matrix=DEFAULT_MAX_SIZE_MATRIX,
):
"""Draw a quick preview of a sparse matrix with automated
binning and normalization.
"""
matrix = np.loadtxt(array_filename, dtype=np.int32, skiprows=1)
try:
row, col, data = matrix.T
except ValueError:
row, col, data = matrix
size = max(np.amax(row), np.amax(col)) + 1
S = sparse.coo_matrix((data, (row, col)), shape=(size, size))
if max_size_matrix <= 0:
binning = 1
else:
binning = (size // max_size_matrix) + 1
binned_S = hcs.bin_sparse(S, subsampling_factor=binning)
dense_S = binned_S.todense()
dense_S = dense_S + dense_S.T - np.diag(np.diag(dense_S))
normed_S = hcs.normalize_dense(dense_S)
spaceless_pdf_plot_maker(normed_S, output_image, vmax=vmax) | ['def', 'draw_sparse_matrix', '(', 'array_filename', ',', 'output_image', ',', 'vmax', '=', 'DEFAULT_SATURATION_THRESHOLD', ',', 'max_size_matrix', '=', 'DEFAULT_MAX_SIZE_MATRIX', ',', ')', ':', 'matrix', '=', 'np', '.', 'loadtxt', '(', 'array_filename', ',', 'dtype', '=', 'np', '.', 'int32', ',', 'skiprows', '=', '1', ')', 'try', ':', 'row', ',', 'col', ',', 'data', '=', 'matrix', '.', 'T', 'except', 'ValueError', ':', 'row', ',', 'col', ',', 'data', '=', 'matrix', 'size', '=', 'max', '(', 'np', '.', 'amax', '(', 'row', ')', ',', 'np', '.', 'amax', '(', 'col', ')', ')', '+', '1', 'S', '=', 'sparse', '.', 'coo_matrix', '(', '(', 'data', ',', '(', 'row', ',', 'col', ')', ')', ',', 'shape', '=', '(', 'size', ',', 'size', ')', ')', 'if', 'max_size_matrix', '<=', '0', ':', 'binning', '=', '1', 'else', ':', 'binning', '=', '(', 'size', '//', 'max_size_matrix', ')', '+', '1', 'binned_S', '=', 'hcs', '.', 'bin_sparse', '(', 'S', ',', 'subsampling_factor', '=', 'binning', ')', 'dense_S', '=', 'binned_S', '.', 'todense', '(', ')', 'dense_S', '=', 'dense_S', '+', 'dense_S', '.', 'T', '-', 'np', '.', 'diag', '(', 'np', '.', 'diag', '(', 'dense_S', ')', ')', 'normed_S', '=', 'hcs', '.', 'normalize_dense', '(', 'dense_S', ')', 'spaceless_pdf_plot_maker', '(', 'normed_S', ',', 'output_image', ',', 'vmax', '=', 'vmax', ')'] | Draw a quick preview of a sparse matrix with automated
binning and normalization. | ['Draw', 'a', 'quick', 'preview', 'of', 'a', 'sparse', 'matrix', 'with', 'automated', 'binning', 'and', 'normalization', '.'] | train | https://github.com/koszullab/metaTOR/blob/0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a/metator/scripts/figures.py#L75-L100 |
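A usage sketch for `draw_sparse_matrix` above; file names and threshold values are placeholders, and the input is the three-column sparse contact file the function expects (row, col, value with one header line):

draw_sparse_matrix('abs_fragments_contacts_weighted.txt',   # row col value, one header line
                   'contact_map_preview.pdf',
                   vmax=99,                                  # saturation threshold (placeholder)
                   max_size_matrix=2000)                     # bin down to roughly 2000x2000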
7,596 | protream/iquery | iquery/lyrics.py | query | def query(song_name):
"""CLI:
$ iquery -l song_name
"""
r = requests_get(SONG_SEARCH_URL.format(song_name))
try:
# Get the first result.
song_url = re.search(r'(http://www.xiami.com/song/\d+)', r.text).group(0)
except AttributeError:
exit_after_echo(SONG_NOT_FOUND)
return SongPage(song_url) | python | def query(song_name):
"""CLI:
$ iquery -l song_name
"""
r = requests_get(SONG_SEARCH_URL.format(song_name))
try:
# Get the first result.
song_url = re.search(r'(http://www.xiami.com/song/\d+)', r.text).group(0)
except AttributeError:
exit_after_echo(SONG_NOT_FOUND)
return SongPage(song_url) | ['def', 'query', '(', 'song_name', ')', ':', 'r', '=', 'requests_get', '(', 'SONG_SEARCH_URL', '.', 'format', '(', 'song_name', ')', ')', 'try', ':', '# Get the first result.', 'song_url', '=', 're', '.', 'search', '(', "r'(http://www.xiami.com/song/\\d+)'", ',', 'r', '.', 'text', ')', '.', 'group', '(', '0', ')', 'except', 'AttributeError', ':', 'exit_after_echo', '(', 'SONG_NOT_FOUND', ')', 'return', 'SongPage', '(', 'song_url', ')'] | CLI:
$ iquery -l song_name | ['CLI', ':'] | train | https://github.com/protream/iquery/blob/7272e68af610f1dd63cf695209cfa44b75adc0e6/iquery/lyrics.py#L58-L71 |
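A usage sketch for the lyrics `query` helper above; the song title is illustrative, and the call exits the process via `exit_after_echo` when no Xiami result is found:

page = query('Shape of You')   # SongPage built from the first Xiami search hit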
7,597 | estnltk/estnltk | estnltk/text.py | Text.timex_starts | def timex_starts(self):
"""The list of start positions of ``timexes`` layer elements."""
if not self.is_tagged(TIMEXES):
self.tag_timexes()
return self.starts(TIMEXES) | python | def timex_starts(self):
"""The list of start positions of ``timexes`` layer elements."""
if not self.is_tagged(TIMEXES):
self.tag_timexes()
return self.starts(TIMEXES) | ['def', 'timex_starts', '(', 'self', ')', ':', 'if', 'not', 'self', '.', 'is_tagged', '(', 'TIMEXES', ')', ':', 'self', '.', 'tag_timexes', '(', ')', 'return', 'self', '.', 'starts', '(', 'TIMEXES', ')'] | The list of start positions of ``timexes`` layer elements. | ['The', 'list', 'of', 'start', 'positions', 'of', 'timexes', 'layer', 'elements', '.'] | train | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/text.py#L923-L927 |
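A usage sketch for the `timex_starts` property above, assuming estnltk 1.x's `Text` class; the Estonian sentence is illustrative:

from estnltk import Text

text = Text('Potsataja ütles eile, et vaatame homme uuesti.')
print(text.timex_starts)   # character offsets where detected time expressions begin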
7,598 | kevin-brown/drf-json-api | rest_framework_json_api/renderers.py | JsonApiMixin.render | def render(self, data, accepted_media_type=None, renderer_context=None):
"""Convert native data to JSON API
Tries each of the methods in `wrappers`, using the first successful
one, or raises `WrapperNotApplicable`.
"""
wrapper = None
success = False
for wrapper_name in self.wrappers:
wrapper_method = getattr(self, wrapper_name)
try:
wrapper = wrapper_method(data, renderer_context)
except WrapperNotApplicable:
pass
else:
success = True
break
if not success:
raise WrapperNotApplicable(
'No acceptable wrappers found for response.',
data=data, renderer_context=renderer_context)
renderer_context["indent"] = 4
return super(JsonApiMixin, self).render(
data=wrapper,
accepted_media_type=accepted_media_type,
renderer_context=renderer_context) | python | def render(self, data, accepted_media_type=None, renderer_context=None):
"""Convert native data to JSON API
Tries each of the methods in `wrappers`, using the first successful
one, or raises `WrapperNotApplicable`.
"""
wrapper = None
success = False
for wrapper_name in self.wrappers:
wrapper_method = getattr(self, wrapper_name)
try:
wrapper = wrapper_method(data, renderer_context)
except WrapperNotApplicable:
pass
else:
success = True
break
if not success:
raise WrapperNotApplicable(
'No acceptable wrappers found for response.',
data=data, renderer_context=renderer_context)
renderer_context["indent"] = 4
return super(JsonApiMixin, self).render(
data=wrapper,
accepted_media_type=accepted_media_type,
renderer_context=renderer_context) | ['def', 'render', '(', 'self', ',', 'data', ',', 'accepted_media_type', '=', 'None', ',', 'renderer_context', '=', 'None', ')', ':', 'wrapper', '=', 'None', 'success', '=', 'False', 'for', 'wrapper_name', 'in', 'self', '.', 'wrappers', ':', 'wrapper_method', '=', 'getattr', '(', 'self', ',', 'wrapper_name', ')', 'try', ':', 'wrapper', '=', 'wrapper_method', '(', 'data', ',', 'renderer_context', ')', 'except', 'WrapperNotApplicable', ':', 'pass', 'else', ':', 'success', '=', 'True', 'break', 'if', 'not', 'success', ':', 'raise', 'WrapperNotApplicable', '(', "'No acceptable wrappers found for response.'", ',', 'data', '=', 'data', ',', 'renderer_context', '=', 'renderer_context', ')', 'renderer_context', '[', '"indent"', ']', '=', '4', 'return', 'super', '(', 'JsonApiMixin', ',', 'self', ')', '.', 'render', '(', 'data', '=', 'wrapper', ',', 'accepted_media_type', '=', 'accepted_media_type', ',', 'renderer_context', '=', 'renderer_context', ')'] | Convert native data to JSON API
Tries each of the methods in `wrappers`, using the first successful
one, or raises `WrapperNotApplicable`. | ['Convert', 'native', 'data', 'to', 'JSON', 'API'] | train | https://github.com/kevin-brown/drf-json-api/blob/664643bd02c0d92eadbd1f8c9d8507adf0538df6/rest_framework_json_api/renderers.py#L47-L77 |
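The `JsonApiMixin.render` record above tries a list of wrapper methods in order and uses the first one that does not raise `WrapperNotApplicable`. A stripped-down, self-contained sketch of that dispatch pattern (all names apart from `WrapperNotApplicable` are placeholders, not the real wrapper list):

class WrapperNotApplicable(Exception):
    pass

class MiniRenderer:
    wrappers = ['wrap_error', 'wrap_resource']

    def wrap_error(self, data):
        if 'errors' not in data:
            raise WrapperNotApplicable('not an error payload')
        return {'errors': data['errors']}

    def wrap_resource(self, data):
        return {'data': data}

    def render(self, data):
        # Try each wrapper in order; the first that applies wins.
        for name in self.wrappers:
            try:
                return getattr(self, name)(data)
            except WrapperNotApplicable:
                continue
        raise WrapperNotApplicable('No acceptable wrappers found for response.')

print(MiniRenderer().render({'id': 1}))            # {'data': {'id': 1}}
print(MiniRenderer().render({'errors': ['bad']}))  # {'errors': ['bad']}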
7,599 | pandas-dev/pandas | pandas/core/indexing.py | _NDFrameIndexer._multi_take | def _multi_take(self, tup):
"""
Create the indexers for the passed tuple of keys, and execute the take
operation. This allows the take operation to be executed all at once -
rather than once for each dimension - improving efficiency.
Parameters
----------
tup : tuple
Tuple of indexers, one per axis
Returns
-------
values: same type as the object being indexed
"""
# GH 836
o = self.obj
d = {axis: self._get_listlike_indexer(key, axis)
for (key, axis) in zip(tup, o._AXIS_ORDERS)}
return o._reindex_with_indexers(d, copy=True, allow_dups=True) | python | def _multi_take(self, tup):
"""
Create the indexers for the passed tuple of keys, and execute the take
operation. This allows the take operation to be executed all at once -
rather than once for each dimension - improving efficiency.
Parameters
----------
tup : tuple
Tuple of indexers, one per axis
Returns
-------
values: same type as the object being indexed
"""
# GH 836
o = self.obj
d = {axis: self._get_listlike_indexer(key, axis)
for (key, axis) in zip(tup, o._AXIS_ORDERS)}
return o._reindex_with_indexers(d, copy=True, allow_dups=True) | ['def', '_multi_take', '(', 'self', ',', 'tup', ')', ':', '# GH 836', 'o', '=', 'self', '.', 'obj', 'd', '=', '{', 'axis', ':', 'self', '.', '_get_listlike_indexer', '(', 'key', ',', 'axis', ')', 'for', '(', 'key', ',', 'axis', ')', 'in', 'zip', '(', 'tup', ',', 'o', '.', '_AXIS_ORDERS', ')', '}', 'return', 'o', '.', '_reindex_with_indexers', '(', 'd', ',', 'copy', '=', 'True', ',', 'allow_dups', '=', 'True', ')'] | Create the indexers for the passed tuple of keys, and execute the take
operation. This allows the take operation to be executed all at once -
rather than once for each dimension - improving efficiency.
Parameters
----------
tup : tuple
Tuple of indexers, one per axis
Returns
-------
values: same type as the object being indexed | ['Create', 'the', 'indexers', 'for', 'the', 'passed', 'tuple', 'of', 'keys', 'and', 'execute', 'the', 'take', 'operation', '.', 'This', 'allows', 'the', 'take', 'operation', 'to', 'be', 'executed', 'all', 'at', 'once', '-', 'rather', 'than', 'once', 'for', 'each', 'dimension', '-', 'improving', 'efficiency', '.'] | train | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexing.py#L914-L933 |
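The pandas `_multi_take` record above is an internal path taken when list-like label keys are supplied for several axes at once; a sketch of the public indexing that exercises it (the frame contents are illustrative):

import pandas as pd

df = pd.DataFrame({'x': [1, 2, 3], 'y': [4, 5, 6]},
                  index=['a', 'b', 'c'])
# List-like keys on both axes are reindexed in a single pass internally.
sub = df.loc[['a', 'c'], ['y']]
print(sub)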