Dataset columns and value ranges:

| Column | Type | Values / lengths |
|---|---|---|
| Unnamed: 0 | int64 | 0 to 10k |
| repository_name | string | lengths 7 to 54 |
| func_path_in_repository | string | lengths 5 to 223 |
| func_name | string | lengths 1 to 134 |
| whole_func_string | string | lengths 100 to 30.3k |
| language | string | 1 class (python) |
| func_code_string | string | lengths 100 to 30.3k |
| func_code_tokens | string | lengths 138 to 33.2k |
| func_documentation_string | string | lengths 1 to 15k |
| func_documentation_tokens | string | lengths 5 to 5.14k |
| split_name | string | 1 class (train) |
| func_code_url | string | lengths 91 to 315 |

**4,000** | lincolnloop/salmon | salmon/core/graph.py | WhisperDatabase._create | language: python | split_name: train

whole_func_string / func_code_string:

```python
def _create(self):
    """Create the Whisper file on disk"""
    if not os.path.exists(settings.SALMON_WHISPER_DB_PATH):
        os.makedirs(settings.SALMON_WHISPER_DB_PATH)
    archives = [whisper.parseRetentionDef(retentionDef)
                for retentionDef in settings.ARCHIVES.split(",")]
    whisper.create(self.path, archives,
                   xFilesFactor=settings.XFILEFACTOR,
                   aggregationMethod=settings.AGGREGATION_METHOD)
```

func_documentation_string: Create the Whisper file on disk

func_code_url: https://github.com/lincolnloop/salmon/blob/62a965ad9716707ea1db4afb5d9646766f29b64b/salmon/core/graph.py#L18-L26

**4,001** | boriel/zxbasic | zxbparser.py | p_arr_access_expr | language: python | split_name: train

whole_func_string / func_code_string:

```python
def p_arr_access_expr(p):
    """ func_call : ARRAY_ID arg_list
    """  # This is an array access
    p[0] = make_call(p[1], p.lineno(1), p[2])
    if p[0] is None:
        return

    entry = SYMBOL_TABLE.access_call(p[1], p.lineno(1))
    entry.accessed = True
```

func_documentation_string: func_call : ARRAY_ID arg_list

func_code_url: https://github.com/boriel/zxbasic/blob/23b28db10e41117805bdb3c0f78543590853b132/zxbparser.py#L2581-L2589

**4,002** | SeleniumHQ/selenium | py/selenium/webdriver/firefox/firefox_binary.py | FirefoxBinary.launch_browser | language: python | split_name: train

whole_func_string / func_code_string:

```python
def launch_browser(self, profile, timeout=30):
    """Launches the browser for the given profile name.
    It is assumed the profile already exists.
    """
    self.profile = profile
    self._start_from_profile_path(self.profile.path)
    self._wait_until_connectable(timeout=timeout)
```

func_documentation_string:
Launches the browser for the given profile name.
It is assumed the profile already exists.

func_code_url: https://github.com/SeleniumHQ/selenium/blob/df40c28b41d4b3953f90eaff84838a9ac052b84a/py/selenium/webdriver/firefox/firefox_binary.py#L66-L73
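
A minimal usage sketch for the entry above, assuming the Selenium 3 era API this row was mined from; the Firefox binary path is an assumption, and `launch_browser` is normally called by the Firefox driver itself rather than by user code.

```python
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
from selenium.webdriver.firefox.firefox_profile import FirefoxProfile

binary = FirefoxBinary("/usr/bin/firefox")   # assumed path to the Firefox executable
profile = FirefoxProfile()                   # a fresh profile directory

# Start Firefox with the given profile and wait up to 30 seconds until it is connectable.
binary.launch_browser(profile, timeout=30)
```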

**4,003** | pantsbuild/pex | pex/common.py | Chroot.clone | language: python | split_name: train

whole_func_string / func_code_string:

```python
def clone(self, into=None):
    """Clone this chroot.

    :keyword into: (optional) An optional destination directory to clone the
        Chroot into. If not specified, a temporary directory will be created.

    .. versionchanged:: 0.8
        The temporary directory created when ``into`` is not specified is now garbage collected on
        interpreter exit.
    """
    into = into or safe_mkdtemp()
    new_chroot = Chroot(into)
    for label, fileset in self.filesets.items():
        for fn in fileset:
            new_chroot.link(os.path.join(self.chroot, fn), fn, label=label)
    return new_chroot
```

func_documentation_string:
Clone this chroot.
:keyword into: (optional) An optional destination directory to clone the
Chroot into. If not specified, a temporary directory will be created.
.. versionchanged:: 0.8
The temporary directory created when ``into`` is not specified is now garbage collected on
interpreter exit.

func_code_url: https://github.com/pantsbuild/pex/blob/87b2129d860250d3b9edce75b9cb62f9789ee521/pex/common.py#L239-L254
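
A short hedged sketch of the method above; the directory paths are assumptions, and the import path follows the `pex/common.py` location given in the row.

```python
from pex.common import Chroot

original = Chroot("/tmp/pex-build")   # assumed working directory

# Clone into a temporary directory that is garbage collected at interpreter exit ...
temp_copy = original.clone()

# ... or clone into an explicit destination directory.
pinned_copy = original.clone(into="/tmp/pex-build-copy")
```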

**4,004** | etcher-be/epab | epab/utils/_repo.py | Repo.list_staged_files | language: python | split_name: train

whole_func_string / func_code_string:

```python
def list_staged_files(self) -> typing.List[str]:
    """
    :return: staged files
    :rtype: list of str
    """
    staged_files: typing.List[str] = [x.a_path for x in self.repo.index.diff('HEAD')]
    LOGGER.debug('staged files: %s', staged_files)
    return staged_files
```

func_documentation_string:
:return: staged files
:rtype: list of str

func_code_url: https://github.com/etcher-be/epab/blob/024cde74d058281aa66e6e4b7b71dccbe803b1c1/epab/utils/_repo.py#L212-L219

**4,005** | bskinn/opan | opan/vpt2/repo.py | OpanAnharmRepo.load | language: python | split_name: train

whole_func_string / func_code_string:

```python
def load(self, fname):
    """ .. todo:: REPO.load docstring
    """

    # Imports
    import h5py as h5
    from ..error import RepoError

    # If repo not None, complain
    if not self._repo == None:
        raise RepoError(RepoError.STATUS,
                        "Repository already open",
                        "File: {0}".format(self.fname))
    ## end if

    # If string passed, try opening h5.File; otherwise complain
    if isinstance(fname, str):
        self.fname = fname
        self._repo = h5.File(fname)
    else:
        raise TypeError("Invalid filename type: {0}".format(type(fname)))
```

func_documentation_string: .. todo:: REPO.load docstring

func_code_url: https://github.com/bskinn/opan/blob/0b1b21662df6abc971407a9386db21a8796fbfe5/opan/vpt2/repo.py#L142-L162

**4,006** | Azure/msrest-for-python | msrest/serialization.py | Deserializer._unpack_content | language: python | split_name: train

whole_func_string / func_code_string:

```python
def _unpack_content(raw_data, content_type=None):
    """Extract the correct structure for deserialization.

    If raw_data is a PipelineResponse, try to extract the result of RawDeserializer.
    if we can't, raise. Your Pipeline should have a RawDeserializer.

    If not a pipeline response and raw_data is bytes or string, use content-type
    to decode it. If no content-type, try JSON.

    If raw_data is something else, bypass all logic and return it directly.

    :param raw_data: Data to be processed.
    :param content_type: How to parse if raw_data is a string/bytes.
    :raises JSONDecodeError: If JSON is requested and parsing is impossible.
    :raises UnicodeDecodeError: If bytes is not UTF8
    """
    # This avoids a circular dependency. We might want to consider RawDesializer is more generic
    # than the pipeline concept, and put it in a toolbox, used both here and in pipeline. TBD.
    from .pipeline.universal import RawDeserializer

    # Assume this is enough to detect a Pipeline Response without importing it
    context = getattr(raw_data, "context", {})
    if context:
        if RawDeserializer.CONTEXT_NAME in context:
            return context[RawDeserializer.CONTEXT_NAME]
        raise ValueError("This pipeline didn't have the RawDeserializer policy; can't deserialize")

    #Assume this is enough to recognize universal_http.ClientResponse without importing it
    if hasattr(raw_data, "body"):
        return RawDeserializer.deserialize_from_http_generics(
            raw_data.text(),
            raw_data.headers
        )

    # Assume this enough to recognize requests.Response without importing it.
    if hasattr(raw_data, '_content_consumed'):
        return RawDeserializer.deserialize_from_http_generics(
            raw_data.text,
            raw_data.headers
        )

    if isinstance(raw_data, (basestring, bytes)) or hasattr(raw_data, 'read'):
        return RawDeserializer.deserialize_from_text(raw_data, content_type)
    return raw_data
```

func_documentation_string:
Extract the correct structure for deserialization.
If raw_data is a PipelineResponse, try to extract the result of RawDeserializer.
if we can't, raise. Your Pipeline should have a RawDeserializer.
If not a pipeline response and raw_data is bytes or string, use content-type
to decode it. If no content-type, try JSON.
If raw_data is something else, bypass all logic and return it directly.
:param raw_data: Data to be processed.
:param content_type: How to parse if raw_data is a string/bytes.
:raises JSONDecodeError: If JSON is requested and parsing is impossible.
:raises UnicodeDecodeError: If bytes is not UTF8

func_code_url: https://github.com/Azure/msrest-for-python/blob/0732bc90bdb290e5f58c675ffdd7dbfa9acefc93/msrest/serialization.py#L1339-L1382
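
A small illustration of the bytes-plus-content-type path described in the docstring above. `_unpack_content` is a private helper, so this is only a hedged sketch of its decoding behaviour; the JSON payload is an assumption.

```python
from msrest.serialization import Deserializer

payload = b'{"name": "example", "count": 3}'

# With an explicit content type the raw bytes are decoded by the RawDeserializer helpers;
# with no content type the method falls back to trying JSON.
data = Deserializer._unpack_content(payload, content_type="application/json")
# data is now a plain dict such as {'name': 'example', 'count': 3}
```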

**4,007** | duniter/duniter-python-api | duniterpy/key/ascii_armor.py | AsciiArmor.create | language: python | split_name: train

whole_func_string / func_code_string:

```python
def create(message: str, pubkey: Optional[str] = None, signing_keys: Optional[List[SigningKey]] = None,
           message_comment: Optional[str] = None, signatures_comment: Optional[str] = None) -> str:
    """
    Encrypt a message in ascii armor format, optionally signing it

    :param message: Utf-8 message
    :param pubkey: Public key of recipient for encryption
    :param signing_keys: Optional list of SigningKey instances
    :param message_comment: Optional message comment field
    :param signatures_comment: Optional signatures comment field
    :return:
    """
    # if no public key and no signing key...
    if not pubkey and not signing_keys:
        # We can not create an Ascii Armor Message
        raise MISSING_PUBLIC_KEY_AND_SIGNING_KEY_EXCEPTION

    # keep only one newline at the end of the message
    message = message.rstrip("\n\r") + "\n"

    # create block with headers
    ascii_armor_block = """{begin_message_header}
""".format(begin_message_header=BEGIN_MESSAGE_HEADER)

    # if encrypted message...
    if pubkey:
        # add encrypted message fields
        ascii_armor_block += """{version_field}
""".format(version_field=AsciiArmor._get_version_field())

    # add message comment if specified
    if message_comment:
        ascii_armor_block += """{comment_field}
""".format(comment_field=AsciiArmor._get_comment_field(message_comment))

    # blank line separator
    ascii_armor_block += '\n'

    if pubkey:
        # add encrypted message
        pubkey_instance = PublicKey(pubkey)
        base64_encrypted_message = base64.b64encode(pubkey_instance.encrypt_seal(message))  # type: bytes
        ascii_armor_block += """{base64_encrypted_message}
""".format(base64_encrypted_message=base64_encrypted_message.decode('utf-8'))
    else:
        # remove trailing spaces
        message = AsciiArmor._remove_trailing_spaces(message)

        # add dash escaped message to ascii armor content
        ascii_armor_block += AsciiArmor._dash_escape_text(message)

    # if no signature...
    if signing_keys is None:
        # add message tail
        ascii_armor_block += END_MESSAGE_HEADER
    else:
        # add signature blocks and close block on last signature
        count = 1
        for signing_key in signing_keys:
            ascii_armor_block += AsciiArmor._get_signature_block(message, signing_key, count == len(signing_keys),
                                                                 signatures_comment)
            count += 1

    return ascii_armor_block
```

func_documentation_string:
Encrypt a message in ascii armor format, optionally signing it
:param message: Utf-8 message
:param pubkey: Public key of recipient for encryption
:param signing_keys: Optional list of SigningKey instances
:param message_comment: Optional message comment field
:param signatures_comment: Optional signatures comment field
:return:

func_code_url: https://github.com/duniter/duniter-python-api/blob/3a1e5d61a2f72f5afaf29d010c6cf4dff3648165/duniterpy/key/ascii_armor.py#L67-L130
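
A hedged usage sketch for the entry above, showing the encrypt-only path (public key given, no signing keys); the recipient key below is a placeholder string, not a real key.

```python
from duniterpy.key.ascii_armor import AsciiArmor

recipient_pubkey = "J4c8CARmP9vAFNGtHRuzx14zvxojyRWHW2darguVqjtX"  # placeholder, not a real key

# Encrypt a message for the recipient; no signatures are attached.
armored = AsciiArmor.create("hello world\n", pubkey=recipient_pubkey)
print(armored)
```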

**4,008** | edibledinos/pwnypack | pwnypack/codec.py | find_xor_mask | language: python | split_name: train

whole_func_string / func_code_string:

```python
def find_xor_mask(data, alphabet=None, max_depth=3, min_depth=0, iv=None):
    """
    Produce a series of bytestrings that when XORed together end up being
    equal to ``data`` and only contain characters from the giving
    ``alphabet``. The initial state (or previous state) can be given as
    ``iv``.

    Arguments:
        data (bytes): The data to recreate as a series of XOR operations.
        alphabet (bytes): The bytestring containing the allowed characters
            for the XOR values. If ``None``, all characters except NUL bytes,
            carriage returns and newlines will be allowed.
        max_depth (int): The maximum depth to look for a solution.
        min_depth (int): The minimum depth to look for a solution.
        iv (bytes): Initialization vector. If ``None``, it will be assumed the
            operation starts at an all zero string.

    Returns:
        A list of bytestrings that, when XOR'ed with ``iv`` (or just eachother
        if ``iv` is not providede) will be the same as ``data``.

    Examples:
        Produce a series of strings that when XORed together will result in
        the string 'pwnypack' using only ASCII characters in the range 65 to
        96:

        >>> from pwny import *
        >>> find_xor_mask('pwnypack', alphabet=''.join(chr(c) for c in range(65, 97)))
        [b'````````', b'AAAAABAA', b'QVOXQCBJ']
        >>> xor(xor(b'````````', b'AAAAABAA'), b'QVOXQCBJ')
        'pwnypack'
    """
    if alphabet is None:
        alphabet = set(i for i in range(256) if i not in (0, 10, 13))
    else:
        alphabet = set(six.iterbytes(alphabet))

    if iv is None:
        iv = b'\0' * len(data)

    if len(data) != len(iv):
        raise ValueError('length of iv differs from data')

    if not min_depth and data == iv:
        return []

    data = xor(data, iv)

    # Pre-flight check to see if we have all the bits we need.
    mask = 0
    for ch in alphabet:
        mask |= ch
    mask = ~mask

    # Map all bytes in data into a {byte: [pos...]} dictionary, check
    # if we have enough bits along the way.
    data_map_tmpl = {}
    for i, ch in enumerate(six.iterbytes(data)):
        if ch & mask:
            raise ValueError('Alphabet does not contain enough bits.')
        data_map_tmpl.setdefault(ch, []).append(i)

    # Let's try to find a solution.
    for depth in range(max(min_depth, 1), max_depth + 1):
        # Prepare for round.
        data_map = data_map_tmpl.copy()
        results = [[None] * len(data) for _ in range(depth)]

        for values in itertools.product(*([alphabet] * (depth - 1))):
            # Prepare cumulative mask for this combination of alphabet.
            mask = 0
            for value in values:
                mask ^= value

            for ch in list(data_map):
                r = ch ^ mask
                if r in alphabet:
                    # Found a solution for this character, mark the result.
                    pos = data_map.pop(ch)
                    for p in pos:
                        results[0][p] = r
                        for i, value in enumerate(values):
                            results[i + 1][p] = value

            if not data_map:
                # Aaaand.. We're done!
                return [
                    b''.join(six.int2byte(b) for b in r)
                    for r in results
                ]

        # No solution found at this depth. Increase depth, try again.

    raise ValueError('No solution found.')
```

func_documentation_string:
Produce a series of bytestrings that when XORed together end up being
equal to ``data`` and only contain characters from the giving
``alphabet``. The initial state (or previous state) can be given as
``iv``.
Arguments:
data (bytes): The data to recreate as a series of XOR operations.
alphabet (bytes): The bytestring containing the allowed characters
for the XOR values. If ``None``, all characters except NUL bytes,
carriage returns and newlines will be allowed.
max_depth (int): The maximum depth to look for a solution.
min_depth (int): The minimum depth to look for a solution.
iv (bytes): Initialization vector. If ``None``, it will be assumed the
operation starts at an all zero string.
Returns:
A list of bytestrings that, when XOR'ed with ``iv`` (or just eachother
if ``iv` is not providede) will be the same as ``data``.
Examples:
Produce a series of strings that when XORed together will result in
the string 'pwnypack' using only ASCII characters in the range 65 to
96:
>>> from pwny import *
>>> find_xor_mask('pwnypack', alphabet=''.join(chr(c) for c in range(65, 97)))
[b'````````', b'AAAAABAA', b'QVOXQCBJ']
>>> xor(xor(b'````````', b'AAAAABAA'), b'QVOXQCBJ')
'pwnypack'

func_code_url: https://github.com/edibledinos/pwnypack/blob/e0a5a8e6ef3f4f1f7e1b91ee379711f4a49cb0e6/pwnypack/codec.py#L79-L172
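
The docstring above already carries a doctest; the sketch below additionally exercises the `iv` parameter, assuming the `pwny` top-level namespace used in that doctest.

```python
from pwny import find_xor_mask, xor

previous = b"AAAAAAAA"                  # assumed prior state
printable = bytes(range(0x20, 0x7f))    # restrict XOR stages to printable ASCII

stages = find_xor_mask(b"pwnypack", alphabet=printable, iv=previous)

# XORing the stages onto the previous state reproduces the target.
result = previous
for stage in stages:
    result = xor(result, stage)
assert result == b"pwnypack"
```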

**4,009** | aws/chalice | chalice/utils.py | remove_stage_from_deployed_values | language: python | split_name: train

whole_func_string / func_code_string:

```python
def remove_stage_from_deployed_values(key, filename):
    # type: (str, str) -> None
    """Delete a top level key from the deployed JSON file."""
    final_values = {}  # type: Dict[str, Any]
    try:
        with open(filename, 'r') as f:
            final_values = json.load(f)
    except IOError:
        # If there is no file to delete from, then this funciton is a noop.
        return

    try:
        del final_values[key]
        with open(filename, 'wb') as f:
            data = serialize_to_json(final_values)
            f.write(data.encode('utf-8'))
    except KeyError:
        # If they key didn't exist then there is nothing to remove.
        pass
```

func_documentation_string: Delete a top level key from the deployed JSON file.

func_code_url: https://github.com/aws/chalice/blob/10d7fb52e68bd1c52aae251c97e3939fc0190412/chalice/utils.py#L49-L67
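
A small usage sketch for the entry above; the stage name and the `.chalice/deployed.json` path are assumptions about where Chalice keeps its deployed values.

```python
from chalice.utils import remove_stage_from_deployed_values

# Drop the record for the "dev" stage; a missing file or a missing key is a no-op.
remove_stage_from_deployed_values("dev", ".chalice/deployed.json")
```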

**4,010** | lawsie/guizero | guizero/Box.py | Box.set_border | language: python | split_name: train

whole_func_string / func_code_string:

```python
def set_border(self, thickness, color="black"):
    """
    Sets the border thickness and color.

    :param int thickness:
        The thickenss of the border.

    :param str color:
        The color of the border.
    """
    self._set_tk_config("highlightthickness", thickness)
    self._set_tk_config("highlightbackground", utils.convert_color(color))
```

func_documentation_string:
Sets the border thickness and color.
:param int thickness:
The thickenss of the border.
:param str color:
The color of the border.

func_code_url: https://github.com/lawsie/guizero/blob/84c7f0b314fa86f9fc88eb11c9a0f6c4b57155e2/guizero/Box.py#L87-L98
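
A short guizero sketch for the method above; the widget sizes and texts are assumptions.

```python
from guizero import App, Box, Text

app = App(title="border demo")
box = Box(app, width=200, height=100)
Text(box, text="inside the box")

box.set_border(2, "red")   # 2 pixel red border; color defaults to "black" if omitted

app.display()
```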

**4,011** | hyperledger/indy-plenum | plenum/server/client_authn.py | ClientAuthNr.authenticate | language: python | split_name: train

whole_func_string / func_code_string:

```python
def authenticate(self,
                 msg: Dict,
                 identifier: Optional[str] = None,
                 signature: Optional[str] = None,
                 threshold: Optional[int] = None,
                 key: Optional[str] = None) -> str:
    """
    Authenticate the client's message with the signature provided.

    :param identifier: some unique identifier; if None, then try to use
    msg['identifier'] as identifier
    :param signature: a utf-8 and base58 encoded signature
    :param msg: the message to authenticate
    :param threshold: The number of successful signature verification
    :param key: The key of request for storing in internal maps
    required. By default all signatures are required to be verified.
    :return: the identifier; an exception of type SigningException is
    raised if the signature is not valid
    """
```

func_documentation_string:
Authenticate the client's message with the signature provided.
:param identifier: some unique identifier; if None, then try to use
msg['identifier'] as identifier
:param signature: a utf-8 and base58 encoded signature
:param msg: the message to authenticate
:param threshold: The number of successful signature verification
:param key: The key of request for storing in internal maps
required. By default all signatures are required to be verified.
:return: the identifier; an exception of type SigningException is
raised if the signature is not valid

func_code_url: https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/server/client_authn.py#L31-L49

**4,012** | UCL-INGI/INGInious | inginious/frontend/task_problems.py | DisplayableMultipleChoiceProblem.show_input | language: python | split_name: train

whole_func_string / func_code_string:

```python
def show_input(self, template_helper, language, seed):
    """ Show multiple choice problems """
    choices = []
    limit = self._limit
    if limit == 0:
        limit = len(self._choices)  # no limit

    rand = Random("{}#{}#{}".format(self.get_task().get_id(), self.get_id(), seed))

    # Ensure that the choices are random
    # we *do* need to copy the choices here
    random_order_choices = list(self._choices)
    rand.shuffle(random_order_choices)

    if self._multiple:
        # take only the valid choices in the first pass
        for entry in random_order_choices:
            if entry['valid']:
                choices.append(entry)
                limit = limit - 1
        # take everything else in a second pass
        for entry in random_order_choices:
            if limit == 0:
                break
            if not entry['valid']:
                choices.append(entry)
                limit = limit - 1
    else:
        # need to have ONE valid entry
        for entry in random_order_choices:
            if not entry['valid'] and limit > 1:
                choices.append(entry)
                limit = limit - 1
        for entry in random_order_choices:
            if entry['valid'] and limit > 0:
                choices.append(entry)
                limit = limit - 1
    rand.shuffle(choices)

    header = ParsableText(self.gettext(language, self._header), "rst",
                          translation=self._translations.get(language, gettext.NullTranslations()))

    return str(DisplayableMultipleChoiceProblem.get_renderer(template_helper).tasks.multiple_choice(
        self.get_id(), header, self._multiple, choices,
        lambda text: ParsableText(self.gettext(language, text) if text else "", "rst",
                                  translation=self._translations.get(language, gettext.NullTranslations()))))
```

func_documentation_string: Show multiple choice problems

func_code_url: https://github.com/UCL-INGI/INGInious/blob/cbda9a9c7f2b8e8eb1e6d7d51f0d18092086300c/inginious/frontend/task_problems.py#L153-L199

**4,013** | thejunglejane/datums | datums/models/base.py | GhostBase.update | language: python | split_name: train

whole_func_string / func_code_string:

```python
def update(cls, **kwargs):
    '''
    If a record matching the instance id already exists in the database,
    update it. If a record matching the instance id does not already exist,
    create a new record.
    '''
    q = cls._get_instance(**{'id': kwargs['id']})
    if q:
        for k, v in kwargs.items():
            setattr(q, k, v)
        _action_and_commit(q, session.add)
    else:
        cls.get_or_create(**kwargs)
```

func_documentation_string:
If a record matching the instance id already exists in the database,
update it. If a record matching the instance id does not already exist,
create a new record.

func_code_url: https://github.com/thejunglejane/datums/blob/2250b365e37ba952c2426edc615c1487afabae6e/datums/models/base.py#L70-L82

**4,014** | anti1869/sunhead | src/sunhead/utils.py | parallel_results | language: python | split_name: train

whole_func_string / func_code_string:

```python
async def parallel_results(future_map: Sequence[Tuple]) -> Dict:
    """
    Run parallel execution of futures and return mapping of their results to the provided keys.
    Just a neat shortcut around ``asyncio.gather()``

    :param future_map: Keys to futures mapping, e.g.: ( ('nav', get_nav()), ('content, get_content()) )
    :return: Dict with futures results mapped to keys {'nav': {1:2}, 'content': 'xyz'}
    """
    ctx_methods = OrderedDict(future_map)
    fs = list(ctx_methods.values())
    results = await asyncio.gather(*fs)
    results = {
        key: results[idx] for idx, key in enumerate(ctx_methods.keys())
    }
    return results
```

func_documentation_string:
Run parallel execution of futures and return mapping of their results to the provided keys.
Just a neat shortcut around ``asyncio.gather()``
:param future_map: Keys to futures mapping, e.g.: ( ('nav', get_nav()), ('content, get_content()) )
:return: Dict with futures results mapped to keys {'nav': {1:2}, 'content': 'xyz'}

func_code_url: https://github.com/anti1869/sunhead/blob/5117ec797a38eb82d955241d20547d125efe80f3/src/sunhead/utils.py#L99-L113
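
A brief asyncio sketch of the helper above; the two coroutines stand in for real context-building calls and are assumptions.

```python
import asyncio

from sunhead.utils import parallel_results

async def get_nav():
    return {1: 2}

async def get_content():
    return "xyz"

async def build_context():
    # Keys are mapped to the results of their coroutines, e.g. {'nav': {1: 2}, 'content': 'xyz'}
    return await parallel_results((("nav", get_nav()), ("content", get_content())))

print(asyncio.run(build_context()))
```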

**4,015** | rapidpro/expressions | python/temba_expressions/functions/__init__.py | FunctionManager.invoke_function | language: python | split_name: train

whole_func_string / func_code_string:

```python
def invoke_function(self, ctx, name, arguments):
    """
    Invokes the given function
    :param ctx: the evaluation context
    :param name: the function name (case insensitive)
    :param arguments: the arguments to be passed to the function
    :return: the function return value
    """
    from temba_expressions import EvaluationError, conversions

    # find function with given name
    func = self.get_function(name)
    if func is None:
        raise EvaluationError("Undefined function: %s" % name)

    args, varargs, defaults = self._get_arg_spec(func)

    call_args = []
    passed_args = list(arguments)

    for arg in args:
        if arg == 'ctx':
            call_args.append(ctx)
        elif passed_args:
            call_args.append(passed_args.pop(0))
        elif arg in defaults:
            call_args.append(defaults[arg])
        else:
            raise EvaluationError("Too few arguments provided for function %s" % name)

    if varargs is not None:
        call_args.extend(passed_args)
        passed_args = []

    # any unused arguments?
    if passed_args:
        raise EvaluationError("Too many arguments provided for function %s" % name)

    try:
        return func(*call_args)
    except Exception as e:
        pretty_args = []
        for arg in arguments:
            if isinstance(arg, str):
                pretty = '"%s"' % arg
            else:
                try:
                    pretty = conversions.to_string(arg, ctx)
                except EvaluationError:
                    pretty = str(arg)
            pretty_args.append(pretty)
        raise EvaluationError("Error calling function %s with arguments %s" % (name, ', '.join(pretty_args)), e)
```

func_documentation_string:
Invokes the given function
:param ctx: the evaluation context
:param name: the function name (case insensitive)
:param arguments: the arguments to be passed to the function
:return: the function return value

func_code_url: https://github.com/rapidpro/expressions/blob/b03d91ec58fc328960bce90ecb5fa49dcf467627/python/temba_expressions/functions/__init__.py#L29-L81

**4,016** | tjvr/kurt | kurt/scratch14/objtable.py | decode_network | language: python | split_name: train

whole_func_string / func_code_string:

```python
def decode_network(objects):
    """Return root object from ref-containing obj table entries"""
    def resolve_ref(obj, objects=objects):
        if isinstance(obj, Ref):
            # first entry is 1
            return objects[obj.index - 1]
        else:
            return obj

    # Reading the ObjTable backwards somehow makes more sense.
    for i in xrange(len(objects)-1, -1, -1):
        obj = objects[i]

        if isinstance(obj, Container):
            obj.update((k, resolve_ref(v)) for (k, v) in obj.items())

        elif isinstance(obj, Dictionary):
            obj.value = dict(
                (resolve_ref(field), resolve_ref(value))
                for (field, value) in obj.value.items()
            )

        elif isinstance(obj, dict):
            obj = dict(
                (resolve_ref(field), resolve_ref(value))
                for (field, value) in obj.items()
            )

        elif isinstance(obj, list):
            obj = [resolve_ref(field) for field in obj]

        elif isinstance(obj, Form):
            for field in obj.value:
                value = getattr(obj, field)
                value = resolve_ref(value)
                setattr(obj, field, value)

        elif isinstance(obj, ContainsRefs):
            obj.value = [resolve_ref(field) for field in obj.value]

        objects[i] = obj

    for obj in objects:
        if isinstance(obj, Form):
            obj.built()

    root = objects[0]
    return root
```
return root | ['def', 'decode_network', '(', 'objects', ')', ':', 'def', 'resolve_ref', '(', 'obj', ',', 'objects', '=', 'objects', ')', ':', 'if', 'isinstance', '(', 'obj', ',', 'Ref', ')', ':', '# first entry is 1', 'return', 'objects', '[', 'obj', '.', 'index', '-', '1', ']', 'else', ':', 'return', 'obj', '# Reading the ObjTable backwards somehow makes more sense.', 'for', 'i', 'in', 'xrange', '(', 'len', '(', 'objects', ')', '-', '1', ',', '-', '1', ',', '-', '1', ')', ':', 'obj', '=', 'objects', '[', 'i', ']', 'if', 'isinstance', '(', 'obj', ',', 'Container', ')', ':', 'obj', '.', 'update', '(', '(', 'k', ',', 'resolve_ref', '(', 'v', ')', ')', 'for', '(', 'k', ',', 'v', ')', 'in', 'obj', '.', 'items', '(', ')', ')', 'elif', 'isinstance', '(', 'obj', ',', 'Dictionary', ')', ':', 'obj', '.', 'value', '=', 'dict', '(', '(', 'resolve_ref', '(', 'field', ')', ',', 'resolve_ref', '(', 'value', ')', ')', 'for', '(', 'field', ',', 'value', ')', 'in', 'obj', '.', 'value', '.', 'items', '(', ')', ')', 'elif', 'isinstance', '(', 'obj', ',', 'dict', ')', ':', 'obj', '=', 'dict', '(', '(', 'resolve_ref', '(', 'field', ')', ',', 'resolve_ref', '(', 'value', ')', ')', 'for', '(', 'field', ',', 'value', ')', 'in', 'obj', '.', 'items', '(', ')', ')', 'elif', 'isinstance', '(', 'obj', ',', 'list', ')', ':', 'obj', '=', '[', 'resolve_ref', '(', 'field', ')', 'for', 'field', 'in', 'obj', ']', 'elif', 'isinstance', '(', 'obj', ',', 'Form', ')', ':', 'for', 'field', 'in', 'obj', '.', 'value', ':', 'value', '=', 'getattr', '(', 'obj', ',', 'field', ')', 'value', '=', 'resolve_ref', '(', 'value', ')', 'setattr', '(', 'obj', ',', 'field', ',', 'value', ')', 'elif', 'isinstance', '(', 'obj', ',', 'ContainsRefs', ')', ':', 'obj', '.', 'value', '=', '[', 'resolve_ref', '(', 'field', ')', 'for', 'field', 'in', 'obj', '.', 'value', ']', 'objects', '[', 'i', ']', '=', 'obj', 'for', 'obj', 'in', 'objects', ':', 'if', 'isinstance', '(', 'obj', ',', 'Form', ')', ':', 'obj', '.', 'built', '(', ')', 'root', '=', 'objects', '[', '0', ']', 'return', 'root'] | Return root object from ref-containing obj table entries | ['Return', 'root', 'object', 'from', 'ref', '-', 'containing', 'obj', 'table', 'entries'] | train | https://github.com/tjvr/kurt/blob/fcccd80cae11dc233f6dd02b40ec9a388c62f259/kurt/scratch14/objtable.py#L251-L298 |
4,017 | nvbn/thefuck | thefuck/specific/git.py | git_support | def git_support(fn, command):
"""Resolves git aliases and supports testing for both git and hub."""
# supports GitHub's `hub` command
# which is recommended to be used with `alias git=hub`
# but at this point, shell aliases have already been resolved
if not is_app(command, 'git', 'hub'):
return False
# perform git aliases expansion
if 'trace: alias expansion:' in command.output:
search = re.search("trace: alias expansion: ([^ ]*) => ([^\n]*)",
command.output)
alias = search.group(1)
# by default git quotes everything, for example:
# 'commit' '--amend'
# which is surprising and does not allow to easily test for
# eg. 'git commit'
expansion = ' '.join(shell.quote(part)
for part in shell.split_command(search.group(2)))
new_script = command.script.replace(alias, expansion)
command = command.update(script=new_script)
return fn(command) | python | def git_support(fn, command):
"""Resolves git aliases and supports testing for both git and hub."""
# supports GitHub's `hub` command
# which is recommended to be used with `alias git=hub`
# but at this point, shell aliases have already been resolved
if not is_app(command, 'git', 'hub'):
return False
# perform git aliases expansion
if 'trace: alias expansion:' in command.output:
search = re.search("trace: alias expansion: ([^ ]*) => ([^\n]*)",
command.output)
alias = search.group(1)
# by default git quotes everything, for example:
# 'commit' '--amend'
# which is surprising and does not allow to easily test for
# eg. 'git commit'
expansion = ' '.join(shell.quote(part)
for part in shell.split_command(search.group(2)))
new_script = command.script.replace(alias, expansion)
command = command.update(script=new_script)
return fn(command) | ['def', 'git_support', '(', 'fn', ',', 'command', ')', ':', "# supports GitHub's `hub` command", '# which is recommended to be used with `alias git=hub`', '# but at this point, shell aliases have already been resolved', 'if', 'not', 'is_app', '(', 'command', ',', "'git'", ',', "'hub'", ')', ':', 'return', 'False', '# perform git aliases expansion', 'if', "'trace: alias expansion:'", 'in', 'command', '.', 'output', ':', 'search', '=', 're', '.', 'search', '(', '"trace: alias expansion: ([^ ]*) => ([^\\n]*)"', ',', 'command', '.', 'output', ')', 'alias', '=', 'search', '.', 'group', '(', '1', ')', '# by default git quotes everything, for example:', "# 'commit' '--amend'", '# which is surprising and does not allow to easily test for', "# eg. 'git commit'", 'expansion', '=', "' '", '.', 'join', '(', 'shell', '.', 'quote', '(', 'part', ')', 'for', 'part', 'in', 'shell', '.', 'split_command', '(', 'search', '.', 'group', '(', '2', ')', ')', ')', 'new_script', '=', 'command', '.', 'script', '.', 'replace', '(', 'alias', ',', 'expansion', ')', 'command', '=', 'command', '.', 'update', '(', 'script', '=', 'new_script', ')', 'return', 'fn', '(', 'command', ')'] | Resolves git aliases and supports testing for both git and hub. | ['Resolves', 'git', 'aliases', 'and', 'supports', 'testing', 'for', 'both', 'git', 'and', 'hub', '.'] | train | https://github.com/nvbn/thefuck/blob/40ab4eb62db57627bff10cf029d29c94704086a2/thefuck/specific/git.py#L8-L32 |
4,018 | DLR-RM/RAFCON | source/rafcon/gui/mygaphas/items/ports.py | PortView.get_port_area | def get_port_area(self, view):
"""Calculates the drawing area affected by the (hovered) port
"""
state_v = self.parent
center = self.handle.pos
margin = self.port_side_size / 4.
if self.side in [SnappedSide.LEFT, SnappedSide.RIGHT]:
height, width = self.port_size
else:
width, height = self.port_size
upper_left = center[0] - width / 2 - margin, center[1] - height / 2 - margin
lower_right = center[0] + width / 2 + margin, center[1] + height / 2 + margin
port_upper_left = view.get_matrix_i2v(state_v).transform_point(*upper_left)
port_lower_right = view.get_matrix_i2v(state_v).transform_point(*lower_right)
size = port_lower_right[0] - port_upper_left[0], port_lower_right[1] - port_upper_left[1]
return port_upper_left[0], port_upper_left[1], size[0], size[1] | python | def get_port_area(self, view):
"""Calculates the drawing area affected by the (hovered) port
"""
state_v = self.parent
center = self.handle.pos
margin = self.port_side_size / 4.
if self.side in [SnappedSide.LEFT, SnappedSide.RIGHT]:
height, width = self.port_size
else:
width, height = self.port_size
upper_left = center[0] - width / 2 - margin, center[1] - height / 2 - margin
lower_right = center[0] + width / 2 + margin, center[1] + height / 2 + margin
port_upper_left = view.get_matrix_i2v(state_v).transform_point(*upper_left)
port_lower_right = view.get_matrix_i2v(state_v).transform_point(*lower_right)
size = port_lower_right[0] - port_upper_left[0], port_lower_right[1] - port_upper_left[1]
return port_upper_left[0], port_upper_left[1], size[0], size[1] | ['def', 'get_port_area', '(', 'self', ',', 'view', ')', ':', 'state_v', '=', 'self', '.', 'parent', 'center', '=', 'self', '.', 'handle', '.', 'pos', 'margin', '=', 'self', '.', 'port_side_size', '/', '4.', 'if', 'self', '.', 'side', 'in', '[', 'SnappedSide', '.', 'LEFT', ',', 'SnappedSide', '.', 'RIGHT', ']', ':', 'height', ',', 'width', '=', 'self', '.', 'port_size', 'else', ':', 'width', ',', 'height', '=', 'self', '.', 'port_size', 'upper_left', '=', 'center', '[', '0', ']', '-', 'width', '/', '2', '-', 'margin', ',', 'center', '[', '1', ']', '-', 'height', '/', '2', '-', 'margin', 'lower_right', '=', 'center', '[', '0', ']', '+', 'width', '/', '2', '+', 'margin', ',', 'center', '[', '1', ']', '+', 'height', '/', '2', '+', 'margin', 'port_upper_left', '=', 'view', '.', 'get_matrix_i2v', '(', 'state_v', ')', '.', 'transform_point', '(', '*', 'upper_left', ')', 'port_lower_right', '=', 'view', '.', 'get_matrix_i2v', '(', 'state_v', ')', '.', 'transform_point', '(', '*', 'lower_right', ')', 'size', '=', 'port_lower_right', '[', '0', ']', '-', 'port_upper_left', '[', '0', ']', ',', 'port_lower_right', '[', '1', ']', '-', 'port_upper_left', '[', '1', ']', 'return', 'port_upper_left', '[', '0', ']', ',', 'port_upper_left', '[', '1', ']', ',', 'size', '[', '0', ']', ',', 'size', '[', '1', ']'] | Calculates the drawing area affected by the (hovered) port | ['Calculates', 'the', 'drawing', 'area', 'affected', 'by', 'the', '(', 'hovered', ')', 'port'] | train | https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/gui/mygaphas/items/ports.py#L225-L240 |
4,019 | geronimp/graftM | graftm/sequence_searcher.py | SequenceSearcher.search_and_extract_nucleotides_matching_nucleotide_database | def search_and_extract_nucleotides_matching_nucleotide_database(self,
unpack,
euk_check,
search_method,
maximum_range,
threads,
evalue,
hmmsearch_output_table,
hit_reads_fasta):
'''As per nt_db_search() except slightly lower level. Search an
input read set (unpack) and then extract the sequences that hit.
Parameters
----------
hmmsearch_output_table: str
path to hmmsearch output table
hit_reads_fasta: str
path to hit nucleotide sequences
Returns
-------
direction_information: dict
{read_1: False
...
read n: True}
where True = Forward direction
and False = Reverse direction
result: DBSearchResult object containing file locations and hit
information
'''
if search_method == "hmmsearch":
# First search the reads using the HMM
search_result, table_list = self.nhmmer(
hmmsearch_output_table,
unpack,
threads,
evalue
)
elif search_method == 'diamond':
raise Exception("Diamond searches not supported for nucelotide databases yet")
if maximum_range:
hits = self._get_read_names(
search_result, # define the span of hits
maximum_range
)
else:
hits = self._get_sequence_directions(search_result)
hit_readnames = hits.keys()
if euk_check:
euk_reads = self._check_euk_contamination(table_list)
hit_readnames = set([read for read in hit_readnames if read not in euk_reads])
hits = {key:item for key, item in hits.iteritems() if key in hit_readnames}
hit_read_count = [len(euk_reads), len(hit_readnames)]
else:
hit_read_count = [0, len(hit_readnames)]
hit_reads_fasta, direction_information = self._extract_from_raw_reads(
hit_reads_fasta,
hit_readnames,
unpack.read_file,
unpack.format(),
hits
)
if not hit_readnames:
result = DBSearchResult(None,
search_result,
hit_read_count,
None)
else:
slash_endings=self._check_for_slash_endings(hit_readnames)
result = DBSearchResult(hit_reads_fasta,
search_result,
hit_read_count,
slash_endings)
if maximum_range:
n_hits = sum([len(x["strand"]) for x in hits.values()])
else:
n_hits = len(hits)
logging.info("%s read(s) detected" % n_hits)
return result, direction_information | python | def search_and_extract_nucleotides_matching_nucleotide_database(self,
unpack,
euk_check,
search_method,
maximum_range,
threads,
evalue,
hmmsearch_output_table,
hit_reads_fasta):
'''As per nt_db_search() except slightly lower level. Search an
input read set (unpack) and then extract the sequences that hit.
Parameters
----------
hmmsearch_output_table: str
path to hmmsearch output table
hit_reads_fasta: str
path to hit nucleotide sequences
Returns
-------
direction_information: dict
{read_1: False
...
read n: True}
where True = Forward direction
and False = Reverse direction
result: DBSearchResult object containing file locations and hit
information
'''
if search_method == "hmmsearch":
# First search the reads using the HMM
search_result, table_list = self.nhmmer(
hmmsearch_output_table,
unpack,
threads,
evalue
)
elif search_method == 'diamond':
raise Exception("Diamond searches not supported for nucelotide databases yet")
if maximum_range:
hits = self._get_read_names(
search_result, # define the span of hits
maximum_range
)
else:
hits = self._get_sequence_directions(search_result)
hit_readnames = hits.keys()
if euk_check:
euk_reads = self._check_euk_contamination(table_list)
hit_readnames = set([read for read in hit_readnames if read not in euk_reads])
hits = {key:item for key, item in hits.iteritems() if key in hit_readnames}
hit_read_count = [len(euk_reads), len(hit_readnames)]
else:
hit_read_count = [0, len(hit_readnames)]
hit_reads_fasta, direction_information = self._extract_from_raw_reads(
hit_reads_fasta,
hit_readnames,
unpack.read_file,
unpack.format(),
hits
)
if not hit_readnames:
result = DBSearchResult(None,
search_result,
hit_read_count,
None)
else:
slash_endings=self._check_for_slash_endings(hit_readnames)
result = DBSearchResult(hit_reads_fasta,
search_result,
hit_read_count,
slash_endings)
if maximum_range:
n_hits = sum([len(x["strand"]) for x in hits.values()])
else:
n_hits = len(hits)
logging.info("%s read(s) detected" % n_hits)
return result, direction_information | ['def', 'search_and_extract_nucleotides_matching_nucleotide_database', '(', 'self', ',', 'unpack', ',', 'euk_check', ',', 'search_method', ',', 'maximum_range', ',', 'threads', ',', 'evalue', ',', 'hmmsearch_output_table', ',', 'hit_reads_fasta', ')', ':', 'if', 'search_method', '==', '"hmmsearch"', ':', '# First search the reads using the HMM', 'search_result', ',', 'table_list', '=', 'self', '.', 'nhmmer', '(', 'hmmsearch_output_table', ',', 'unpack', ',', 'threads', ',', 'evalue', ')', 'elif', 'search_method', '==', "'diamond'", ':', 'raise', 'Exception', '(', '"Diamond searches not supported for nucelotide databases yet"', ')', 'if', 'maximum_range', ':', 'hits', '=', 'self', '.', '_get_read_names', '(', 'search_result', ',', '# define the span of hits', 'maximum_range', ')', 'else', ':', 'hits', '=', 'self', '.', '_get_sequence_directions', '(', 'search_result', ')', 'hit_readnames', '=', 'hits', '.', 'keys', '(', ')', 'if', 'euk_check', ':', 'euk_reads', '=', 'self', '.', '_check_euk_contamination', '(', 'table_list', ')', 'hit_readnames', '=', 'set', '(', '[', 'read', 'for', 'read', 'in', 'hit_readnames', 'if', 'read', 'not', 'in', 'euk_reads', ']', ')', 'hits', '=', '{', 'key', ':', 'item', 'for', 'key', ',', 'item', 'in', 'hits', '.', 'iteritems', '(', ')', 'if', 'key', 'in', 'hit_readnames', '}', 'hit_read_count', '=', '[', 'len', '(', 'euk_reads', ')', ',', 'len', '(', 'hit_readnames', ')', ']', 'else', ':', 'hit_read_count', '=', '[', '0', ',', 'len', '(', 'hit_readnames', ')', ']', 'hit_reads_fasta', ',', 'direction_information', '=', 'self', '.', '_extract_from_raw_reads', '(', 'hit_reads_fasta', ',', 'hit_readnames', ',', 'unpack', '.', 'read_file', ',', 'unpack', '.', 'format', '(', ')', ',', 'hits', ')', 'if', 'not', 'hit_readnames', ':', 'result', '=', 'DBSearchResult', '(', 'None', ',', 'search_result', ',', 'hit_read_count', ',', 'None', ')', 'else', ':', 'slash_endings', '=', 'self', '.', '_check_for_slash_endings', '(', 'hit_readnames', ')', 'result', '=', 'DBSearchResult', '(', 'hit_reads_fasta', ',', 'search_result', ',', 'hit_read_count', ',', 'slash_endings', ')', 'if', 'maximum_range', ':', 'n_hits', '=', 'sum', '(', '[', 'len', '(', 'x', '[', '"strand"', ']', ')', 'for', 'x', 'in', 'hits', '.', 'values', '(', ')', ']', ')', 'else', ':', 'n_hits', '=', 'len', '(', 'hits', ')', 'logging', '.', 'info', '(', '"%s read(s) detected"', '%', 'n_hits', ')', 'return', 'result', ',', 'direction_information'] | As per nt_db_search() except slightly lower level. Search an
input read set (unpack) and then extract the sequences that hit.
Parameters
----------
hmmsearch_output_table: str
path to hmmsearch output table
hit_reads_fasta: str
path to hit nucleotide sequences
Returns
-------
direction_information: dict
{read_1: False
...
read n: True}
where True = Forward direction
and False = Reverse direction
result: DBSearchResult object containing file locations and hit
information | ['As', 'per', 'nt_db_search', '()', 'except', 'slightly', 'lower', 'level', '.', 'Search', 'an', 'input', 'read', 'set', '(', 'unpack', ')', 'and', 'then', 'extract', 'the', 'sequences', 'that', 'hit', '.'] | train | https://github.com/geronimp/graftM/blob/c82576517290167f605fd0bc4facd009cee29f48/graftm/sequence_searcher.py#L1015-L1108 |
4,020 | jazzband/django-push-notifications | push_notifications/apns.py | apns_send_bulk_message | def apns_send_bulk_message(
registration_ids, alert, application_id=None, certfile=None, **kwargs
):
"""
Sends an APNS notification to one or more registration_ids.
The registration_ids argument needs to be a list.
Note that if set, alert should always be a string. If it is not set,
it won't be included in the notification. You will need to pass None
to this for silent notifications.
"""
results = _apns_send(
registration_ids, alert, batch=True, application_id=application_id,
certfile=certfile, **kwargs
)
inactive_tokens = [token for token, result in results.items() if result == "Unregistered"]
models.APNSDevice.objects.filter(registration_id__in=inactive_tokens).update(active=False)
return results | python | def apns_send_bulk_message(
registration_ids, alert, application_id=None, certfile=None, **kwargs
):
"""
Sends an APNS notification to one or more registration_ids.
The registration_ids argument needs to be a list.
Note that if set, alert should always be a string. If it is not set,
it won't be included in the notification. You will need to pass None
to this for silent notifications.
"""
results = _apns_send(
registration_ids, alert, batch=True, application_id=application_id,
certfile=certfile, **kwargs
)
inactive_tokens = [token for token, result in results.items() if result == "Unregistered"]
models.APNSDevice.objects.filter(registration_id__in=inactive_tokens).update(active=False)
return results | ['def', 'apns_send_bulk_message', '(', 'registration_ids', ',', 'alert', ',', 'application_id', '=', 'None', ',', 'certfile', '=', 'None', ',', '*', '*', 'kwargs', ')', ':', 'results', '=', '_apns_send', '(', 'registration_ids', ',', 'alert', ',', 'batch', '=', 'True', ',', 'application_id', '=', 'application_id', ',', 'certfile', '=', 'certfile', ',', '*', '*', 'kwargs', ')', 'inactive_tokens', '=', '[', 'token', 'for', 'token', ',', 'result', 'in', 'results', '.', 'items', '(', ')', 'if', 'result', '==', '"Unregistered"', ']', 'models', '.', 'APNSDevice', '.', 'objects', '.', 'filter', '(', 'registration_id__in', '=', 'inactive_tokens', ')', '.', 'update', '(', 'active', '=', 'False', ')', 'return', 'results'] | Sends an APNS notification to one or more registration_ids.
The registration_ids argument needs to be a list.
Note that if set, alert should always be a string. If it is not set,
it won't be included in the notification. You will need to pass None
to this for silent notifications. | ['Sends', 'an', 'APNS', 'notification', 'to', 'one', 'or', 'more', 'registration_ids', '.', 'The', 'registration_ids', 'argument', 'needs', 'to', 'be', 'a', 'list', '.'] | train | https://github.com/jazzband/django-push-notifications/blob/c4a0d710711fa27bfb6533c0bf3468cb67a62679/push_notifications/apns.py#L123-L141 |
4,021 | MeaningCloud/meaningcloud-python | meaningcloud/Response.py | Response.getRemainingCredits | def getRemainingCredits(self):
"""
Returns the remaining credits for the license key used after the request was made
:return:
String with remaining credits
"""
if 'status' in self._response.keys():
if (self._response['status'] is not None) and ('remaining_credits' in self._response['status'].keys()):
if self._response['status']['remaining_credits'] is not None:
return self._response['status']['remaining_credits']
else:
return ''
else:
print("Not remaining credits field\n")
else:
return None | python | def getRemainingCredits(self):
"""
Returns the remaining credits for the license key used after the request was made
:return:
String with remaining credits
"""
if 'status' in self._response.keys():
if (self._response['status'] is not None) and ('remaining_credits' in self._response['status'].keys()):
if self._response['status']['remaining_credits'] is not None:
return self._response['status']['remaining_credits']
else:
return ''
else:
print("Not remaining credits field\n")
else:
return None | ['def', 'getRemainingCredits', '(', 'self', ')', ':', 'if', "'status'", 'in', 'self', '.', '_response', '.', 'keys', '(', ')', ':', 'if', '(', 'self', '.', '_response', '[', "'status'", ']', 'is', 'not', 'None', ')', 'and', '(', "'remaining_credits'", 'in', 'self', '.', '_response', '[', "'status'", ']', '.', 'keys', '(', ')', ')', ':', 'if', 'self', '.', '_response', '[', "'status'", ']', '[', "'remaining_credits'", ']', 'is', 'not', 'None', ':', 'return', 'self', '.', '_response', '[', "'status'", ']', '[', "'remaining_credits'", ']', 'else', ':', 'return', "''", 'else', ':', 'print', '(', '"Not remaining credits field\\n"', ')', 'else', ':', 'return', 'None'] | Returns the remaining credits for the license key used after the request was made
:return:
String with remaining credits | ['Returns', 'the', 'remaining', 'credits', 'for', 'the', 'license', 'key', 'used', 'after', 'the', 'request', 'was', 'made'] | train | https://github.com/MeaningCloud/meaningcloud-python/blob/1dd76ecabeedd80c9bb14a1716d39657d645775f/meaningcloud/Response.py#L87-L104 |
4,022 | joausaga/ideascaly | ideascaly/api.py | API.vote_up_idea | def vote_up_idea(self, *args, **kwargs):
""" :allowed_param: 'ideaId', 'myVote' (optional)
"""
kwargs.update({'headers': {'content-type':'application/json'}})
return bind_api(
api=self,
path='/ideas/{ideaId}/vote/up',
method='POST',
payload_type='vote',
allowed_param=['ideaId'],
post_param=['myVote']
)(*args, **kwargs) | python | def vote_up_idea(self, *args, **kwargs):
""" :allowed_param: 'ideaId', 'myVote' (optional)
"""
kwargs.update({'headers': {'content-type':'application/json'}})
return bind_api(
api=self,
path='/ideas/{ideaId}/vote/up',
method='POST',
payload_type='vote',
allowed_param=['ideaId'],
post_param=['myVote']
)(*args, **kwargs) | ['def', 'vote_up_idea', '(', 'self', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'kwargs', '.', 'update', '(', '{', "'headers'", ':', '{', "'content-type'", ':', "'application/json'", '}', '}', ')', 'return', 'bind_api', '(', 'api', '=', 'self', ',', 'path', '=', "'/ideas/{ideaId}/vote/up'", ',', 'method', '=', "'POST'", ',', 'payload_type', '=', "'vote'", ',', 'allowed_param', '=', '[', "'ideaId'", ']', ',', 'post_param', '=', '[', "'myVote'", ']', ')', '(', '*', 'args', ',', '*', '*', 'kwargs', ')'] | :allowed_param: 'ideaId', 'myVote' (optional) | [':', 'allowed_param', ':', 'ideaId', 'myVote', '(', 'optional', ')'] | train | https://github.com/joausaga/ideascaly/blob/8aabb7bb1ed058406a8d352a7844e183ab64e008/ideascaly/api.py#L291-L302 |
4,023 | davidmcclure/textplot | textplot/text.py | Text.term_count_buckets | def term_count_buckets(self):
"""
Returns:
dict: A dictionary that maps occurrence counts to the terms that
appear that many times in the text.
"""
buckets = {}
for term, count in self.term_counts().items():
if count in buckets: buckets[count].append(term)
else: buckets[count] = [term]
return buckets | python | def term_count_buckets(self):
"""
Returns:
dict: A dictionary that maps occurrence counts to the terms that
appear that many times in the text.
"""
buckets = {}
for term, count in self.term_counts().items():
if count in buckets: buckets[count].append(term)
else: buckets[count] = [term]
return buckets | ['def', 'term_count_buckets', '(', 'self', ')', ':', 'buckets', '=', '{', '}', 'for', 'term', ',', 'count', 'in', 'self', '.', 'term_counts', '(', ')', '.', 'items', '(', ')', ':', 'if', 'count', 'in', 'buckets', ':', 'buckets', '[', 'count', ']', '.', 'append', '(', 'term', ')', 'else', ':', 'buckets', '[', 'count', ']', '=', '[', 'term', ']', 'return', 'buckets'] | Returns:
dict: A dictionary that maps occurrence counts to the terms that
appear that many times in the text. | ['Returns', ':', 'dict', ':', 'A', 'dictionary', 'that', 'maps', 'occurrence', 'counts', 'to', 'the', 'terms', 'that', 'appear', 'that', 'many', 'times', 'in', 'the', 'text', '.'] | train | https://github.com/davidmcclure/textplot/blob/889b949a637d99097ecec44ed4bfee53b1964dee/textplot/text.py#L112-L125 |
4,024 | explosion/spaCy | bin/ud/run_eval.py | _contains_blinded_text | def _contains_blinded_text(stats_xml):
""" Heuristic to determine whether the treebank has blinded texts or not """
tree = ET.parse(stats_xml)
root = tree.getroot()
total_tokens = int(root.find('size/total/tokens').text)
unique_lemmas = int(root.find('lemmas').get('unique'))
# assume the corpus is largely blinded when there are less than 1% unique tokens
return (unique_lemmas / total_tokens) < 0.01 | python | def _contains_blinded_text(stats_xml):
""" Heuristic to determine whether the treebank has blinded texts or not """
tree = ET.parse(stats_xml)
root = tree.getroot()
total_tokens = int(root.find('size/total/tokens').text)
unique_lemmas = int(root.find('lemmas').get('unique'))
# assume the corpus is largely blinded when there are less than 1% unique tokens
return (unique_lemmas / total_tokens) < 0.01 | ['def', '_contains_blinded_text', '(', 'stats_xml', ')', ':', 'tree', '=', 'ET', '.', 'parse', '(', 'stats_xml', ')', 'root', '=', 'tree', '.', 'getroot', '(', ')', 'total_tokens', '=', 'int', '(', 'root', '.', 'find', '(', "'size/total/tokens'", ')', '.', 'text', ')', 'unique_lemmas', '=', 'int', '(', 'root', '.', 'find', '(', "'lemmas'", ')', '.', 'get', '(', "'unique'", ')', ')', '# assume the corpus is largely blinded when there are less than 1% unique tokens', 'return', '(', 'unique_lemmas', '/', 'total_tokens', ')', '<', '0.01'] | Heuristic to determine whether the treebank has blinded texts or not | ['Heuristic', 'to', 'determine', 'whether', 'the', 'treebank', 'has', 'blinded', 'texts', 'or', 'not'] | train | https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/bin/ud/run_eval.py#L71-L79 |
4,025 | wakatime/wakatime | wakatime/packages/urllib3/connectionpool.py | HTTPConnectionPool.close | def close(self):
"""
Close all pooled connections and disable the pool.
"""
# Disable access to the pool
old_pool, self.pool = self.pool, None
try:
while True:
conn = old_pool.get(block=False)
if conn:
conn.close()
except queue.Empty:
pass | python | def close(self):
"""
Close all pooled connections and disable the pool.
"""
# Disable access to the pool
old_pool, self.pool = self.pool, None
try:
while True:
conn = old_pool.get(block=False)
if conn:
conn.close()
except queue.Empty:
pass | ['def', 'close', '(', 'self', ')', ':', '# Disable access to the pool', 'old_pool', ',', 'self', '.', 'pool', '=', 'self', '.', 'pool', ',', 'None', 'try', ':', 'while', 'True', ':', 'conn', '=', 'old_pool', '.', 'get', '(', 'block', '=', 'False', ')', 'if', 'conn', ':', 'conn', '.', 'close', '(', ')', 'except', 'queue', '.', 'Empty', ':', 'pass'] | Close all pooled connections and disable the pool. | ['Close', 'all', 'pooled', 'connections', 'and', 'disable', 'the', 'pool', '.'] | train | https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/urllib3/connectionpool.py#L410-L424 |
4,026 | wavefrontHQ/python-client | wavefront_api_client/models/alert.py | Alert.alert_type | def alert_type(self, alert_type):
"""Sets the alert_type of this Alert.
Alert type. # noqa: E501
:param alert_type: The alert_type of this Alert. # noqa: E501
:type: str
"""
allowed_values = ["CLASSIC", "THRESHOLD"] # noqa: E501
if alert_type not in allowed_values:
raise ValueError(
"Invalid value for `alert_type` ({0}), must be one of {1}" # noqa: E501
.format(alert_type, allowed_values)
)
self._alert_type = alert_type | python | def alert_type(self, alert_type):
"""Sets the alert_type of this Alert.
Alert type. # noqa: E501
:param alert_type: The alert_type of this Alert. # noqa: E501
:type: str
"""
allowed_values = ["CLASSIC", "THRESHOLD"] # noqa: E501
if alert_type not in allowed_values:
raise ValueError(
"Invalid value for `alert_type` ({0}), must be one of {1}" # noqa: E501
.format(alert_type, allowed_values)
)
self._alert_type = alert_type | ['def', 'alert_type', '(', 'self', ',', 'alert_type', ')', ':', 'allowed_values', '=', '[', '"CLASSIC"', ',', '"THRESHOLD"', ']', '# noqa: E501', 'if', 'alert_type', 'not', 'in', 'allowed_values', ':', 'raise', 'ValueError', '(', '"Invalid value for `alert_type` ({0}), must be one of {1}"', '# noqa: E501', '.', 'format', '(', 'alert_type', ',', 'allowed_values', ')', ')', 'self', '.', '_alert_type', '=', 'alert_type'] | Sets the alert_type of this Alert.
Alert type. # noqa: E501
:param alert_type: The alert_type of this Alert. # noqa: E501
:type: str | ['Sets', 'the', 'alert_type', 'of', 'this', 'Alert', '.'] | train | https://github.com/wavefrontHQ/python-client/blob/b0f1046a8f68c2c7d69e395f7167241f224c738a/wavefront_api_client/models/alert.py#L395-L410 |
4,027 | saltstack/salt | salt/modules/opkg.py | _create_repo | def _create_repo(line, filename):
'''
Create repo
'''
repo = {}
if line.startswith('#'):
repo['enabled'] = False
line = line[1:]
else:
repo['enabled'] = True
cols = salt.utils.args.shlex_split(line.strip())
repo['compressed'] = not cols[0] in 'src'
repo['name'] = cols[1]
repo['uri'] = cols[2]
repo['file'] = os.path.join(OPKG_CONFDIR, filename)
if len(cols) > 3:
_set_repo_options(repo, cols[3:])
return repo | python | def _create_repo(line, filename):
'''
Create repo
'''
repo = {}
if line.startswith('#'):
repo['enabled'] = False
line = line[1:]
else:
repo['enabled'] = True
cols = salt.utils.args.shlex_split(line.strip())
repo['compressed'] = not cols[0] in 'src'
repo['name'] = cols[1]
repo['uri'] = cols[2]
repo['file'] = os.path.join(OPKG_CONFDIR, filename)
if len(cols) > 3:
_set_repo_options(repo, cols[3:])
return repo | ['def', '_create_repo', '(', 'line', ',', 'filename', ')', ':', 'repo', '=', '{', '}', 'if', 'line', '.', 'startswith', '(', "'#'", ')', ':', 'repo', '[', "'enabled'", ']', '=', 'False', 'line', '=', 'line', '[', '1', ':', ']', 'else', ':', 'repo', '[', "'enabled'", ']', '=', 'True', 'cols', '=', 'salt', '.', 'utils', '.', 'args', '.', 'shlex_split', '(', 'line', '.', 'strip', '(', ')', ')', 'repo', '[', "'compressed'", ']', '=', 'not', 'cols', '[', '0', ']', 'in', "'src'", 'repo', '[', "'name'", ']', '=', 'cols', '[', '1', ']', 'repo', '[', "'uri'", ']', '=', 'cols', '[', '2', ']', 'repo', '[', "'file'", ']', '=', 'os', '.', 'path', '.', 'join', '(', 'OPKG_CONFDIR', ',', 'filename', ')', 'if', 'len', '(', 'cols', ')', '>', '3', ':', '_set_repo_options', '(', 'repo', ',', 'cols', '[', '3', ':', ']', ')', 'return', 'repo'] | Create repo | ['Create', 'repo'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/opkg.py#L1274-L1291 |
4,028 | sethmlarson/virtualbox-python | virtualbox/library.py | IMedium.resize | def resize(self, logical_size):
"""Starts resizing this medium. This means that the nominal size of the
medium is set to the new value. Both increasing and decreasing the
size is possible, and there are no safety checks, since VirtualBox
does not make any assumptions about the medium contents.
Resizing usually needs additional disk space, and possibly also
some temporary disk space. Note that resize does not create a full
temporary copy of the medium, so the additional disk space requirement
is usually much lower than using the clone operation.
This medium will be placed to :py:attr:`MediumState.locked_write`
state for the duration of this operation.
Please note that the results can be either returned straight away,
or later as the result of the background operation via the object
returned via the @a progress parameter.
in logical_size of type int
New nominal capacity of the medium in bytes.
return progress of type :class:`IProgress`
Progress object to track the operation completion.
raises :class:`VBoxErrorNotSupported`
Medium format does not support resizing.
"""
if not isinstance(logical_size, baseinteger):
raise TypeError("logical_size can only be an instance of type baseinteger")
progress = self._call("resize",
in_p=[logical_size])
progress = IProgress(progress)
return progress | python | def resize(self, logical_size):
"""Starts resizing this medium. This means that the nominal size of the
medium is set to the new value. Both increasing and decreasing the
size is possible, and there are no safety checks, since VirtualBox
does not make any assumptions about the medium contents.
Resizing usually needs additional disk space, and possibly also
some temporary disk space. Note that resize does not create a full
temporary copy of the medium, so the additional disk space requirement
is usually much lower than using the clone operation.
This medium will be placed to :py:attr:`MediumState.locked_write`
state for the duration of this operation.
Please note that the results can be either returned straight away,
or later as the result of the background operation via the object
returned via the @a progress parameter.
in logical_size of type int
New nominal capacity of the medium in bytes.
return progress of type :class:`IProgress`
Progress object to track the operation completion.
raises :class:`VBoxErrorNotSupported`
Medium format does not support resizing.
"""
if not isinstance(logical_size, baseinteger):
raise TypeError("logical_size can only be an instance of type baseinteger")
progress = self._call("resize",
in_p=[logical_size])
progress = IProgress(progress)
return progress | ['def', 'resize', '(', 'self', ',', 'logical_size', ')', ':', 'if', 'not', 'isinstance', '(', 'logical_size', ',', 'baseinteger', ')', ':', 'raise', 'TypeError', '(', '"logical_size can only be an instance of type baseinteger"', ')', 'progress', '=', 'self', '.', '_call', '(', '"resize"', ',', 'in_p', '=', '[', 'logical_size', ']', ')', 'progress', '=', 'IProgress', '(', 'progress', ')', 'return', 'progress'] | Starts resizing this medium. This means that the nominal size of the
medium is set to the new value. Both increasing and decreasing the
size is possible, and there are no safety checks, since VirtualBox
does not make any assumptions about the medium contents.
Resizing usually needs additional disk space, and possibly also
some temporary disk space. Note that resize does not create a full
temporary copy of the medium, so the additional disk space requirement
is usually much lower than using the clone operation.
This medium will be placed to :py:attr:`MediumState.locked_write`
state for the duration of this operation.
Please note that the results can be either returned straight away,
or later as the result of the background operation via the object
returned via the @a progress parameter.
in logical_size of type int
New nominal capacity of the medium in bytes.
return progress of type :class:`IProgress`
Progress object to track the operation completion.
raises :class:`VBoxErrorNotSupported`
Medium format does not support resizing. | ['Starts', 'resizing', 'this', 'medium', '.', 'This', 'means', 'that', 'the', 'nominal', 'size', 'of', 'the', 'medium', 'is', 'set', 'to', 'the', 'new', 'value', '.', 'Both', 'increasing', 'and', 'decreasing', 'the', 'size', 'is', 'possible', 'and', 'there', 'are', 'no', 'safety', 'checks', 'since', 'VirtualBox', 'does', 'not', 'make', 'any', 'assumptions', 'about', 'the', 'medium', 'contents', '.', 'Resizing', 'usually', 'needs', 'additional', 'disk', 'space', 'and', 'possibly', 'also', 'some', 'temporary', 'disk', 'space', '.', 'Note', 'that', 'resize', 'does', 'not', 'create', 'a', 'full', 'temporary', 'copy', 'of', 'the', 'medium', 'so', 'the', 'additional', 'disk', 'space', 'requirement', 'is', 'usually', 'much', 'lower', 'than', 'using', 'the', 'clone', 'operation', '.', 'This', 'medium', 'will', 'be', 'placed', 'to', ':', 'py', ':', 'attr', ':', 'MediumState', '.', 'locked_write', 'state', 'for', 'the', 'duration', 'of', 'this', 'operation', '.', 'Please', 'note', 'that', 'the', 'results', 'can', 'be', 'either', 'returned', 'straight', 'away', 'or', 'later', 'as', 'the', 'result', 'of', 'the', 'background', 'operation', 'via', 'the', 'object', 'returned', 'via', 'the', '@a', 'progress', 'parameter', '.'] | train | https://github.com/sethmlarson/virtualbox-python/blob/706c8e3f6e3aee17eb06458e73cbb4bc2d37878b/virtualbox/library.py#L23749-L23782 |
4,029 | estnltk/estnltk | estnltk/text.py | Text.verb_chain_texts | def verb_chain_texts(self):
"""The list of texts of ``verb_chains`` layer elements."""
if not self.is_tagged(VERB_CHAINS):
self.tag_verb_chains()
return self.texts(VERB_CHAINS) | python | def verb_chain_texts(self):
"""The list of texts of ``verb_chains`` layer elements."""
if not self.is_tagged(VERB_CHAINS):
self.tag_verb_chains()
return self.texts(VERB_CHAINS) | ['def', 'verb_chain_texts', '(', 'self', ')', ':', 'if', 'not', 'self', '.', 'is_tagged', '(', 'VERB_CHAINS', ')', ':', 'self', '.', 'tag_verb_chains', '(', ')', 'return', 'self', '.', 'texts', '(', 'VERB_CHAINS', ')'] | The list of texts of ``verb_chains`` layer elements. | ['The', 'list', 'of', 'texts', 'of', 'verb_chains', 'layer', 'elements', '.'] | train | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/text.py#L1047-L1051 |
4,030 | tensorflow/tensor2tensor | tensor2tensor/trax/history.py | History.metrics_for_mode | def metrics_for_mode(self, mode):
"""Metrics available for a given mode."""
if mode not in self._values:
logging.info("Mode %s not found", mode)
return []
return sorted(list(self._values[mode].keys())) | python | def metrics_for_mode(self, mode):
"""Metrics available for a given mode."""
if mode not in self._values:
logging.info("Mode %s not found", mode)
return []
return sorted(list(self._values[mode].keys())) | ['def', 'metrics_for_mode', '(', 'self', ',', 'mode', ')', ':', 'if', 'mode', 'not', 'in', 'self', '.', '_values', ':', 'logging', '.', 'info', '(', '"Mode %s not found"', ',', 'mode', ')', 'return', '[', ']', 'return', 'sorted', '(', 'list', '(', 'self', '.', '_values', '[', 'mode', ']', '.', 'keys', '(', ')', ')', ')'] | Metrics available for a given mode. | ['Metrics', 'available', 'for', 'a', 'given', 'mode', '.'] | train | https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/trax/history.py#L70-L75 |
4,031 | gem/oq-engine | openquake/hazardlib/valid.py | probabilities | def probabilities(value, rows=0, cols=0):
"""
:param value: input string, comma separated or space separated
:param rows: the number of rows if the floats are in a matrix (0 otherwise)
:param cols: the number of columns if the floats are in a matrix (or 0
:returns: a list of probabilities
>>> probabilities('')
[]
>>> probabilities('1')
[1.0]
>>> probabilities('0.1 0.2')
[0.1, 0.2]
>>> probabilities('0.1, 0.2') # commas are ignored
[0.1, 0.2]
"""
probs = list(map(probability, value.replace(',', ' ').split()))
if rows and cols:
probs = numpy.array(probs).reshape((len(rows), len(cols)))
return probs | python | def probabilities(value, rows=0, cols=0):
"""
:param value: input string, comma separated or space separated
:param rows: the number of rows if the floats are in a matrix (0 otherwise)
:param cols: the number of columns if the floats are in a matrix (or 0
:returns: a list of probabilities
>>> probabilities('')
[]
>>> probabilities('1')
[1.0]
>>> probabilities('0.1 0.2')
[0.1, 0.2]
>>> probabilities('0.1, 0.2') # commas are ignored
[0.1, 0.2]
"""
probs = list(map(probability, value.replace(',', ' ').split()))
if rows and cols:
probs = numpy.array(probs).reshape((len(rows), len(cols)))
return probs | ['def', 'probabilities', '(', 'value', ',', 'rows', '=', '0', ',', 'cols', '=', '0', ')', ':', 'probs', '=', 'list', '(', 'map', '(', 'probability', ',', 'value', '.', 'replace', '(', "','", ',', "' '", ')', '.', 'split', '(', ')', ')', ')', 'if', 'rows', 'and', 'cols', ':', 'probs', '=', 'numpy', '.', 'array', '(', 'probs', ')', '.', 'reshape', '(', '(', 'len', '(', 'rows', ')', ',', 'len', '(', 'cols', ')', ')', ')', 'return', 'probs'] | :param value: input string, comma separated or space separated
:param rows: the number of rows if the floats are in a matrix (0 otherwise)
:param cols: the number of columns if the floats are in a matrix (or 0
:returns: a list of probabilities
>>> probabilities('')
[]
>>> probabilities('1')
[1.0]
>>> probabilities('0.1 0.2')
[0.1, 0.2]
>>> probabilities('0.1, 0.2') # commas are ignored
[0.1, 0.2] | [':', 'param', 'value', ':', 'input', 'string', 'comma', 'separated', 'or', 'space', 'separated', ':', 'param', 'rows', ':', 'the', 'number', 'of', 'rows', 'if', 'the', 'floats', 'are', 'in', 'a', 'matrix', '(', '0', 'otherwise', ')', ':', 'param', 'cols', ':', 'the', 'number', 'of', 'columns', 'if', 'the', 'floats', 'are', 'in', 'a', 'matrix', '(', 'or', '0', ':', 'returns', ':', 'a', 'list', 'of', 'probabilities'] | train | https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hazardlib/valid.py#L587-L606 |
4,032 | wonambi-python/wonambi | wonambi/trans/analyze.py | event_params | def event_params(segments, params, band=None, n_fft=None, slopes=None,
prep=None, parent=None):
"""Compute event parameters.
Parameters
----------
segments : instance of wonambi.trans.select.Segments
list of segments, with time series and metadata
params : dict of bool, or str
'dur', 'minamp', 'maxamp', 'ptp', 'rms', 'power', 'peakf', 'energy',
'peakef'. If 'all', a dict will be created with these keys and all
values as True, so that all parameters are returned.
band : tuple of float
band of interest for power and energy
n_fft : int
length of FFT. if shorter than input signal, signal is truncated; if
longer, signal is zero-padded to length
slopes : dict of bool
'avg_slope', 'max_slope', 'prep', 'invert'
prep : dict of bool
same keys as params. if True, segment['trans_data'] will be used as dat
parent : QMainWindow
for use with GUI only
Returns
-------
list of dict
list of segments, with time series, metadata and parameters
"""
if parent is not None:
progress = QProgressDialog('Computing parameters', 'Abort',
0, len(segments) - 1, parent)
progress.setWindowModality(Qt.ApplicationModal)
param_keys = ['dur', 'minamp', 'maxamp', 'ptp', 'rms', 'power', 'peakpf',
'energy', 'peakef']
if params == 'all':
params = {k: 1 for k in param_keys}
if prep is None:
prep = {k: 0 for k in param_keys}
if band is None:
band = (None, None)
params_out = []
evt_output = False
for i, seg in enumerate(segments):
out = dict(seg)
dat = seg['data']
if params['dur']:
out['dur'] = float(dat.number_of('time')) / dat.s_freq
evt_output = True
if params['minamp']:
dat1 = dat
if prep['minamp']:
dat1 = seg['trans_data']
out['minamp'] = math(dat1, operator=_amin, axis='time')
evt_output = True
if params['maxamp']:
dat1 = dat
if prep['maxamp']:
dat1 = seg['trans_data']
out['maxamp'] = math(dat1, operator=_amax, axis='time')
evt_output = True
if params['ptp']:
dat1 = dat
if prep['ptp']:
dat1 = seg['trans_data']
out['ptp'] = math(dat1, operator=_ptp, axis='time')
evt_output = True
if params['rms']:
dat1 = dat
if prep['rms']:
dat1 = seg['trans_data']
out['rms'] = math(dat1, operator=(square, _mean, sqrt),
axis='time')
evt_output = True
for pw, pk in [('power', 'peakpf'), ('energy', 'peakef')]:
if params[pw] or params[pk]:
evt_output = True
if prep[pw] or prep[pk]:
prep_pw, prep_pk = band_power(seg['trans_data'], band,
scaling=pw, n_fft=n_fft)
if not (prep[pw] and prep[pk]):
raw_pw, raw_pk = band_power(dat, band,
scaling=pw, n_fft=n_fft)
if prep[pw]:
out[pw] = prep_pw
else:
out[pw] = raw_pw
if prep[pk]:
out[pk] = prep_pk
else:
out[pk] = raw_pk
if slopes:
evt_output = True
out['slope'] = {}
dat1 = dat
if slopes['prep']:
dat1 = seg['trans_data']
if slopes['invert']:
dat1 = math(dat1, operator=negative, axis='time')
if slopes['avg_slope'] and slopes['max_slope']:
level = 'all'
elif slopes['avg_slope']:
level = 'average'
else:
level = 'maximum'
for chan in dat1.axis['chan'][0]:
d = dat1(chan=chan)[0]
out['slope'][chan] = get_slopes(d, dat.s_freq, level=level)
if evt_output:
timeline = dat.axis['time'][0]
out['start'] = timeline[0]
out['end'] = timeline[-1]
params_out.append(out)
if parent:
progress.setValue(i)
if progress.wasCanceled():
msg = 'Analysis canceled by user.'
parent.statusBar().showMessage(msg)
return
if parent:
progress.close()
return params_out | python | def event_params(segments, params, band=None, n_fft=None, slopes=None,
prep=None, parent=None):
"""Compute event parameters.
Parameters
----------
segments : instance of wonambi.trans.select.Segments
list of segments, with time series and metadata
params : dict of bool, or str
'dur', 'minamp', 'maxamp', 'ptp', 'rms', 'power', 'peakf', 'energy',
'peakef'. If 'all', a dict will be created with these keys and all
values as True, so that all parameters are returned.
band : tuple of float
band of interest for power and energy
n_fft : int
length of FFT. if shorter than input signal, signal is truncated; if
longer, signal is zero-padded to length
slopes : dict of bool
'avg_slope', 'max_slope', 'prep', 'invert'
prep : dict of bool
same keys as params. if True, segment['trans_data'] will be used as dat
parent : QMainWindow
for use with GUI only
Returns
-------
list of dict
list of segments, with time series, metadata and parameters
"""
if parent is not None:
progress = QProgressDialog('Computing parameters', 'Abort',
0, len(segments) - 1, parent)
progress.setWindowModality(Qt.ApplicationModal)
param_keys = ['dur', 'minamp', 'maxamp', 'ptp', 'rms', 'power', 'peakpf',
'energy', 'peakef']
if params == 'all':
params = {k: 1 for k in param_keys}
if prep is None:
prep = {k: 0 for k in param_keys}
if band is None:
band = (None, None)
params_out = []
evt_output = False
for i, seg in enumerate(segments):
out = dict(seg)
dat = seg['data']
if params['dur']:
out['dur'] = float(dat.number_of('time')) / dat.s_freq
evt_output = True
if params['minamp']:
dat1 = dat
if prep['minamp']:
dat1 = seg['trans_data']
out['minamp'] = math(dat1, operator=_amin, axis='time')
evt_output = True
if params['maxamp']:
dat1 = dat
if prep['maxamp']:
dat1 = seg['trans_data']
out['maxamp'] = math(dat1, operator=_amax, axis='time')
evt_output = True
if params['ptp']:
dat1 = dat
if prep['ptp']:
dat1 = seg['trans_data']
out['ptp'] = math(dat1, operator=_ptp, axis='time')
evt_output = True
if params['rms']:
dat1 = dat
if prep['rms']:
dat1 = seg['trans_data']
out['rms'] = math(dat1, operator=(square, _mean, sqrt),
axis='time')
evt_output = True
for pw, pk in [('power', 'peakpf'), ('energy', 'peakef')]:
if params[pw] or params[pk]:
evt_output = True
if prep[pw] or prep[pk]:
prep_pw, prep_pk = band_power(seg['trans_data'], band,
scaling=pw, n_fft=n_fft)
if not (prep[pw] and prep[pk]):
raw_pw, raw_pk = band_power(dat, band,
scaling=pw, n_fft=n_fft)
if prep[pw]:
out[pw] = prep_pw
else:
out[pw] = raw_pw
if prep[pk]:
out[pk] = prep_pk
else:
out[pk] = raw_pk
if slopes:
evt_output = True
out['slope'] = {}
dat1 = dat
if slopes['prep']:
dat1 = seg['trans_data']
if slopes['invert']:
dat1 = math(dat1, operator=negative, axis='time')
if slopes['avg_slope'] and slopes['max_slope']:
level = 'all'
elif slopes['avg_slope']:
level = 'average'
else:
level = 'maximum'
for chan in dat1.axis['chan'][0]:
d = dat1(chan=chan)[0]
out['slope'][chan] = get_slopes(d, dat.s_freq, level=level)
if evt_output:
timeline = dat.axis['time'][0]
out['start'] = timeline[0]
out['end'] = timeline[-1]
params_out.append(out)
if parent:
progress.setValue(i)
if progress.wasCanceled():
msg = 'Analysis canceled by user.'
parent.statusBar().showMessage(msg)
return
if parent:
progress.close()
return params_out | ['def', 'event_params', '(', 'segments', ',', 'params', ',', 'band', '=', 'None', ',', 'n_fft', '=', 'None', ',', 'slopes', '=', 'None', ',', 'prep', '=', 'None', ',', 'parent', '=', 'None', ')', ':', 'if', 'parent', 'is', 'not', 'None', ':', 'progress', '=', 'QProgressDialog', '(', "'Computing parameters'", ',', "'Abort'", ',', '0', ',', 'len', '(', 'segments', ')', '-', '1', ',', 'parent', ')', 'progress', '.', 'setWindowModality', '(', 'Qt', '.', 'ApplicationModal', ')', 'param_keys', '=', '[', "'dur'", ',', "'minamp'", ',', "'maxamp'", ',', "'ptp'", ',', "'rms'", ',', "'power'", ',', "'peakpf'", ',', "'energy'", ',', "'peakef'", ']', 'if', 'params', '==', "'all'", ':', 'params', '=', '{', 'k', ':', '1', 'for', 'k', 'in', 'param_keys', '}', 'if', 'prep', 'is', 'None', ':', 'prep', '=', '{', 'k', ':', '0', 'for', 'k', 'in', 'param_keys', '}', 'if', 'band', 'is', 'None', ':', 'band', '=', '(', 'None', ',', 'None', ')', 'params_out', '=', '[', ']', 'evt_output', '=', 'False', 'for', 'i', ',', 'seg', 'in', 'enumerate', '(', 'segments', ')', ':', 'out', '=', 'dict', '(', 'seg', ')', 'dat', '=', 'seg', '[', "'data'", ']', 'if', 'params', '[', "'dur'", ']', ':', 'out', '[', "'dur'", ']', '=', 'float', '(', 'dat', '.', 'number_of', '(', "'time'", ')', ')', '/', 'dat', '.', 's_freq', 'evt_output', '=', 'True', 'if', 'params', '[', "'minamp'", ']', ':', 'dat1', '=', 'dat', 'if', 'prep', '[', "'minamp'", ']', ':', 'dat1', '=', 'seg', '[', "'trans_data'", ']', 'out', '[', "'minamp'", ']', '=', 'math', '(', 'dat1', ',', 'operator', '=', '_amin', ',', 'axis', '=', "'time'", ')', 'evt_output', '=', 'True', 'if', 'params', '[', "'maxamp'", ']', ':', 'dat1', '=', 'dat', 'if', 'prep', '[', "'maxamp'", ']', ':', 'dat1', '=', 'seg', '[', "'trans_data'", ']', 'out', '[', "'maxamp'", ']', '=', 'math', '(', 'dat1', ',', 'operator', '=', '_amax', ',', 'axis', '=', "'time'", ')', 'evt_output', '=', 'True', 'if', 'params', '[', "'ptp'", ']', ':', 'dat1', '=', 'dat', 'if', 'prep', '[', "'ptp'", ']', ':', 'dat1', '=', 'seg', '[', "'trans_data'", ']', 'out', '[', "'ptp'", ']', '=', 'math', '(', 'dat1', ',', 'operator', '=', '_ptp', ',', 'axis', '=', "'time'", ')', 'evt_output', '=', 'True', 'if', 'params', '[', "'rms'", ']', ':', 'dat1', '=', 'dat', 'if', 'prep', '[', "'rms'", ']', ':', 'dat1', '=', 'seg', '[', "'trans_data'", ']', 'out', '[', "'rms'", ']', '=', 'math', '(', 'dat1', ',', 'operator', '=', '(', 'square', ',', '_mean', ',', 'sqrt', ')', ',', 'axis', '=', "'time'", ')', 'evt_output', '=', 'True', 'for', 'pw', ',', 'pk', 'in', '[', '(', "'power'", ',', "'peakpf'", ')', ',', '(', "'energy'", ',', "'peakef'", ')', ']', ':', 'if', 'params', '[', 'pw', ']', 'or', 'params', '[', 'pk', ']', ':', 'evt_output', '=', 'True', 'if', 'prep', '[', 'pw', ']', 'or', 'prep', '[', 'pk', ']', ':', 'prep_pw', ',', 'prep_pk', '=', 'band_power', '(', 'seg', '[', "'trans_data'", ']', ',', 'band', ',', 'scaling', '=', 'pw', ',', 'n_fft', '=', 'n_fft', ')', 'if', 'not', '(', 'prep', '[', 'pw', ']', 'and', 'prep', '[', 'pk', ']', ')', ':', 'raw_pw', ',', 'raw_pk', '=', 'band_power', '(', 'dat', ',', 'band', ',', 'scaling', '=', 'pw', ',', 'n_fft', '=', 'n_fft', ')', 'if', 'prep', '[', 'pw', ']', ':', 'out', '[', 'pw', ']', '=', 'prep_pw', 'else', ':', 'out', '[', 'pw', ']', '=', 'raw_pw', 'if', 'prep', '[', 'pk', ']', ':', 'out', '[', 'pk', ']', '=', 'prep_pk', 'else', ':', 'out', '[', 'pk', ']', '=', 'raw_pk', 'if', 'slopes', ':', 'evt_output', '=', 'True', 'out', '[', "'slope'", ']', '=', '{', '}', 'dat1', 
'=', 'dat', 'if', 'slopes', '[', "'prep'", ']', ':', 'dat1', '=', 'seg', '[', "'trans_data'", ']', 'if', 'slopes', '[', "'invert'", ']', ':', 'dat1', '=', 'math', '(', 'dat1', ',', 'operator', '=', 'negative', ',', 'axis', '=', "'time'", ')', 'if', 'slopes', '[', "'avg_slope'", ']', 'and', 'slopes', '[', "'max_slope'", ']', ':', 'level', '=', "'all'", 'elif', 'slopes', '[', "'avg_slope'", ']', ':', 'level', '=', "'average'", 'else', ':', 'level', '=', "'maximum'", 'for', 'chan', 'in', 'dat1', '.', 'axis', '[', "'chan'", ']', '[', '0', ']', ':', 'd', '=', 'dat1', '(', 'chan', '=', 'chan', ')', '[', '0', ']', 'out', '[', "'slope'", ']', '[', 'chan', ']', '=', 'get_slopes', '(', 'd', ',', 'dat', '.', 's_freq', ',', 'level', '=', 'level', ')', 'if', 'evt_output', ':', 'timeline', '=', 'dat', '.', 'axis', '[', "'time'", ']', '[', '0', ']', 'out', '[', "'start'", ']', '=', 'timeline', '[', '0', ']', 'out', '[', "'end'", ']', '=', 'timeline', '[', '-', '1', ']', 'params_out', '.', 'append', '(', 'out', ')', 'if', 'parent', ':', 'progress', '.', 'setValue', '(', 'i', ')', 'if', 'progress', '.', 'wasCanceled', '(', ')', ':', 'msg', '=', "'Analysis canceled by user.'", 'parent', '.', 'statusBar', '(', ')', '.', 'showMessage', '(', 'msg', ')', 'return', 'if', 'parent', ':', 'progress', '.', 'close', '(', ')', 'return', 'params_out'] | Compute event parameters.
Parameters
----------
segments : instance of wonambi.trans.select.Segments
list of segments, with time series and metadata
params : dict of bool, or str
'dur', 'minamp', 'maxamp', 'ptp', 'rms', 'power', 'peakf', 'energy',
'peakef'. If 'all', a dict will be created with these keys and all
values as True, so that all parameters are returned.
band : tuple of float
band of interest for power and energy
n_fft : int
length of FFT. if shorter than input signal, signal is truncated; if
longer, signal is zero-padded to length
slopes : dict of bool
'avg_slope', 'max_slope', 'prep', 'invert'
prep : dict of bool
same keys as params. if True, segment['trans_data'] will be used as dat
parent : QMainWindow
for use with GUI only
Returns
-------
list of dict
list of segments, with time series, metadata and parameters | ['Compute', 'event', 'parameters', '.', 'Parameters', '----------', 'segments', ':', 'instance', 'of', 'wonambi', '.', 'trans', '.', 'select', '.', 'Segments', 'list', 'of', 'segments', 'with', 'time', 'series', 'and', 'metadata', 'params', ':', 'dict', 'of', 'bool', 'or', 'str', 'dur', 'minamp', 'maxamp', 'ptp', 'rms', 'power', 'peakf', 'energy', 'peakef', '.', 'If', 'all', 'a', 'dict', 'will', 'be', 'created', 'with', 'these', 'keys', 'and', 'all', 'values', 'as', 'True', 'so', 'that', 'all', 'parameters', 'are', 'returned', '.', 'band', ':', 'tuple', 'of', 'float', 'band', 'of', 'interest', 'for', 'power', 'and', 'energy', 'n_fft', ':', 'int', 'length', 'of', 'FFT', '.', 'if', 'shorter', 'than', 'input', 'signal', 'signal', 'is', 'truncated', ';', 'if', 'longer', 'signal', 'is', 'zero', '-', 'padded', 'to', 'length', 'slopes', ':', 'dict', 'of', 'bool', 'avg_slope', 'max_slope', 'prep', 'invert', 'prep', ':', 'dict', 'of', 'bool', 'same', 'keys', 'as', 'params', '.', 'if', 'True', 'segment', '[', 'trans_data', ']', 'will', 'be', 'used', 'as', 'dat', 'parent', ':', 'QMainWindow', 'for', 'use', 'with', 'GUI', 'only', 'Returns', '-------', 'list', 'of', 'dict', 'list', 'of', 'segments', 'with', 'time', 'series', 'metadata', 'and', 'parameters'] | train | https://github.com/wonambi-python/wonambi/blob/1d8e3d7e53df8017c199f703bcab582914676e76/wonambi/trans/analyze.py#L25-L167 |
4,033 | kaste/mockito-python | mockito/spying.py | spy | def spy(object):
"""Spy an object.
Spying means that all functions will behave as before, so there will
be side effects, but the interactions can be verified afterwards.
Returns Dummy-like, almost empty object as proxy to `object`.
The *returned* object must be injected and used by the code under test;
after that all interactions can be verified as usual.
T.i. the original object **will not be patched**, and has no further
knowledge as before.
E.g.::
import time
time = spy(time)
# inject time
do_work(..., time)
verify(time).time()
"""
if inspect.isclass(object) or inspect.ismodule(object):
class_ = None
else:
class_ = object.__class__
class Spy(_Dummy):
if class_:
__class__ = class_
def __getattr__(self, method_name):
return RememberedProxyInvocation(theMock, method_name)
def __repr__(self):
name = 'Spied'
if class_:
name += class_.__name__
return "<%s id=%s>" % (name, id(self))
obj = Spy()
theMock = Mock(obj, strict=True, spec=object)
mock_registry.register(obj, theMock)
return obj | python | def spy(object):
"""Spy an object.
Spying means that all functions will behave as before, so there will
be side effects, but the interactions can be verified afterwards.
Returns Dummy-like, almost empty object as proxy to `object`.
The *returned* object must be injected and used by the code under test;
after that all interactions can be verified as usual.
T.i. the original object **will not be patched**, and has no further
knowledge as before.
E.g.::
import time
time = spy(time)
# inject time
do_work(..., time)
verify(time).time()
"""
if inspect.isclass(object) or inspect.ismodule(object):
class_ = None
else:
class_ = object.__class__
class Spy(_Dummy):
if class_:
__class__ = class_
def __getattr__(self, method_name):
return RememberedProxyInvocation(theMock, method_name)
def __repr__(self):
name = 'Spied'
if class_:
name += class_.__name__
return "<%s id=%s>" % (name, id(self))
obj = Spy()
theMock = Mock(obj, strict=True, spec=object)
mock_registry.register(obj, theMock)
return obj | ['def', 'spy', '(', 'object', ')', ':', 'if', 'inspect', '.', 'isclass', '(', 'object', ')', 'or', 'inspect', '.', 'ismodule', '(', 'object', ')', ':', 'class_', '=', 'None', 'else', ':', 'class_', '=', 'object', '.', '__class__', 'class', 'Spy', '(', '_Dummy', ')', ':', 'if', 'class_', ':', '__class__', '=', 'class_', 'def', '__getattr__', '(', 'self', ',', 'method_name', ')', ':', 'return', 'RememberedProxyInvocation', '(', 'theMock', ',', 'method_name', ')', 'def', '__repr__', '(', 'self', ')', ':', 'name', '=', "'Spied'", 'if', 'class_', ':', 'name', '+=', 'class_', '.', '__name__', 'return', '"<%s id=%s>"', '%', '(', 'name', ',', 'id', '(', 'self', ')', ')', 'obj', '=', 'Spy', '(', ')', 'theMock', '=', 'Mock', '(', 'obj', ',', 'strict', '=', 'True', ',', 'spec', '=', 'object', ')', 'mock_registry', '.', 'register', '(', 'obj', ',', 'theMock', ')', 'return', 'obj'] | Spy an object.
Spying means that all functions will behave as before, so they will
have side effects, but the interactions can be verified afterwards.
Returns a Dummy-like, almost empty object as a proxy to `object`.
The *returned* object must be injected and used by the code under test;
after that all interactions can be verified as usual.
T.i. the original object **will not be patched**, and has no further
knowledge as before.
E.g.::
import time
time = spy(time)
# inject time
do_work(..., time)
verify(time).time() | ['Spy', 'an', 'object', '.'] | train | https://github.com/kaste/mockito-python/blob/d6b22b003f56ee5b156dbd9d8ba209faf35b6713/mockito/spying.py#L33-L78 |
4,034 | valohai/ulid2 | ulid2.py | generate_ulid_as_uuid | def generate_ulid_as_uuid(timestamp=None, monotonic=False):
"""
Generate an ULID, but expressed as an UUID.
:param timestamp: An optional timestamp override.
If `None`, the current time is used.
:type timestamp: int|float|datetime.datetime|None
:param monotonic: Attempt to ensure ULIDs are monotonically increasing.
Monotonic behavior is not guaranteed when used from multiple threads.
:type monotonic: bool
:return: UUID containing ULID data.
:rtype: uuid.UUID
"""
return uuid.UUID(bytes=generate_binary_ulid(timestamp, monotonic=monotonic)) | python | def generate_ulid_as_uuid(timestamp=None, monotonic=False):
"""
Generate an ULID, but expressed as an UUID.
:param timestamp: An optional timestamp override.
If `None`, the current time is used.
:type timestamp: int|float|datetime.datetime|None
:param monotonic: Attempt to ensure ULIDs are monotonically increasing.
Monotonic behavior is not guaranteed when used from multiple threads.
:type monotonic: bool
:return: UUID containing ULID data.
:rtype: uuid.UUID
"""
return uuid.UUID(bytes=generate_binary_ulid(timestamp, monotonic=monotonic)) | ['def', 'generate_ulid_as_uuid', '(', 'timestamp', '=', 'None', ',', 'monotonic', '=', 'False', ')', ':', 'return', 'uuid', '.', 'UUID', '(', 'bytes', '=', 'generate_binary_ulid', '(', 'timestamp', ',', 'monotonic', '=', 'monotonic', ')', ')'] | Generate an ULID, but expressed as an UUID.
:param timestamp: An optional timestamp override.
If `None`, the current time is used.
:type timestamp: int|float|datetime.datetime|None
:param monotonic: Attempt to ensure ULIDs are monotonically increasing.
Monotonic behavior is not guaranteed when used from multiple threads.
:type monotonic: bool
:return: UUID containing ULID data.
:rtype: uuid.UUID | ['Generate', 'an', 'ULID', 'but', 'expressed', 'as', 'an', 'UUID', '.'] | train | https://github.com/valohai/ulid2/blob/cebc523ac70c5d5ca055c0c3de6318de617b07d7/ulid2.py#L208-L221 |
4,035 | zhanglab/psamm | psamm/fluxanalysis.py | flux_variability | def flux_variability(model, reactions, fixed, tfba, solver):
"""Find the variability of each reaction while fixing certain fluxes.
Yields the reaction id, and a tuple of minimum and maximum value for each
of the given reactions. The fixed reactions are given in a dictionary as
a reaction id to value mapping.
This is an implementation of flux variability analysis (FVA) as described
in [Mahadevan03]_.
Args:
model: MetabolicModel to solve.
reactions: Reactions on which to report variablity.
fixed: dict of additional lower bounds on reaction fluxes.
tfba: If True enable thermodynamic constraints.
solver: LP solver instance to use.
Returns:
Iterator over pairs of reaction ID and bounds. Bounds are returned as
pairs of lower and upper values.
"""
fba = _get_fba_problem(model, tfba, solver)
for reaction_id, value in iteritems(fixed):
flux = fba.get_flux_var(reaction_id)
fba.prob.add_linear_constraints(flux >= value)
def min_max_solve(reaction_id):
for direction in (-1, 1):
yield fba.flux_bound(reaction_id, direction)
# Solve for each reaction
for reaction_id in reactions:
yield reaction_id, tuple(min_max_solve(reaction_id)) | python | def flux_variability(model, reactions, fixed, tfba, solver):
"""Find the variability of each reaction while fixing certain fluxes.
Yields the reaction id, and a tuple of minimum and maximum value for each
of the given reactions. The fixed reactions are given in a dictionary as
a reaction id to value mapping.
This is an implementation of flux variability analysis (FVA) as described
in [Mahadevan03]_.
Args:
model: MetabolicModel to solve.
reactions: Reactions on which to report variablity.
fixed: dict of additional lower bounds on reaction fluxes.
tfba: If True enable thermodynamic constraints.
solver: LP solver instance to use.
Returns:
Iterator over pairs of reaction ID and bounds. Bounds are returned as
pairs of lower and upper values.
"""
fba = _get_fba_problem(model, tfba, solver)
for reaction_id, value in iteritems(fixed):
flux = fba.get_flux_var(reaction_id)
fba.prob.add_linear_constraints(flux >= value)
def min_max_solve(reaction_id):
for direction in (-1, 1):
yield fba.flux_bound(reaction_id, direction)
# Solve for each reaction
for reaction_id in reactions:
yield reaction_id, tuple(min_max_solve(reaction_id)) | ['def', 'flux_variability', '(', 'model', ',', 'reactions', ',', 'fixed', ',', 'tfba', ',', 'solver', ')', ':', 'fba', '=', '_get_fba_problem', '(', 'model', ',', 'tfba', ',', 'solver', ')', 'for', 'reaction_id', ',', 'value', 'in', 'iteritems', '(', 'fixed', ')', ':', 'flux', '=', 'fba', '.', 'get_flux_var', '(', 'reaction_id', ')', 'fba', '.', 'prob', '.', 'add_linear_constraints', '(', 'flux', '>=', 'value', ')', 'def', 'min_max_solve', '(', 'reaction_id', ')', ':', 'for', 'direction', 'in', '(', '-', '1', ',', '1', ')', ':', 'yield', 'fba', '.', 'flux_bound', '(', 'reaction_id', ',', 'direction', ')', '# Solve for each reaction', 'for', 'reaction_id', 'in', 'reactions', ':', 'yield', 'reaction_id', ',', 'tuple', '(', 'min_max_solve', '(', 'reaction_id', ')', ')'] | Find the variability of each reaction while fixing certain fluxes.
Yields the reaction id, and a tuple of minimum and maximum value for each
of the given reactions. The fixed reactions are given in a dictionary as
a reaction id to value mapping.
This is an implementation of flux variability analysis (FVA) as described
in [Mahadevan03]_.
Args:
model: MetabolicModel to solve.
reactions: Reactions on which to report variablity.
fixed: dict of additional lower bounds on reaction fluxes.
tfba: If True enable thermodynamic constraints.
solver: LP solver instance to use.
Returns:
Iterator over pairs of reaction ID and bounds. Bounds are returned as
pairs of lower and upper values. | ['Find', 'the', 'variability', 'of', 'each', 'reaction', 'while', 'fixing', 'certain', 'fluxes', '.'] | train | https://github.com/zhanglab/psamm/blob/dc427848c4f9d109ca590f0afa024c63b685b3f4/psamm/fluxanalysis.py#L323-L357 |
4,036 | luckydonald/pytgbot | code_generation/code_generator_template.py | Function.class_name | def class_name(self) -> str:
"""
Makes the first letter big, keeps the rest of the camelCaseApiName.
"""
if not self.api_name: # empty string
return self.api_name
# end if
return self.api_name[0].upper() + self.api_name[1:] | python | def class_name(self) -> str:
"""
Makes the first letter big, keeps the rest of the camelCaseApiName.
"""
if not self.api_name: # empty string
return self.api_name
# end if
return self.api_name[0].upper() + self.api_name[1:] | ['def', 'class_name', '(', 'self', ')', '->', 'str', ':', 'if', 'not', 'self', '.', 'api_name', ':', '# empty string', 'return', 'self', '.', 'api_name', '# end if', 'return', 'self', '.', 'api_name', '[', '0', ']', '.', 'upper', '(', ')', '+', 'self', '.', 'api_name', '[', '1', ':', ']'] | Makes the fist letter big, keep the rest of the camelCaseApiName. | ['Makes', 'the', 'fist', 'letter', 'big', 'keep', 'the', 'rest', 'of', 'the', 'camelCaseApiName', '.'] | train | https://github.com/luckydonald/pytgbot/blob/67f4b5a1510d4583d40b5477e876b1ef0eb8971b/code_generation/code_generator_template.py#L224-L231 |
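The property is a one-line capitalisation rule; a standalone sketch (not pytgbot code) that mirrors it:

def to_class_name(api_name):
    # Capitalise only the first letter and keep the camelCase tail intact.
    if not api_name:  # empty string stays empty
        return api_name
    return api_name[0].upper() + api_name[1:]

assert to_class_name('sendMessage') == 'SendMessage'
assert to_class_name('') == ''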
4,037 | lwcook/horsetail-matching | horsetailmatching/hm.py | _matrix_grad | def _matrix_grad(q, h, h_dx, t, t_prime):
''' Returns the gradient with respect to a single variable'''
N = len(q)
W = np.zeros([N, N])
Wprime = np.zeros([N, N])
for i in range(N):
W[i, i] = 0.5*(h[min(i+1, N-1)] - h[max(i-1, 0)])
Wprime[i, i] = \
0.5*(h_dx[min(i+1, N-1)] - h_dx[max(i-1, 0)])
tgrad = np.array([t_prime[i]*h_dx[i] for i in np.arange(N)])
grad = 2.0*(q - t).T.dot(W).dot(-1.0*tgrad) \
+ (q - t).T.dot(Wprime).dot(q - t)
return grad | python | def _matrix_grad(q, h, h_dx, t, t_prime):
''' Returns the gradient with respect to a single variable'''
N = len(q)
W = np.zeros([N, N])
Wprime = np.zeros([N, N])
for i in range(N):
W[i, i] = 0.5*(h[min(i+1, N-1)] - h[max(i-1, 0)])
Wprime[i, i] = \
0.5*(h_dx[min(i+1, N-1)] - h_dx[max(i-1, 0)])
tgrad = np.array([t_prime[i]*h_dx[i] for i in np.arange(N)])
grad = 2.0*(q - t).T.dot(W).dot(-1.0*tgrad) \
+ (q - t).T.dot(Wprime).dot(q - t)
return grad | ['def', '_matrix_grad', '(', 'q', ',', 'h', ',', 'h_dx', ',', 't', ',', 't_prime', ')', ':', 'N', '=', 'len', '(', 'q', ')', 'W', '=', 'np', '.', 'zeros', '(', '[', 'N', ',', 'N', ']', ')', 'Wprime', '=', 'np', '.', 'zeros', '(', '[', 'N', ',', 'N', ']', ')', 'for', 'i', 'in', 'range', '(', 'N', ')', ':', 'W', '[', 'i', ',', 'i', ']', '=', '0.5', '*', '(', 'h', '[', 'min', '(', 'i', '+', '1', ',', 'N', '-', '1', ')', ']', '-', 'h', '[', 'max', '(', 'i', '-', '1', ',', '0', ')', ']', ')', 'Wprime', '[', 'i', ',', 'i', ']', '=', '0.5', '*', '(', 'h_dx', '[', 'min', '(', 'i', '+', '1', ',', 'N', '-', '1', ')', ']', '-', 'h_dx', '[', 'max', '(', 'i', '-', '1', ',', '0', ')', ']', ')', 'tgrad', '=', 'np', '.', 'array', '(', '[', 't_prime', '[', 'i', ']', '*', 'h_dx', '[', 'i', ']', 'for', 'i', 'in', 'np', '.', 'arange', '(', 'N', ')', ']', ')', 'grad', '=', '2.0', '*', '(', 'q', '-', 't', ')', '.', 'T', '.', 'dot', '(', 'W', ')', '.', 'dot', '(', '-', '1.0', '*', 'tgrad', ')', '+', '(', 'q', '-', 't', ')', '.', 'T', '.', 'dot', '(', 'Wprime', ')', '.', 'dot', '(', 'q', '-', 't', ')', 'return', 'grad'] | Returns the gradient with respect to a single variable | ['Returns', 'the', 'gradient', 'with', 'respect', 'to', 'a', 'single', 'variable'] | train | https://github.com/lwcook/horsetail-matching/blob/f3d5f8d01249debbca978f412ce4eae017458119/horsetailmatching/hm.py#L938-L954 |
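Read as a formula, the code above differentiates the weighted squared mismatch between q and the target t with respect to one design variable x (a reading of the code itself, not a statement from the package documentation):

\[
\frac{\partial}{\partial x}(q-t)^{\mathsf T} W (q-t)
  = 2\,(q-t)^{\mathsf T} W\!\left(-\,t' \odot \frac{\partial h}{\partial x}\right)
  + (q-t)^{\mathsf T} \frac{\partial W}{\partial x}(q-t),
\qquad W_{ii} = \tfrac{1}{2}\,(h_{i+1}-h_{i-1}),
\]

where t' is t_prime, \partial h/\partial x is h_dx, and the boundary indices are clamped exactly as in the min/max calls.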
4,038 | splunk/splunk-sdk-python | examples/analytics/bottle.py | Bottle.close | def close(self):
''' Close the application and all installed plugins. '''
for plugin in self.plugins:
if hasattr(plugin, 'close'): plugin.close()
self.stopped = True | python | def close(self):
''' Close the application and all installed plugins. '''
for plugin in self.plugins:
if hasattr(plugin, 'close'): plugin.close()
self.stopped = True | ['def', 'close', '(', 'self', ')', ':', 'for', 'plugin', 'in', 'self', '.', 'plugins', ':', 'if', 'hasattr', '(', 'plugin', ',', "'close'", ')', ':', 'plugin', '.', 'close', '(', ')', 'self', '.', 'stopped', '=', 'True'] | Close the application and all installed plugins. | ['Close', 'the', 'application', 'and', 'all', 'installed', 'plugins', '.'] | train | https://github.com/splunk/splunk-sdk-python/blob/a245a4eeb93b3621730418008e31715912bcdcd8/examples/analytics/bottle.py#L493-L497 |
4,039 | ludeeus/pycfdns | pycfdns/__init__.py | CloudflareUpdater.get_recordInfo | def get_recordInfo(self, headers, zoneID, zone, records):
"""Get the information of the records."""
if 'None' in records: #If ['None'] in record argument, query all.
recordQueryEnpoint = '/' + zoneID + '/dns_records&per_page=100'
recordUrl = self.BASE_URL + recordQueryEnpoint
recordRequest = requests.get(recordUrl, headers=headers)
recordResponse = recordRequest.json()['result']
dev = []
num = 0
for value in recordResponse:
recordName = recordResponse[num]['name']
dev.append(recordName)
num = num + 1
records = dev
updateRecords = []
for record in records:
if zone in record:
recordFullname = record
else:
recordFullname = record + '.' + zone
recordQuery = '/' + zoneID + '/dns_records?name=' + recordFullname
recordUrl = self.BASE_URL + recordQuery
recordInfoRequest = requests.get(recordUrl, headers=headers)
recordInfoResponse = recordInfoRequest.json()['result'][0]
recordID = recordInfoResponse['id']
recordType = recordInfoResponse['type']
recordProxy = str(recordInfoResponse['proxied'])
recordContent = recordInfoResponse['content']
if recordProxy == 'True':
recordProxied = True
else:
recordProxied = False
updateRecords.append([recordID, recordFullname, recordType,
recordContent, recordProxied])
return updateRecords | python | def get_recordInfo(self, headers, zoneID, zone, records):
"""Get the information of the records."""
if 'None' in records: #If ['None'] in record argument, query all.
recordQueryEnpoint = '/' + zoneID + '/dns_records&per_page=100'
recordUrl = self.BASE_URL + recordQueryEnpoint
recordRequest = requests.get(recordUrl, headers=headers)
recordResponse = recordRequest.json()['result']
dev = []
num = 0
for value in recordResponse:
recordName = recordResponse[num]['name']
dev.append(recordName)
num = num + 1
records = dev
updateRecords = []
for record in records:
if zone in record:
recordFullname = record
else:
recordFullname = record + '.' + zone
recordQuery = '/' + zoneID + '/dns_records?name=' + recordFullname
recordUrl = self.BASE_URL + recordQuery
recordInfoRequest = requests.get(recordUrl, headers=headers)
recordInfoResponse = recordInfoRequest.json()['result'][0]
recordID = recordInfoResponse['id']
recordType = recordInfoResponse['type']
recordProxy = str(recordInfoResponse['proxied'])
recordContent = recordInfoResponse['content']
if recordProxy == 'True':
recordProxied = True
else:
recordProxied = False
updateRecords.append([recordID, recordFullname, recordType,
recordContent, recordProxied])
return updateRecords | ['def', 'get_recordInfo', '(', 'self', ',', 'headers', ',', 'zoneID', ',', 'zone', ',', 'records', ')', ':', 'if', "'None'", 'in', 'records', ':', "#If ['None'] in record argument, query all.", 'recordQueryEnpoint', '=', "'/'", '+', 'zoneID', '+', "'/dns_records&per_page=100'", 'recordUrl', '=', 'self', '.', 'BASE_URL', '+', 'recordQueryEnpoint', 'recordRequest', '=', 'requests', '.', 'get', '(', 'recordUrl', ',', 'headers', '=', 'headers', ')', 'recordResponse', '=', 'recordRequest', '.', 'json', '(', ')', '[', "'result'", ']', 'dev', '=', '[', ']', 'num', '=', '0', 'for', 'value', 'in', 'recordResponse', ':', 'recordName', '=', 'recordResponse', '[', 'num', ']', '[', "'name'", ']', 'dev', '.', 'append', '(', 'recordName', ')', 'num', '=', 'num', '+', '1', 'records', '=', 'dev', 'updateRecords', '=', '[', ']', 'for', 'record', 'in', 'records', ':', 'if', 'zone', 'in', 'record', ':', 'recordFullname', '=', 'record', 'else', ':', 'recordFullname', '=', 'record', '+', "'.'", '+', 'zone', 'recordQuery', '=', "'/'", '+', 'zoneID', '+', "'/dns_records?name='", '+', 'recordFullname', 'recordUrl', '=', 'self', '.', 'BASE_URL', '+', 'recordQuery', 'recordInfoRequest', '=', 'requests', '.', 'get', '(', 'recordUrl', ',', 'headers', '=', 'headers', ')', 'recordInfoResponse', '=', 'recordInfoRequest', '.', 'json', '(', ')', '[', "'result'", ']', '[', '0', ']', 'recordID', '=', 'recordInfoResponse', '[', "'id'", ']', 'recordType', '=', 'recordInfoResponse', '[', "'type'", ']', 'recordProxy', '=', 'str', '(', 'recordInfoResponse', '[', "'proxied'", ']', ')', 'recordContent', '=', 'recordInfoResponse', '[', "'content'", ']', 'if', 'recordProxy', '==', "'True'", ':', 'recordProxied', '=', 'True', 'else', ':', 'recordProxied', '=', 'False', 'updateRecords', '.', 'append', '(', '[', 'recordID', ',', 'recordFullname', ',', 'recordType', ',', 'recordContent', ',', 'recordProxied', ']', ')', 'return', 'updateRecords'] | Get the information of the records. | ['Get', 'the', 'information', 'of', 'the', 'records', '.'] | train | https://github.com/ludeeus/pycfdns/blob/0fd027be49d67250f85f2398d006a9409a7dae28/pycfdns/__init__.py#L33-L67 |
4,040 | saltstack/salt | salt/utils/openstack/nova.py | SaltNova.boot | def boot(self, name, flavor_id=0, image_id=0, timeout=300, **kwargs):
'''
Boot a cloud server.
'''
nt_ks = self.compute_conn
kwargs['name'] = name
kwargs['flavor'] = flavor_id
kwargs['image'] = image_id or None
ephemeral = kwargs.pop('ephemeral', [])
block_device = kwargs.pop('block_device', [])
boot_volume = kwargs.pop('boot_volume', None)
snapshot = kwargs.pop('snapshot', None)
swap = kwargs.pop('swap', None)
kwargs['block_device_mapping_v2'] = _parse_block_device_mapping_v2(
block_device=block_device, boot_volume=boot_volume, snapshot=snapshot,
ephemeral=ephemeral, swap=swap
)
response = nt_ks.servers.create(**kwargs)
self.uuid = response.id
self.password = getattr(response, 'adminPass', None)
start = time.time()
trycount = 0
while True:
trycount += 1
try:
return self.server_show_libcloud(self.uuid)
except Exception as exc:
log.debug(
'Server information not yet available: %s', exc
)
time.sleep(1)
if time.time() - start > timeout:
log.error('Timed out after %s seconds '
'while waiting for data', timeout)
return False
log.debug(
'Retrying server_show() (try %s)', trycount
) | python | def boot(self, name, flavor_id=0, image_id=0, timeout=300, **kwargs):
'''
Boot a cloud server.
'''
nt_ks = self.compute_conn
kwargs['name'] = name
kwargs['flavor'] = flavor_id
kwargs['image'] = image_id or None
ephemeral = kwargs.pop('ephemeral', [])
block_device = kwargs.pop('block_device', [])
boot_volume = kwargs.pop('boot_volume', None)
snapshot = kwargs.pop('snapshot', None)
swap = kwargs.pop('swap', None)
kwargs['block_device_mapping_v2'] = _parse_block_device_mapping_v2(
block_device=block_device, boot_volume=boot_volume, snapshot=snapshot,
ephemeral=ephemeral, swap=swap
)
response = nt_ks.servers.create(**kwargs)
self.uuid = response.id
self.password = getattr(response, 'adminPass', None)
start = time.time()
trycount = 0
while True:
trycount += 1
try:
return self.server_show_libcloud(self.uuid)
except Exception as exc:
log.debug(
'Server information not yet available: %s', exc
)
time.sleep(1)
if time.time() - start > timeout:
log.error('Timed out after %s seconds '
'while waiting for data', timeout)
return False
log.debug(
'Retrying server_show() (try %s)', trycount
) | ['def', 'boot', '(', 'self', ',', 'name', ',', 'flavor_id', '=', '0', ',', 'image_id', '=', '0', ',', 'timeout', '=', '300', ',', '*', '*', 'kwargs', ')', ':', 'nt_ks', '=', 'self', '.', 'compute_conn', 'kwargs', '[', "'name'", ']', '=', 'name', 'kwargs', '[', "'flavor'", ']', '=', 'flavor_id', 'kwargs', '[', "'image'", ']', '=', 'image_id', 'or', 'None', 'ephemeral', '=', 'kwargs', '.', 'pop', '(', "'ephemeral'", ',', '[', ']', ')', 'block_device', '=', 'kwargs', '.', 'pop', '(', "'block_device'", ',', '[', ']', ')', 'boot_volume', '=', 'kwargs', '.', 'pop', '(', "'boot_volume'", ',', 'None', ')', 'snapshot', '=', 'kwargs', '.', 'pop', '(', "'snapshot'", ',', 'None', ')', 'swap', '=', 'kwargs', '.', 'pop', '(', "'swap'", ',', 'None', ')', 'kwargs', '[', "'block_device_mapping_v2'", ']', '=', '_parse_block_device_mapping_v2', '(', 'block_device', '=', 'block_device', ',', 'boot_volume', '=', 'boot_volume', ',', 'snapshot', '=', 'snapshot', ',', 'ephemeral', '=', 'ephemeral', ',', 'swap', '=', 'swap', ')', 'response', '=', 'nt_ks', '.', 'servers', '.', 'create', '(', '*', '*', 'kwargs', ')', 'self', '.', 'uuid', '=', 'response', '.', 'id', 'self', '.', 'password', '=', 'getattr', '(', 'response', ',', "'adminPass'", ',', 'None', ')', 'start', '=', 'time', '.', 'time', '(', ')', 'trycount', '=', '0', 'while', 'True', ':', 'trycount', '+=', '1', 'try', ':', 'return', 'self', '.', 'server_show_libcloud', '(', 'self', '.', 'uuid', ')', 'except', 'Exception', 'as', 'exc', ':', 'log', '.', 'debug', '(', "'Server information not yet available: %s'", ',', 'exc', ')', 'time', '.', 'sleep', '(', '1', ')', 'if', 'time', '.', 'time', '(', ')', '-', 'start', '>', 'timeout', ':', 'log', '.', 'error', '(', "'Timed out after %s seconds '", "'while waiting for data'", ',', 'timeout', ')', 'return', 'False', 'log', '.', 'debug', '(', "'Retrying server_show() (try %s)'", ',', 'trycount', ')'] | Boot a cloud server. | ['Boot', 'a', 'cloud', 'server', '.'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/openstack/nova.py#L496-L535 |
4,041 | colab/colab-superarchives-plugin | src/colab_superarchives/views.py | EmailView.post | def post(self, request, key):
"""Create new email address that will wait for validation"""
email = request.POST.get('email')
user_id = request.POST.get('user')
if not email:
return http.HttpResponseBadRequest()
try:
EmailAddressValidation.objects.create(address=email,
user_id=user_id)
except IntegrityError:
# 409 Conflict
# duplicated entries
# email exist and it's waiting for validation
return http.HttpResponse(status=409)
return http.HttpResponse(status=201) | python | def post(self, request, key):
"""Create new email address that will wait for validation"""
email = request.POST.get('email')
user_id = request.POST.get('user')
if not email:
return http.HttpResponseBadRequest()
try:
EmailAddressValidation.objects.create(address=email,
user_id=user_id)
except IntegrityError:
# 409 Conflict
# duplicated entries
# email exist and it's waiting for validation
return http.HttpResponse(status=409)
return http.HttpResponse(status=201) | ['def', 'post', '(', 'self', ',', 'request', ',', 'key', ')', ':', 'email', '=', 'request', '.', 'POST', '.', 'get', '(', "'email'", ')', 'user_id', '=', 'request', '.', 'POST', '.', 'get', '(', "'user'", ')', 'if', 'not', 'email', ':', 'return', 'http', '.', 'HttpResponseBadRequest', '(', ')', 'try', ':', 'EmailAddressValidation', '.', 'objects', '.', 'create', '(', 'address', '=', 'email', ',', 'user_id', '=', 'user_id', ')', 'except', 'IntegrityError', ':', '# 409 Conflict', '# duplicated entries', "# email exist and it's waiting for validation", 'return', 'http', '.', 'HttpResponse', '(', 'status', '=', '409', ')', 'return', 'http', '.', 'HttpResponse', '(', 'status', '=', '201', ')'] | Create new email address that will wait for validation | ['Create', 'new', 'email', 'address', 'that', 'will', 'wait', 'for', 'validation'] | train | https://github.com/colab/colab-superarchives-plugin/blob/fe588a1d4fac874ccad2063ee19a857028a22721/src/colab_superarchives/views.py#L202-L219 |
4,042 | twisted/mantissa | xmantissa/ampserver.py | AMPConfiguration.getFactory | def getFactory(self):
"""
Return a server factory which creates AMP protocol instances.
"""
factory = ServerFactory()
def protocol():
proto = CredReceiver()
proto.portal = Portal(
self.loginSystem,
[self.loginSystem,
OneTimePadChecker(self._oneTimePads)])
return proto
factory.protocol = protocol
return factory | python | def getFactory(self):
"""
Return a server factory which creates AMP protocol instances.
"""
factory = ServerFactory()
def protocol():
proto = CredReceiver()
proto.portal = Portal(
self.loginSystem,
[self.loginSystem,
OneTimePadChecker(self._oneTimePads)])
return proto
factory.protocol = protocol
return factory | ['def', 'getFactory', '(', 'self', ')', ':', 'factory', '=', 'ServerFactory', '(', ')', 'def', 'protocol', '(', ')', ':', 'proto', '=', 'CredReceiver', '(', ')', 'proto', '.', 'portal', '=', 'Portal', '(', 'self', '.', 'loginSystem', ',', '[', 'self', '.', 'loginSystem', ',', 'OneTimePadChecker', '(', 'self', '.', '_oneTimePads', ')', ']', ')', 'return', 'proto', 'factory', '.', 'protocol', '=', 'protocol', 'return', 'factory'] | Return a server factory which creates AMP protocol instances. | ['Return', 'a', 'server', 'factory', 'which', 'creates', 'AMP', 'protocol', 'instances', '.'] | train | https://github.com/twisted/mantissa/blob/53e5502aba23ce99be78b27f923a276593033fe8/xmantissa/ampserver.py#L78-L91 |
4,043 | franciscogarate/pyliferisk | pyliferisk/__init__.py | Cx | def Cx(mt, x):
""" Return the Cx """
return ((1 / (1 + mt.i)) ** (x + 1)) * mt.dx[x] * ((1 + mt.i) ** 0.5) | python | def Cx(mt, x):
""" Return the Cx """
return ((1 / (1 + mt.i)) ** (x + 1)) * mt.dx[x] * ((1 + mt.i) ** 0.5) | ['def', 'Cx', '(', 'mt', ',', 'x', ')', ':', 'return', '(', '(', '1', '/', '(', '1', '+', 'mt', '.', 'i', ')', ')', '**', '(', 'x', '+', '1', ')', ')', '*', 'mt', '.', 'dx', '[', 'x', ']', '*', '(', '(', '1', '+', 'mt', '.', 'i', ')', '**', '0.5', ')'] | Return the Cx | ['Return', 'the', 'Cx'] | train | https://github.com/franciscogarate/pyliferisk/blob/8d906bed04df1ba00fa1cacc6f31030ce5ab6233/pyliferisk/__init__.py#L268-L270 |
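In standard commutation notation the value returned above is

\[
C_x = v^{\,x+1}\, d_x\, (1+i)^{1/2}, \qquad v = \frac{1}{1+i},
\]

with d_x the deaths column of the mortality table mt and i its technical interest rate; the (1+i)^{1/2} factor is the half-year adjustment visible in the code.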
4,044 | anchore/anchore | anchore/cli/toolbox.py | image_import | def image_import(infile, force):
"""Import image anchore data from a JSON file."""
ecode = 0
try:
with open(infile, 'r') as FH:
savelist = json.loads(FH.read())
except Exception as err:
anchore_print_err("could not load input file: " + str(err))
ecode = 1
if ecode == 0:
for record in savelist:
try:
imageId = record['image']['imageId']
if contexts['anchore_db'].is_image_present(imageId) and not force:
anchore_print("image ("+str(imageId)+") already exists in DB, skipping import.")
else:
imagedata = record['image']['imagedata']
try:
rc = contexts['anchore_db'].save_image_new(imageId, report=imagedata)
if not rc:
contexts['anchore_db'].delete_image(imageId)
raise Exception("save to anchore DB failed")
except Exception as err:
contexts['anchore_db'].delete_image(imageId)
raise err
except Exception as err:
anchore_print_err("could not store image ("+str(imageId)+") from import file: "+ str(err))
ecode = 1
sys.exit(ecode) | python | def image_import(infile, force):
"""Import image anchore data from a JSON file."""
ecode = 0
try:
with open(infile, 'r') as FH:
savelist = json.loads(FH.read())
except Exception as err:
anchore_print_err("could not load input file: " + str(err))
ecode = 1
if ecode == 0:
for record in savelist:
try:
imageId = record['image']['imageId']
if contexts['anchore_db'].is_image_present(imageId) and not force:
anchore_print("image ("+str(imageId)+") already exists in DB, skipping import.")
else:
imagedata = record['image']['imagedata']
try:
rc = contexts['anchore_db'].save_image_new(imageId, report=imagedata)
if not rc:
contexts['anchore_db'].delete_image(imageId)
raise Exception("save to anchore DB failed")
except Exception as err:
contexts['anchore_db'].delete_image(imageId)
raise err
except Exception as err:
anchore_print_err("could not store image ("+str(imageId)+") from import file: "+ str(err))
ecode = 1
sys.exit(ecode) | ['def', 'image_import', '(', 'infile', ',', 'force', ')', ':', 'ecode', '=', '0', 'try', ':', 'with', 'open', '(', 'infile', ',', "'r'", ')', 'as', 'FH', ':', 'savelist', '=', 'json', '.', 'loads', '(', 'FH', '.', 'read', '(', ')', ')', 'except', 'Exception', 'as', 'err', ':', 'anchore_print_err', '(', '"could not load input file: "', '+', 'str', '(', 'err', ')', ')', 'ecode', '=', '1', 'if', 'ecode', '==', '0', ':', 'for', 'record', 'in', 'savelist', ':', 'try', ':', 'imageId', '=', 'record', '[', "'image'", ']', '[', "'imageId'", ']', 'if', 'contexts', '[', "'anchore_db'", ']', '.', 'is_image_present', '(', 'imageId', ')', 'and', 'not', 'force', ':', 'anchore_print', '(', '"image ("', '+', 'str', '(', 'imageId', ')', '+', '") already exists in DB, skipping import."', ')', 'else', ':', 'imagedata', '=', 'record', '[', "'image'", ']', '[', "'imagedata'", ']', 'try', ':', 'rc', '=', 'contexts', '[', "'anchore_db'", ']', '.', 'save_image_new', '(', 'imageId', ',', 'report', '=', 'imagedata', ')', 'if', 'not', 'rc', ':', 'contexts', '[', "'anchore_db'", ']', '.', 'delete_image', '(', 'imageId', ')', 'raise', 'Exception', '(', '"save to anchore DB failed"', ')', 'except', 'Exception', 'as', 'err', ':', 'contexts', '[', "'anchore_db'", ']', '.', 'delete_image', '(', 'imageId', ')', 'raise', 'err', 'except', 'Exception', 'as', 'err', ':', 'anchore_print_err', '(', '"could not store image ("', '+', 'str', '(', 'imageId', ')', '+', '") from import file: "', '+', 'str', '(', 'err', ')', ')', 'ecode', '=', '1', 'sys', '.', 'exit', '(', 'ecode', ')'] | Import image anchore data from a JSON file. | ['Import', 'image', 'anchore', 'data', 'from', 'a', 'JSON', 'file', '.'] | train | https://github.com/anchore/anchore/blob/8a4d5b9708e27856312d303aae3f04f3c72039d6/anchore/cli/toolbox.py#L421-L452 |
4,045 | mistio/mist.client | src/mistclient/model.py | Machine.get_stats | def get_stats(self, start=int(time()), stop=int(time())+10, step=10):
"""
Get stats of a monitored machine
:param start: Time formatted as integer, from when to fetch stats (default now)
:param stop: Time formatted as integer, until when to fetch stats (default +10 seconds)
:param step: Step to fetch stats (default 10 seconds)
:returns: A dict of stats
"""
payload = {
'v': 2,
'start': start,
'stop': stop,
'step': step
}
data = json.dumps(payload)
req = self.request(self.mist_client.uri+"/clouds/"+self.cloud.id+"/machines/"+self.id+"/stats", data=data)
stats = req.get().json()
return stats | python | def get_stats(self, start=int(time()), stop=int(time())+10, step=10):
"""
Get stats of a monitored machine
:param start: Time formatted as integer, from when to fetch stats (default now)
:param stop: Time formatted as integer, until when to fetch stats (default +10 seconds)
:param step: Step to fetch stats (default 10 seconds)
:returns: A dict of stats
"""
payload = {
'v': 2,
'start': start,
'stop': stop,
'step': step
}
data = json.dumps(payload)
req = self.request(self.mist_client.uri+"/clouds/"+self.cloud.id+"/machines/"+self.id+"/stats", data=data)
stats = req.get().json()
return stats | ['def', 'get_stats', '(', 'self', ',', 'start', '=', 'int', '(', 'time', '(', ')', ')', ',', 'stop', '=', 'int', '(', 'time', '(', ')', ')', '+', '10', ',', 'step', '=', '10', ')', ':', 'payload', '=', '{', "'v'", ':', '2', ',', "'start'", ':', 'start', ',', "'stop'", ':', 'stop', ',', "'step'", ':', 'step', '}', 'data', '=', 'json', '.', 'dumps', '(', 'payload', ')', 'req', '=', 'self', '.', 'request', '(', 'self', '.', 'mist_client', '.', 'uri', '+', '"/clouds/"', '+', 'self', '.', 'cloud', '.', 'id', '+', '"/machines/"', '+', 'self', '.', 'id', '+', '"/stats"', ',', 'data', '=', 'data', ')', 'stats', '=', 'req', '.', 'get', '(', ')', '.', 'json', '(', ')', 'return', 'stats'] | Get stats of a monitored machine
:param start: Time formatted as integer, from when to fetch stats (default now)
:param stop: Time formatted as integer, until when to fetch stats (default +10 seconds)
:param step: Step to fetch stats (default 10 seconds)
:returns: A dict of stats | ['Get', 'stats', 'of', 'a', 'monitored', 'machine'] | train | https://github.com/mistio/mist.client/blob/bc190af2cba358fa556a69b205c12a77a34eb2a8/src/mistclient/model.py#L557-L576 |
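A hedged usage sketch; `machine` is assumed to be an existing, monitored Machine instance obtained from the client beforehand, and only the dict return type promised by the docstring is relied on:

from time import time

stats = machine.get_stats(start=int(time()) - 600,  # last ten minutes
                          stop=int(time()),
                          step=60)                   # one point per minute
for metric in stats:
    print(metric)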
4,046 | unionbilling/union-python | union/models.py | BaseModel.save | def save(self):
'''
Save an instance of a Union object
'''
client = self._new_api_client()
params = {'id': self.id} if hasattr(self, 'id') else {}
action = 'patch' if hasattr(self, 'id') else 'post'
saved_model = client.make_request(self, action, url_params=params, post_data=self._to_json)
self.__init__(**saved_model._to_dict) | python | def save(self):
'''
Save an instance of a Union object
'''
client = self._new_api_client()
params = {'id': self.id} if hasattr(self, 'id') else {}
action = 'patch' if hasattr(self, 'id') else 'post'
saved_model = client.make_request(self, action, url_params=params, post_data=self._to_json)
self.__init__(**saved_model._to_dict) | ['def', 'save', '(', 'self', ')', ':', 'client', '=', 'self', '.', '_new_api_client', '(', ')', 'params', '=', '{', "'id'", ':', 'self', '.', 'id', '}', 'if', 'hasattr', '(', 'self', ',', "'id'", ')', 'else', '{', '}', 'action', '=', "'patch'", 'if', 'hasattr', '(', 'self', ',', "'id'", ')', 'else', "'post'", 'saved_model', '=', 'client', '.', 'make_request', '(', 'self', ',', 'action', ',', 'url_params', '=', 'params', ',', 'post_data', '=', 'self', '.', '_to_json', ')', 'self', '.', '__init__', '(', '*', '*', 'saved_model', '.', '_to_dict', ')'] | Save an instance of a Union object | ['Save', 'an', 'instance', 'of', 'a', 'Union', 'object'] | train | https://github.com/unionbilling/union-python/blob/551e4fc1a0b395b632781d80527a3660a7c67c0c/union/models.py#L61-L69 |
4,047 | CityOfZion/neo-python-core | neocore/IO/BinaryWriter.py | BinaryWriter.WriteUInt256 | def WriteUInt256(self, value):
"""
Write a UInt256 type to the stream.
Args:
value (UInt256):
Raises:
Exception: when `value` is not of neocore.UInt256 type.
"""
if type(value) is UInt256:
value.Serialize(self)
else:
raise Exception("Cannot write value that is not UInt256") | python | def WriteUInt256(self, value):
"""
Write a UInt256 type to the stream.
Args:
value (UInt256):
Raises:
Exception: when `value` is not of neocore.UInt256 type.
"""
if type(value) is UInt256:
value.Serialize(self)
else:
raise Exception("Cannot write value that is not UInt256") | ['def', 'WriteUInt256', '(', 'self', ',', 'value', ')', ':', 'if', 'type', '(', 'value', ')', 'is', 'UInt256', ':', 'value', '.', 'Serialize', '(', 'self', ')', 'else', ':', 'raise', 'Exception', '(', '"Cannot write value that is not UInt256"', ')'] | Write a UInt256 type to the stream.
Args:
value (UInt256):
Raises:
Exception: when `value` is not of neocore.UInt256 type. | ['Write', 'a', 'UInt256', 'type', 'to', 'the', 'stream', '.'] | train | https://github.com/CityOfZion/neo-python-core/blob/786c02cc2f41712d70b1f064ae3d67f86167107f/neocore/IO/BinaryWriter.py#L289-L302 |
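A hedged sketch of the success and failure paths; the BinaryWriter and UInt256 constructors shown are assumptions about the surrounding library, not taken from the record above:

from io import BytesIO
from neocore.IO.BinaryWriter import BinaryWriter
from neocore.UInt256 import UInt256

stream = BytesIO()
writer = BinaryWriter(stream)

value = UInt256(data=b'\x00' * 32)   # 32-byte hash (assumed constructor)
writer.WriteUInt256(value)           # serialises the hash into the stream

try:
    writer.WriteUInt256(b'\x00' * 32)  # raw bytes, not a UInt256
except Exception as err:
    print(err)                         # "Cannot write value that is not UInt256"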
4,048 | aiogram/aiogram | aiogram/dispatcher/dispatcher.py | Dispatcher.stop_polling | def stop_polling(self):
"""
Break long-polling process.
:return:
"""
if hasattr(self, '_polling') and self._polling:
log.info('Stop polling...')
self._polling = False | python | def stop_polling(self):
"""
Break long-polling process.
:return:
"""
if hasattr(self, '_polling') and self._polling:
log.info('Stop polling...')
self._polling = False | ['def', 'stop_polling', '(', 'self', ')', ':', 'if', 'hasattr', '(', 'self', ',', "'_polling'", ')', 'and', 'self', '.', '_polling', ':', 'log', '.', 'info', '(', "'Stop polling...'", ')', 'self', '.', '_polling', '=', 'False'] | Break long-polling process.
:return: | ['Break', 'long', '-', 'polling', 'process', '.'] | train | https://github.com/aiogram/aiogram/blob/2af930149ce2482547721e2c8755c10307295e48/aiogram/dispatcher/dispatcher.py#L293-L301 |
4,049 | timstaley/voevent-parse | src/voeventparse/convenience.py | pull_astro_coords | def pull_astro_coords(voevent, index=0):
"""
Deprecated alias of :func:`.get_event_position`
"""
import warnings
warnings.warn(
"""
The function `pull_astro_coords` has been renamed to
`get_event_position`. This alias is preserved for backwards
compatibility, and may be removed in a future release.
""",
FutureWarning)
return get_event_position(voevent, index) | python | def pull_astro_coords(voevent, index=0):
"""
Deprecated alias of :func:`.get_event_position`
"""
import warnings
warnings.warn(
"""
The function `pull_astro_coords` has been renamed to
`get_event_position`. This alias is preserved for backwards
compatibility, and may be removed in a future release.
""",
FutureWarning)
return get_event_position(voevent, index) | ['def', 'pull_astro_coords', '(', 'voevent', ',', 'index', '=', '0', ')', ':', 'import', 'warnings', 'warnings', '.', 'warn', '(', '"""\n The function `pull_astro_coords` has been renamed to\n `get_event_position`. This alias is preserved for backwards\n compatibility, and may be removed in a future release.\n """', ',', 'FutureWarning', ')', 'return', 'get_event_position', '(', 'voevent', ',', 'index', ')'] | Deprecated alias of :func:`.get_event_position` | ['Deprecated', 'alias', 'of', ':', 'func', ':', '.', 'get_event_position'] | train | https://github.com/timstaley/voevent-parse/blob/58fc1eb3af5eca23d9e819c727204950615402a7/src/voeventparse/convenience.py#L186-L198 |
4,050 | Yelp/kafka-utils | kafka_utils/util/zookeeper.py | ZK.execute_plan | def execute_plan(self, plan, allow_rf_change=False):
"""Submit reassignment plan for execution."""
reassignment_path = '{admin}/{reassignment_node}'\
.format(admin=ADMIN_PATH, reassignment_node=REASSIGNMENT_NODE)
plan_json = dump_json(plan)
base_plan = self.get_cluster_plan()
if not validate_plan(plan, base_plan, allow_rf_change=allow_rf_change):
_log.error('Given plan is invalid. Aborting new reassignment plan ... {plan}'.format(plan=plan))
return False
# Send proposed-plan to zookeeper
try:
_log.info('Sending plan to Zookeeper...')
self.create(reassignment_path, plan_json, makepath=True)
_log.info(
'Re-assign partitions node in Zookeeper updated successfully '
'with {plan}'.format(plan=plan),
)
return True
except NodeExistsError:
_log.warning('Previous plan in progress. Exiting..')
_log.warning('Aborting new reassignment plan... {plan}'.format(plan=plan))
in_progress_plan = load_json(self.get(reassignment_path)[0])
in_progress_partitions = [
'{topic}-{p_id}'.format(
topic=p_data['topic'],
p_id=str(p_data['partition']),
)
for p_data in in_progress_plan['partitions']
]
_log.warning(
'{count} partition(s) reassignment currently in progress:-'
.format(count=len(in_progress_partitions)),
)
_log.warning(
'{partitions}. In Progress reassignment plan...'.format(
partitions=', '.join(in_progress_partitions),
),
)
return False
except Exception as e:
_log.error(
'Could not re-assign partitions {plan}. Error: {e}'
.format(plan=plan, e=e),
)
return False | python | def execute_plan(self, plan, allow_rf_change=False):
"""Submit reassignment plan for execution."""
reassignment_path = '{admin}/{reassignment_node}'\
.format(admin=ADMIN_PATH, reassignment_node=REASSIGNMENT_NODE)
plan_json = dump_json(plan)
base_plan = self.get_cluster_plan()
if not validate_plan(plan, base_plan, allow_rf_change=allow_rf_change):
_log.error('Given plan is invalid. Aborting new reassignment plan ... {plan}'.format(plan=plan))
return False
# Send proposed-plan to zookeeper
try:
_log.info('Sending plan to Zookeeper...')
self.create(reassignment_path, plan_json, makepath=True)
_log.info(
'Re-assign partitions node in Zookeeper updated successfully '
'with {plan}'.format(plan=plan),
)
return True
except NodeExistsError:
_log.warning('Previous plan in progress. Exiting..')
_log.warning('Aborting new reassignment plan... {plan}'.format(plan=plan))
in_progress_plan = load_json(self.get(reassignment_path)[0])
in_progress_partitions = [
'{topic}-{p_id}'.format(
topic=p_data['topic'],
p_id=str(p_data['partition']),
)
for p_data in in_progress_plan['partitions']
]
_log.warning(
'{count} partition(s) reassignment currently in progress:-'
.format(count=len(in_progress_partitions)),
)
_log.warning(
'{partitions}. In Progress reassignment plan...'.format(
partitions=', '.join(in_progress_partitions),
),
)
return False
except Exception as e:
_log.error(
'Could not re-assign partitions {plan}. Error: {e}'
.format(plan=plan, e=e),
)
return False | ['def', 'execute_plan', '(', 'self', ',', 'plan', ',', 'allow_rf_change', '=', 'False', ')', ':', 'reassignment_path', '=', "'{admin}/{reassignment_node}'", '.', 'format', '(', 'admin', '=', 'ADMIN_PATH', ',', 'reassignment_node', '=', 'REASSIGNMENT_NODE', ')', 'plan_json', '=', 'dump_json', '(', 'plan', ')', 'base_plan', '=', 'self', '.', 'get_cluster_plan', '(', ')', 'if', 'not', 'validate_plan', '(', 'plan', ',', 'base_plan', ',', 'allow_rf_change', '=', 'allow_rf_change', ')', ':', '_log', '.', 'error', '(', "'Given plan is invalid. Aborting new reassignment plan ... {plan}'", '.', 'format', '(', 'plan', '=', 'plan', ')', ')', 'return', 'False', '# Send proposed-plan to zookeeper', 'try', ':', '_log', '.', 'info', '(', "'Sending plan to Zookeeper...'", ')', 'self', '.', 'create', '(', 'reassignment_path', ',', 'plan_json', ',', 'makepath', '=', 'True', ')', '_log', '.', 'info', '(', "'Re-assign partitions node in Zookeeper updated successfully '", "'with {plan}'", '.', 'format', '(', 'plan', '=', 'plan', ')', ',', ')', 'return', 'True', 'except', 'NodeExistsError', ':', '_log', '.', 'warning', '(', "'Previous plan in progress. Exiting..'", ')', '_log', '.', 'warning', '(', "'Aborting new reassignment plan... {plan}'", '.', 'format', '(', 'plan', '=', 'plan', ')', ')', 'in_progress_plan', '=', 'load_json', '(', 'self', '.', 'get', '(', 'reassignment_path', ')', '[', '0', ']', ')', 'in_progress_partitions', '=', '[', "'{topic}-{p_id}'", '.', 'format', '(', 'topic', '=', 'p_data', '[', "'topic'", ']', ',', 'p_id', '=', 'str', '(', 'p_data', '[', "'partition'", ']', ')', ',', ')', 'for', 'p_data', 'in', 'in_progress_plan', '[', "'partitions'", ']', ']', '_log', '.', 'warning', '(', "'{count} partition(s) reassignment currently in progress:-'", '.', 'format', '(', 'count', '=', 'len', '(', 'in_progress_partitions', ')', ')', ',', ')', '_log', '.', 'warning', '(', "'{partitions}. In Progress reassignment plan...'", '.', 'format', '(', 'partitions', '=', "', '", '.', 'join', '(', 'in_progress_partitions', ')', ',', ')', ',', ')', 'return', 'False', 'except', 'Exception', 'as', 'e', ':', '_log', '.', 'error', '(', "'Could not re-assign partitions {plan}. Error: {e}'", '.', 'format', '(', 'plan', '=', 'plan', ',', 'e', '=', 'e', ')', ',', ')', 'return', 'False'] | Submit reassignment plan for execution. | ['Submit', 'reassignment', 'plan', 'for', 'execution', '.'] | train | https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/util/zookeeper.py#L483-L527 |
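A hedged invocation sketch; the cluster configuration, the reassignment plan contents, and the ZK context-manager usage are assumptions about the surrounding package:

# `cluster_config` is assumed to come from kafka-utils cluster discovery.
plan = {
    'version': 1,
    'partitions': [
        {'topic': 'my-topic', 'partition': 0, 'replicas': [1, 2]},
    ],
}

with ZK(cluster_config) as zk:
    submitted = zk.execute_plan(plan, allow_rf_change=False)
    if not submitted:
        print('Plan rejected, or another reassignment is already in progress.')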
4,051 | openego/eDisGo | edisgo/grid/components.py | Load.timeseries | def timeseries(self):
"""
Load time series
It returns the actual time series used in power flow analysis. If
:attr:`_timeseries` is not :obj:`None`, it is returned. Otherwise,
:meth:`timeseries()` looks for time series of the corresponding sector in
:class:`~.grid.network.TimeSeries` object.
Returns
-------
:pandas:`pandas.DataFrame<dataframe>`
DataFrame containing active power in kW in column 'p' and
reactive power in kVA in column 'q'.
"""
if self._timeseries is None:
if isinstance(self.grid, MVGrid):
voltage_level = 'mv'
elif isinstance(self.grid, LVGrid):
voltage_level = 'lv'
ts_total = None
for sector in self.consumption.keys():
consumption = self.consumption[sector]
# check if load time series for MV and LV are differentiated
try:
ts = self.grid.network.timeseries.load[
sector, voltage_level].to_frame('p')
except KeyError:
try:
ts = self.grid.network.timeseries.load[
sector].to_frame('p')
except KeyError:
logger.exception(
"No timeseries for load of type {} "
"given.".format(sector))
raise
ts = ts * consumption
ts_q = self.timeseries_reactive
if ts_q is not None:
ts['q'] = ts_q.q
else:
ts['q'] = ts['p'] * self.q_sign * tan(
acos(self.power_factor))
if ts_total is None:
ts_total = ts
else:
ts_total.p += ts.p
ts_total.q += ts.q
return ts_total
else:
return self._timeseries | python | def timeseries(self):
"""
Load time series
It returns the actual time series used in power flow analysis. If
:attr:`_timeseries` is not :obj:`None`, it is returned. Otherwise,
:meth:`timeseries()` looks for time series of the corresponding sector in
:class:`~.grid.network.TimeSeries` object.
Returns
-------
:pandas:`pandas.DataFrame<dataframe>`
DataFrame containing active power in kW in column 'p' and
reactive power in kVA in column 'q'.
"""
if self._timeseries is None:
if isinstance(self.grid, MVGrid):
voltage_level = 'mv'
elif isinstance(self.grid, LVGrid):
voltage_level = 'lv'
ts_total = None
for sector in self.consumption.keys():
consumption = self.consumption[sector]
# check if load time series for MV and LV are differentiated
try:
ts = self.grid.network.timeseries.load[
sector, voltage_level].to_frame('p')
except KeyError:
try:
ts = self.grid.network.timeseries.load[
sector].to_frame('p')
except KeyError:
logger.exception(
"No timeseries for load of type {} "
"given.".format(sector))
raise
ts = ts * consumption
ts_q = self.timeseries_reactive
if ts_q is not None:
ts['q'] = ts_q.q
else:
ts['q'] = ts['p'] * self.q_sign * tan(
acos(self.power_factor))
if ts_total is None:
ts_total = ts
else:
ts_total.p += ts.p
ts_total.q += ts.q
return ts_total
else:
return self._timeseries | ['def', 'timeseries', '(', 'self', ')', ':', 'if', 'self', '.', '_timeseries', 'is', 'None', ':', 'if', 'isinstance', '(', 'self', '.', 'grid', ',', 'MVGrid', ')', ':', 'voltage_level', '=', "'mv'", 'elif', 'isinstance', '(', 'self', '.', 'grid', ',', 'LVGrid', ')', ':', 'voltage_level', '=', "'lv'", 'ts_total', '=', 'None', 'for', 'sector', 'in', 'self', '.', 'consumption', '.', 'keys', '(', ')', ':', 'consumption', '=', 'self', '.', 'consumption', '[', 'sector', ']', '# check if load time series for MV and LV are differentiated', 'try', ':', 'ts', '=', 'self', '.', 'grid', '.', 'network', '.', 'timeseries', '.', 'load', '[', 'sector', ',', 'voltage_level', ']', '.', 'to_frame', '(', "'p'", ')', 'except', 'KeyError', ':', 'try', ':', 'ts', '=', 'self', '.', 'grid', '.', 'network', '.', 'timeseries', '.', 'load', '[', 'sector', ']', '.', 'to_frame', '(', "'p'", ')', 'except', 'KeyError', ':', 'logger', '.', 'exception', '(', '"No timeseries for load of type {} "', '"given."', '.', 'format', '(', 'sector', ')', ')', 'raise', 'ts', '=', 'ts', '*', 'consumption', 'ts_q', '=', 'self', '.', 'timeseries_reactive', 'if', 'ts_q', 'is', 'not', 'None', ':', 'ts', '[', "'q'", ']', '=', 'ts_q', '.', 'q', 'else', ':', 'ts', '[', "'q'", ']', '=', 'ts', '[', "'p'", ']', '*', 'self', '.', 'q_sign', '*', 'tan', '(', 'acos', '(', 'self', '.', 'power_factor', ')', ')', 'if', 'ts_total', 'is', 'None', ':', 'ts_total', '=', 'ts', 'else', ':', 'ts_total', '.', 'p', '+=', 'ts', '.', 'p', 'ts_total', '.', 'q', '+=', 'ts', '.', 'q', 'return', 'ts_total', 'else', ':', 'return', 'self', '.', '_timeseries'] | Load time series
It returns the actual time series used in power flow analysis. If
:attr:`_timeseries` is not :obj:`None`, it is returned. Otherwise,
:meth:`timeseries()` looks for time series of the corresponding sector in
:class:`~.grid.network.TimeSeries` object.
Returns
-------
:pandas:`pandas.DataFrame<dataframe>`
DataFrame containing active power in kW in column 'p' and
reactive power in kVA in column 'q'. | ['Load', 'time', 'series'] | train | https://github.com/openego/eDisGo/blob/e6245bdaf236f9c49dbda5a18c1c458290f41e2b/edisgo/grid/components.py#L178-L234 |
4,052 | bapakode/OmMongo | ommongo/fields/mapping.py | DictField.validate_wrap | def validate_wrap(self, value):
''' Checks that value is a ``dict``, that every key is a valid MongoDB
key, and that every value validates based on DictField.value_type
'''
if not isinstance(value, dict):
self._fail_validation_type(value, dict)
for k, v in value.items():
self._validate_key_wrap(k)
try:
self.value_type.validate_wrap(v)
except BadValueException as bve:
self._fail_validation(value, 'Bad value for key %s' % k, cause=bve) | python | def validate_wrap(self, value):
''' Checks that value is a ``dict``, that every key is a valid MongoDB
key, and that every value validates based on DictField.value_type
'''
if not isinstance(value, dict):
self._fail_validation_type(value, dict)
for k, v in value.items():
self._validate_key_wrap(k)
try:
self.value_type.validate_wrap(v)
except BadValueException as bve:
self._fail_validation(value, 'Bad value for key %s' % k, cause=bve) | ['def', 'validate_wrap', '(', 'self', ',', 'value', ')', ':', 'if', 'not', 'isinstance', '(', 'value', ',', 'dict', ')', ':', 'self', '.', '_fail_validation_type', '(', 'value', ',', 'dict', ')', 'for', 'k', ',', 'v', 'in', 'value', '.', 'items', '(', ')', ':', 'self', '.', '_validate_key_wrap', '(', 'k', ')', 'try', ':', 'self', '.', 'value_type', '.', 'validate_wrap', '(', 'v', ')', 'except', 'BadValueException', 'as', 'bve', ':', 'self', '.', '_fail_validation', '(', 'value', ',', "'Bad value for key %s'", '%', 'k', ',', 'cause', '=', 'bve', ')'] | Checks that value is a ``dict``, that every key is a valid MongoDB
key, and that every value validates based on DictField.value_type | ['Checks', 'that', 'value', 'is', 'a', 'dict', 'that', 'every', 'key', 'is', 'a', 'valid', 'MongoDB', 'key', 'and', 'that', 'every', 'value', 'validates', 'based', 'on', 'DictField', '.', 'value_type'] | train | https://github.com/bapakode/OmMongo/blob/52b5a5420516dc709f2d2eb065818c7973991ce3/ommongo/fields/mapping.py#L91-L102 |
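A hedged example of what the validation accepts and rejects; the field classes and import paths are assumptions about the surrounding package:

from ommongo.fields import DictField, IntField
from ommongo.exceptions import BadValueException

field = DictField(IntField())

field.validate_wrap({'count': 3})            # every key and value validates
try:
    field.validate_wrap({'count': 'three'})  # value fails IntField validation
except BadValueException as err:
    print('rejected:', err)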
4,053 | salimm/pylods | pylods/backend/pylodsc/mapper.py | CObjectMapper.copy | def copy(self):
'''
makes a clone copy of the mapper. It won't clone the serializers or deserializers and it won't copy the events
'''
try:
tmp = self.__class__()
except Exception:
tmp = self.__class__(self._pdict)
tmp._serializers = self._serializers
tmp.__deserializers = self.__deserializers
return tmp | python | def copy(self):
'''
makes a clone copy of the mapper. It won't clone the serializers or deserializers and it won't copy the events
'''
try:
tmp = self.__class__()
except Exception:
tmp = self.__class__(self._pdict)
tmp._serializers = self._serializers
tmp.__deserializers = self.__deserializers
return tmp | ['def', 'copy', '(', 'self', ')', ':', 'try', ':', 'tmp', '=', 'self', '.', '__class__', '(', ')', 'except', 'Exception', ':', 'tmp', '=', 'self', '.', '__class__', '(', 'self', '.', '_pdict', ')', 'tmp', '.', '_serializers', '=', 'self', '.', '_serializers', 'tmp', '.', '__deserializers', '=', 'self', '.', '__deserializers', 'return', 'tmp'] | makes a clone copy of the mapper. It won't clone the serializers or deserializers and it won't copy the events | ['makes', 'a', 'clone', 'copy', 'of', 'the', 'mapper', '.', 'It', 'won', 't', 'clone', 'the', 'serializers', 'or', 'deserializers', 'and', 'it', 'won', 't', 'copy', 'the', 'events'] | train | https://github.com/salimm/pylods/blob/d089e2a9afb1fa8cb6c754933fc574b512757c40/pylods/backend/pylodsc/mapper.py#L95-L107 |
4,054 | saltstack/salt | salt/modules/win_path.py | get_path | def get_path():
'''
Returns a list of items in the SYSTEM path
CLI Example:
.. code-block:: bash
salt '*' win_path.get_path
'''
ret = salt.utils.stringutils.to_unicode(
__utils__['reg.read_value'](
'HKEY_LOCAL_MACHINE',
'SYSTEM\\CurrentControlSet\\Control\\Session Manager\\Environment',
'PATH')['vdata']
).split(';')
# Trim ending backslash
return list(map(_normalize_dir, ret)) | python | def get_path():
'''
Returns a list of items in the SYSTEM path
CLI Example:
.. code-block:: bash
salt '*' win_path.get_path
'''
ret = salt.utils.stringutils.to_unicode(
__utils__['reg.read_value'](
'HKEY_LOCAL_MACHINE',
'SYSTEM\\CurrentControlSet\\Control\\Session Manager\\Environment',
'PATH')['vdata']
).split(';')
# Trim ending backslash
return list(map(_normalize_dir, ret)) | ['def', 'get_path', '(', ')', ':', 'ret', '=', 'salt', '.', 'utils', '.', 'stringutils', '.', 'to_unicode', '(', '__utils__', '[', "'reg.read_value'", ']', '(', "'HKEY_LOCAL_MACHINE'", ',', "'SYSTEM\\\\CurrentControlSet\\\\Control\\\\Session Manager\\\\Environment'", ',', "'PATH'", ')', '[', "'vdata'", ']', ')', '.', 'split', '(', "';'", ')', '# Trim ending backslash', 'return', 'list', '(', 'map', '(', '_normalize_dir', ',', 'ret', ')', ')'] | Returns a list of items in the SYSTEM path
CLI Example:
.. code-block:: bash
salt '*' win_path.get_path | ['Returns', 'a', 'list', 'of', 'items', 'in', 'the', 'SYSTEM', 'path'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/win_path.py#L76-L94 |
4,055 | DLR-RM/RAFCON | source/rafcon/core/library_manager.py | LibraryManager.get_library_state_copy_instance | def get_library_state_copy_instance(self, lib_os_path):
""" A method to get a state copy of the library specified via the lib_os_path.
:param lib_os_path: the location of the library to get a copy for
:return:
"""
# originally libraries were called like this; DO NOT DELETE; interesting for performance tests
# state_machine = storage.load_state_machine_from_path(lib_os_path)
# return state_machine.version, state_machine.root_state
# TODO observe changes on file system and update data
if lib_os_path in self._loaded_libraries:
# this list can also be taken to open library state machines TODO -> implement it -> because faster
state_machine = self._loaded_libraries[lib_os_path]
# logger.info("Take copy of {0}".format(lib_os_path))
# as long as the a library state root state is never edited so the state first has to be copied here
state_copy = copy.deepcopy(state_machine.root_state)
return state_machine.version, state_copy
else:
state_machine = storage.load_state_machine_from_path(lib_os_path)
self._loaded_libraries[lib_os_path] = state_machine
if config.global_config.get_config_value("NO_PROGRAMMATIC_CHANGE_OF_LIBRARY_STATES_PERFORMED", False):
return state_machine.version, state_machine.root_state
else:
state_copy = copy.deepcopy(state_machine.root_state)
return state_machine.version, state_copy | python | def get_library_state_copy_instance(self, lib_os_path):
""" A method to get a state copy of the library specified via the lib_os_path.
:param lib_os_path: the location of the library to get a copy for
:return:
"""
# originally libraries were called like this; DO NOT DELETE; interesting for performance tests
# state_machine = storage.load_state_machine_from_path(lib_os_path)
# return state_machine.version, state_machine.root_state
# TODO observe changes on file system and update data
if lib_os_path in self._loaded_libraries:
# this list can also be taken to open library state machines TODO -> implement it -> because faster
state_machine = self._loaded_libraries[lib_os_path]
# logger.info("Take copy of {0}".format(lib_os_path))
# as long as the a library state root state is never edited so the state first has to be copied here
state_copy = copy.deepcopy(state_machine.root_state)
return state_machine.version, state_copy
else:
state_machine = storage.load_state_machine_from_path(lib_os_path)
self._loaded_libraries[lib_os_path] = state_machine
if config.global_config.get_config_value("NO_PROGRAMMATIC_CHANGE_OF_LIBRARY_STATES_PERFORMED", False):
return state_machine.version, state_machine.root_state
else:
state_copy = copy.deepcopy(state_machine.root_state)
return state_machine.version, state_copy | ['def', 'get_library_state_copy_instance', '(', 'self', ',', 'lib_os_path', ')', ':', '# originally libraries were called like this; DO NOT DELETE; interesting for performance tests', '# state_machine = storage.load_state_machine_from_path(lib_os_path)', '# return state_machine.version, state_machine.root_state', '# TODO observe changes on file system and update data', 'if', 'lib_os_path', 'in', 'self', '.', '_loaded_libraries', ':', '# this list can also be taken to open library state machines TODO -> implement it -> because faster', 'state_machine', '=', 'self', '.', '_loaded_libraries', '[', 'lib_os_path', ']', '# logger.info("Take copy of {0}".format(lib_os_path))', '# as long as the a library state root state is never edited so the state first has to be copied here', 'state_copy', '=', 'copy', '.', 'deepcopy', '(', 'state_machine', '.', 'root_state', ')', 'return', 'state_machine', '.', 'version', ',', 'state_copy', 'else', ':', 'state_machine', '=', 'storage', '.', 'load_state_machine_from_path', '(', 'lib_os_path', ')', 'self', '.', '_loaded_libraries', '[', 'lib_os_path', ']', '=', 'state_machine', 'if', 'config', '.', 'global_config', '.', 'get_config_value', '(', '"NO_PROGRAMMATIC_CHANGE_OF_LIBRARY_STATES_PERFORMED"', ',', 'False', ')', ':', 'return', 'state_machine', '.', 'version', ',', 'state_machine', '.', 'root_state', 'else', ':', 'state_copy', '=', 'copy', '.', 'deepcopy', '(', 'state_machine', '.', 'root_state', ')', 'return', 'state_machine', '.', 'version', ',', 'state_copy'] | A method to get a state copy of the library specified via the lib_os_path.
:param lib_os_path: the location of the library to get a copy for
:return: | ['A', 'method', 'to', 'get', 'a', 'state', 'copy', 'of', 'the', 'library', 'specified', 'via', 'the', 'lib_os_path', '.'] | train | https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/core/library_manager.py#L377-L403 |
4,056 | uw-it-aca/uw-restclients | restclients/r25/__init__.py | get_resource | def get_resource(url):
"""
Issue a GET request to R25 with the given url
and return a response as an etree element.
"""
response = R25_DAO().getURL(url, {"Accept": "text/xml"})
if response.status != 200:
raise DataFailureException(url, response.status, response.data)
tree = etree.fromstring(response.data.strip())
# XHTML response is an error response
xhtml = tree.xpath("//xhtml:html", namespaces=nsmap)
if len(xhtml):
raise DataFailureException(url, 500, response.data)
return tree | python | def get_resource(url):
"""
Issue a GET request to R25 with the given url
and return a response as an etree element.
"""
response = R25_DAO().getURL(url, {"Accept": "text/xml"})
if response.status != 200:
raise DataFailureException(url, response.status, response.data)
tree = etree.fromstring(response.data.strip())
# XHTML response is an error response
xhtml = tree.xpath("//xhtml:html", namespaces=nsmap)
if len(xhtml):
raise DataFailureException(url, 500, response.data)
return tree | ['def', 'get_resource', '(', 'url', ')', ':', 'response', '=', 'R25_DAO', '(', ')', '.', 'getURL', '(', 'url', ',', '{', '"Accept"', ':', '"text/xml"', '}', ')', 'if', 'response', '.', 'status', '!=', '200', ':', 'raise', 'DataFailureException', '(', 'url', ',', 'response', '.', 'status', ',', 'response', '.', 'data', ')', 'tree', '=', 'etree', '.', 'fromstring', '(', 'response', '.', 'data', '.', 'strip', '(', ')', ')', '# XHTML response is an error response', 'xhtml', '=', 'tree', '.', 'xpath', '(', '"//xhtml:html"', ',', 'namespaces', '=', 'nsmap', ')', 'if', 'len', '(', 'xhtml', ')', ':', 'raise', 'DataFailureException', '(', 'url', ',', '500', ',', 'response', '.', 'data', ')', 'return', 'tree'] | Issue a GET request to R25 with the given url
and return a response as an etree element. | ['Issue', 'a', 'GET', 'request', 'to', 'R25', 'with', 'the', 'given', 'url', 'and', 'return', 'a', 'response', 'as', 'an', 'etree', 'element', '.'] | train | https://github.com/uw-it-aca/uw-restclients/blob/e12dcd32bf5296b6ebdf71798031594afb7852cb/restclients/r25/__init__.py#L10-L26 |
4,057 | concordusapps/alchemist | alchemist/db/operations/sql.py | flush | def flush(**kwargs):
"""Flush the specified names from the specified databases.
This can be highly destructive as it destroys all data.
"""
expression = lambda target, table: target.execute(table.delete())
test = lambda target, table: not table.exists(target)
op(expression, reversed(metadata.sorted_tables), test=test,
primary='flush', secondary='flush', **kwargs) | python | def flush(**kwargs):
"""Flush the specified names from the specified databases.
This can be highly destructive as it destroys all data.
"""
expression = lambda target, table: target.execute(table.delete())
test = lambda target, table: not table.exists(target)
op(expression, reversed(metadata.sorted_tables), test=test,
primary='flush', secondary='flush', **kwargs) | ['def', 'flush', '(', '*', '*', 'kwargs', ')', ':', 'expression', '=', 'lambda', 'target', ',', 'table', ':', 'target', '.', 'execute', '(', 'table', '.', 'delete', '(', ')', ')', 'test', '=', 'lambda', 'target', ',', 'table', ':', 'not', 'table', '.', 'exists', '(', 'target', ')', 'op', '(', 'expression', ',', 'reversed', '(', 'metadata', '.', 'sorted_tables', ')', ',', 'test', '=', 'test', ',', 'primary', '=', "'flush'", ',', 'secondary', '=', "'flush'", ',', '*', '*', 'kwargs', ')'] | Flush the specified names from the specified databases.
This can be highly destructive as it destroys all data. | ['Flush', 'the', 'specified', 'names', 'from', 'the', 'specified', 'databases', '.'] | train | https://github.com/concordusapps/alchemist/blob/822571366271b5dca0ac8bf41df988c6a3b61432/alchemist/db/operations/sql.py#L96-L105 |
4,058 | timothydmorton/VESPA | vespa/populations.py | EclipsePopulation.prior | def prior(self):
"""
Model prior for particular model.
Product of eclipse probability (``self.prob``),
the fraction of scenario that is allowed by the various
constraints (``self.selectfrac``), and all additional
factors in ``self.priorfactors``.
"""
prior = self.prob * self.selectfrac
for f in self.priorfactors:
prior *= self.priorfactors[f]
return prior | python | def prior(self):
"""
Model prior for particular model.
Product of eclipse probability (``self.prob``),
the fraction of scenario that is allowed by the various
constraints (``self.selectfrac``), and all additional
factors in ``self.priorfactors``.
"""
prior = self.prob * self.selectfrac
for f in self.priorfactors:
prior *= self.priorfactors[f]
return prior | ['def', 'prior', '(', 'self', ')', ':', 'prior', '=', 'self', '.', 'prob', '*', 'self', '.', 'selectfrac', 'for', 'f', 'in', 'self', '.', 'priorfactors', ':', 'prior', '*=', 'self', '.', 'priorfactors', '[', 'f', ']', 'return', 'prior'] | Model prior for particular model.
Product of eclipse probability (``self.prob``),
the fraction of scenario that is allowed by the various
constraints (``self.selectfrac``), and all additional
factors in ``self.priorfactors``. | ['Model', 'prior', 'for', 'particular', 'model', '.'] | train | https://github.com/timothydmorton/VESPA/blob/0446b54d48009f3655cfd1a3957ceea21d3adcaa/vespa/populations.py#L409-L422 |
4,059 | dpgaspar/Flask-AppBuilder | flask_appbuilder/models/sqla/interface.py | SQLAInterface._query_select_options | def _query_select_options(self, query, select_columns=None):
"""
Add select load options to query. The goal
is to only SQL select what is requested
:param query: SQLAlchemy Query obj
:param select_columns: (list) of columns
:return: SQLAlchemy Query obj
"""
if select_columns:
_load_options = list()
for column in select_columns:
if "." in column:
model_relation = self.get_related_model(column.split(".")[0])
if not self.is_model_already_joinded(query, model_relation):
query = query.join(model_relation)
_load_options.append(
Load(model_relation).load_only(column.split(".")[1])
)
else:
if not self.is_relation(column) and not hasattr(
getattr(self.obj, column), "__call__"
):
_load_options.append(Load(self.obj).load_only(column))
else:
_load_options.append(Load(self.obj))
query = query.options(*tuple(_load_options))
return query | python | def _query_select_options(self, query, select_columns=None):
"""
Add select load options to query. The goal
is to only SQL select what is requested
:param query: SQLAlchemy Query obj
:param select_columns: (list) of columns
:return: SQLAlchemy Query obj
"""
if select_columns:
_load_options = list()
for column in select_columns:
if "." in column:
model_relation = self.get_related_model(column.split(".")[0])
if not self.is_model_already_joinded(query, model_relation):
query = query.join(model_relation)
_load_options.append(
Load(model_relation).load_only(column.split(".")[1])
)
else:
if not self.is_relation(column) and not hasattr(
getattr(self.obj, column), "__call__"
):
_load_options.append(Load(self.obj).load_only(column))
else:
_load_options.append(Load(self.obj))
query = query.options(*tuple(_load_options))
return query | ['def', '_query_select_options', '(', 'self', ',', 'query', ',', 'select_columns', '=', 'None', ')', ':', 'if', 'select_columns', ':', '_load_options', '=', 'list', '(', ')', 'for', 'column', 'in', 'select_columns', ':', 'if', '"."', 'in', 'column', ':', 'model_relation', '=', 'self', '.', 'get_related_model', '(', 'column', '.', 'split', '(', '"."', ')', '[', '0', ']', ')', 'if', 'not', 'self', '.', 'is_model_already_joinded', '(', 'query', ',', 'model_relation', ')', ':', 'query', '=', 'query', '.', 'join', '(', 'model_relation', ')', '_load_options', '.', 'append', '(', 'Load', '(', 'model_relation', ')', '.', 'load_only', '(', 'column', '.', 'split', '(', '"."', ')', '[', '1', ']', ')', ')', 'else', ':', 'if', 'not', 'self', '.', 'is_relation', '(', 'column', ')', 'and', 'not', 'hasattr', '(', 'getattr', '(', 'self', '.', 'obj', ',', 'column', ')', ',', '"__call__"', ')', ':', '_load_options', '.', 'append', '(', 'Load', '(', 'self', '.', 'obj', ')', '.', 'load_only', '(', 'column', ')', ')', 'else', ':', '_load_options', '.', 'append', '(', 'Load', '(', 'self', '.', 'obj', ')', ')', 'query', '=', 'query', '.', 'options', '(', '*', 'tuple', '(', '_load_options', ')', ')', 'return', 'query'] | Add select load options to query. The goal
is to only SQL select what is requested
:param query: SQLAlchemy Query obj
:param select_columns: (list) of columns
:return: SQLAlchemy Query obj | ['Add', 'select', 'load', 'options', 'to', 'query', '.', 'The', 'goal', 'is', 'to', 'only', 'SQL', 'select', 'what', 'is', 'requested'] | train | https://github.com/dpgaspar/Flask-AppBuilder/blob/c293734c1b86e176a3ba57ee2deab6676d125576/flask_appbuilder/models/sqla/interface.py#L97-L124 |
4,060 | twilio/twilio-python | twilio/base/deserialize.py | rfc2822_datetime | def rfc2822_datetime(s):
"""
Parses an RFC 2822 date string and returns a UTC datetime object,
or the string if parsing failed.
:param s: RFC 2822-formatted string date
:return: datetime or str
"""
date_tuple = parsedate(s)
if date_tuple is None:
return None
return datetime.datetime(*date_tuple[:6]).replace(tzinfo=pytz.utc) | python | def rfc2822_datetime(s):
"""
Parses an RFC 2822 date string and returns a UTC datetime object,
or the string if parsing failed.
:param s: RFC 2822-formatted string date
:return: datetime or str
"""
date_tuple = parsedate(s)
if date_tuple is None:
return None
return datetime.datetime(*date_tuple[:6]).replace(tzinfo=pytz.utc) | ['def', 'rfc2822_datetime', '(', 's', ')', ':', 'date_tuple', '=', 'parsedate', '(', 's', ')', 'if', 'date_tuple', 'is', 'None', ':', 'return', 'None', 'return', 'datetime', '.', 'datetime', '(', '*', 'date_tuple', '[', ':', '6', ']', ')', '.', 'replace', '(', 'tzinfo', '=', 'pytz', '.', 'utc', ')'] | Parses an RFC 2822 date string and returns a UTC datetime object,
or the string if parsing failed.
:param s: RFC 2822-formatted string date
:return: datetime or str | ['Parses', 'an', 'RFC', '2822', 'date', 'string', 'and', 'returns', 'a', 'UTC', 'datetime', 'object', 'or', 'the', 'string', 'if', 'parsing', 'failed', '.', ':', 'param', 's', ':', 'RFC', '2822', '-', 'formatted', 'string', 'date', ':', 'return', ':', 'datetime', 'or', 'str'] | train | https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/base/deserialize.py#L36-L46 |
4,061 | fermiPy/fermipy | fermipy/gtanalysis.py | GTAnalysis.write_roi | def write_roi(self, outfile=None,
save_model_map=False, **kwargs):
"""Write current state of the analysis to a file. This method
writes an XML model definition, a ROI dictionary, and a FITS
source catalog file. A previously saved analysis state can be
reloaded from the ROI dictionary file with the
`~fermipy.gtanalysis.GTAnalysis.load_roi` method.
Parameters
----------
outfile : str
String prefix of the output files. The extension of this
string will be stripped when generating the XML, YAML and
npy filenames.
make_plots : bool
Generate diagnostic plots.
save_model_map : bool
Save the current counts model to a FITS file.
"""
# extract the results in a convenient format
make_plots = kwargs.get('make_plots', False)
save_weight_map = kwargs.get('save_weight_map', False)
if outfile is None:
pathprefix = os.path.join(self.config['fileio']['workdir'],
'results')
elif not os.path.isabs(outfile):
pathprefix = os.path.join(self.config['fileio']['workdir'],
outfile)
else:
pathprefix = outfile
pathprefix = utils.strip_suffix(pathprefix,
['fits', 'yaml', 'npy'])
# pathprefix, ext = os.path.splitext(pathprefix)
prefix = os.path.basename(pathprefix)
xmlfile = pathprefix + '.xml'
fitsfile = pathprefix + '.fits'
npyfile = pathprefix + '.npy'
self.write_xml(xmlfile)
self.write_fits(fitsfile)
if not self.config['gtlike']['use_external_srcmap']:
for c in self.components:
c.like.logLike.saveSourceMaps(str(c.files['srcmap']))
if save_model_map:
self.write_model_map(prefix)
if save_weight_map:
self.write_weight_map(prefix)
o = {}
o['roi'] = copy.deepcopy(self._roi_data)
o['config'] = copy.deepcopy(self.config)
o['version'] = fermipy.__version__
o['stversion'] = fermipy.get_st_version()
o['sources'] = {}
for s in self.roi.sources:
o['sources'][s.name] = copy.deepcopy(s.data)
for i, c in enumerate(self.components):
o['roi']['components'][i][
'src_expscale'] = copy.deepcopy(c.src_expscale)
self.logger.info('Writing %s...', npyfile)
np.save(npyfile, o)
if make_plots:
self.make_plots(prefix, None,
**kwargs.get('plotting', {})) | python | def write_roi(self, outfile=None,
save_model_map=False, **kwargs):
"""Write current state of the analysis to a file. This method
writes an XML model definition, a ROI dictionary, and a FITS
source catalog file. A previously saved analysis state can be
reloaded from the ROI dictionary file with the
`~fermipy.gtanalysis.GTAnalysis.load_roi` method.
Parameters
----------
outfile : str
String prefix of the output files. The extension of this
string will be stripped when generating the XML, YAML and
npy filenames.
make_plots : bool
Generate diagnostic plots.
save_model_map : bool
Save the current counts model to a FITS file.
"""
# extract the results in a convenient format
make_plots = kwargs.get('make_plots', False)
save_weight_map = kwargs.get('save_weight_map', False)
if outfile is None:
pathprefix = os.path.join(self.config['fileio']['workdir'],
'results')
elif not os.path.isabs(outfile):
pathprefix = os.path.join(self.config['fileio']['workdir'],
outfile)
else:
pathprefix = outfile
pathprefix = utils.strip_suffix(pathprefix,
['fits', 'yaml', 'npy'])
# pathprefix, ext = os.path.splitext(pathprefix)
prefix = os.path.basename(pathprefix)
xmlfile = pathprefix + '.xml'
fitsfile = pathprefix + '.fits'
npyfile = pathprefix + '.npy'
self.write_xml(xmlfile)
self.write_fits(fitsfile)
if not self.config['gtlike']['use_external_srcmap']:
for c in self.components:
c.like.logLike.saveSourceMaps(str(c.files['srcmap']))
if save_model_map:
self.write_model_map(prefix)
if save_weight_map:
self.write_weight_map(prefix)
o = {}
o['roi'] = copy.deepcopy(self._roi_data)
o['config'] = copy.deepcopy(self.config)
o['version'] = fermipy.__version__
o['stversion'] = fermipy.get_st_version()
o['sources'] = {}
for s in self.roi.sources:
o['sources'][s.name] = copy.deepcopy(s.data)
for i, c in enumerate(self.components):
o['roi']['components'][i][
'src_expscale'] = copy.deepcopy(c.src_expscale)
self.logger.info('Writing %s...', npyfile)
np.save(npyfile, o)
if make_plots:
self.make_plots(prefix, None,
**kwargs.get('plotting', {})) | ['def', 'write_roi', '(', 'self', ',', 'outfile', '=', 'None', ',', 'save_model_map', '=', 'False', ',', '*', '*', 'kwargs', ')', ':', '# extract the results in a convenient format', 'make_plots', '=', 'kwargs', '.', 'get', '(', "'make_plots'", ',', 'False', ')', 'save_weight_map', '=', 'kwargs', '.', 'get', '(', "'save_weight_map'", ',', 'False', ')', 'if', 'outfile', 'is', 'None', ':', 'pathprefix', '=', 'os', '.', 'path', '.', 'join', '(', 'self', '.', 'config', '[', "'fileio'", ']', '[', "'workdir'", ']', ',', "'results'", ')', 'elif', 'not', 'os', '.', 'path', '.', 'isabs', '(', 'outfile', ')', ':', 'pathprefix', '=', 'os', '.', 'path', '.', 'join', '(', 'self', '.', 'config', '[', "'fileio'", ']', '[', "'workdir'", ']', ',', 'outfile', ')', 'else', ':', 'pathprefix', '=', 'outfile', 'pathprefix', '=', 'utils', '.', 'strip_suffix', '(', 'pathprefix', ',', '[', "'fits'", ',', "'yaml'", ',', "'npy'", ']', ')', '# pathprefix, ext = os.path.splitext(pathprefix)', 'prefix', '=', 'os', '.', 'path', '.', 'basename', '(', 'pathprefix', ')', 'xmlfile', '=', 'pathprefix', '+', "'.xml'", 'fitsfile', '=', 'pathprefix', '+', "'.fits'", 'npyfile', '=', 'pathprefix', '+', "'.npy'", 'self', '.', 'write_xml', '(', 'xmlfile', ')', 'self', '.', 'write_fits', '(', 'fitsfile', ')', 'if', 'not', 'self', '.', 'config', '[', "'gtlike'", ']', '[', "'use_external_srcmap'", ']', ':', 'for', 'c', 'in', 'self', '.', 'components', ':', 'c', '.', 'like', '.', 'logLike', '.', 'saveSourceMaps', '(', 'str', '(', 'c', '.', 'files', '[', "'srcmap'", ']', ')', ')', 'if', 'save_model_map', ':', 'self', '.', 'write_model_map', '(', 'prefix', ')', 'if', 'save_weight_map', ':', 'self', '.', 'write_weight_map', '(', 'prefix', ')', 'o', '=', '{', '}', 'o', '[', "'roi'", ']', '=', 'copy', '.', 'deepcopy', '(', 'self', '.', '_roi_data', ')', 'o', '[', "'config'", ']', '=', 'copy', '.', 'deepcopy', '(', 'self', '.', 'config', ')', 'o', '[', "'version'", ']', '=', 'fermipy', '.', '__version__', 'o', '[', "'stversion'", ']', '=', 'fermipy', '.', 'get_st_version', '(', ')', 'o', '[', "'sources'", ']', '=', '{', '}', 'for', 's', 'in', 'self', '.', 'roi', '.', 'sources', ':', 'o', '[', "'sources'", ']', '[', 's', '.', 'name', ']', '=', 'copy', '.', 'deepcopy', '(', 's', '.', 'data', ')', 'for', 'i', ',', 'c', 'in', 'enumerate', '(', 'self', '.', 'components', ')', ':', 'o', '[', "'roi'", ']', '[', "'components'", ']', '[', 'i', ']', '[', "'src_expscale'", ']', '=', 'copy', '.', 'deepcopy', '(', 'c', '.', 'src_expscale', ')', 'self', '.', 'logger', '.', 'info', '(', "'Writing %s...'", ',', 'npyfile', ')', 'np', '.', 'save', '(', 'npyfile', ',', 'o', ')', 'if', 'make_plots', ':', 'self', '.', 'make_plots', '(', 'prefix', ',', 'None', ',', '*', '*', 'kwargs', '.', 'get', '(', "'plotting'", ',', '{', '}', ')', ')'] | Write current state of the analysis to a file. This method
writes an XML model definition, a ROI dictionary, and a FITS
source catalog file. A previously saved analysis state can be
reloaded from the ROI dictionary file with the
`~fermipy.gtanalysis.GTAnalysis.load_roi` method.
Parameters
----------
outfile : str
String prefix of the output files. The extension of this
string will be stripped when generating the XML, YAML and
npy filenames.
make_plots : bool
Generate diagnostic plots.
save_model_map : bool
Save the current counts model to a FITS file. | ['Write', 'current', 'state', 'of', 'the', 'analysis', 'to', 'a', 'file', '.', 'This', 'method', 'writes', 'an', 'XML', 'model', 'definition', 'a', 'ROI', 'dictionary', 'and', 'a', 'FITS', 'source', 'catalog', 'file', '.', 'A', 'previously', 'saved', 'analysis', 'state', 'can', 'be', 'reloaded', 'from', 'the', 'ROI', 'dictionary', 'file', 'with', 'the', '~fermipy', '.', 'gtanalysis', '.', 'GTAnalysis', '.', 'load_roi', 'method', '.'] | train | https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/gtanalysis.py#L3612-L3690 |
4,062 | dvdotsenko/jsonrpc.py | jsonrpcparts/serializers.py | JSONRPC20Serializer._parse_single_response | def _parse_single_response(cls, response_data):
"""de-serialize a JSON-RPC Response/error
:Returns: | [result, id] for Responses
:Raises: | RPCFault+derivates for error-packages/faults, RPCParseError, RPCInvalidRPC
"""
if not isinstance(response_data, dict):
raise errors.RPCInvalidRequest("No valid RPC-package.")
if "id" not in response_data:
raise errors.RPCInvalidRequest("""Invalid Response, "id" missing.""")
request_id = response_data['id']
if "jsonrpc" not in response_data:
raise errors.RPCInvalidRequest("""Invalid Response, "jsonrpc" missing.""", request_id)
if not isinstance(response_data["jsonrpc"], (str, unicode)):
raise errors.RPCInvalidRequest("""Invalid Response, "jsonrpc" must be a string.""")
if response_data["jsonrpc"] != "2.0":
raise errors.RPCInvalidRequest("""Invalid jsonrpc version.""", request_id)
error = response_data.get('error', None)
result = response_data.get('result', None)
if error and result:
raise errors.RPCInvalidRequest("""Invalid Response, only "result" OR "error" allowed.""", request_id)
if error:
if not isinstance(error, dict):
raise errors.RPCInvalidRequest("Invalid Response, invalid error-object.", request_id)
if not ("code" in error and "message" in error):
raise errors.RPCInvalidRequest("Invalid Response, invalid error-object.", request_id)
error_data = error.get("data", None)
if error['code'] in errors.ERROR_CODE_CLASS_MAP:
raise errors.ERROR_CODE_CLASS_MAP[error['code']](error_data, request_id)
else:
error_object = errors.RPCFault(error_data, request_id)
error_object.error_code = error['code']
error_object.message = error['message']
raise error_object
return result, request_id | python | def _parse_single_response(cls, response_data):
"""de-serialize a JSON-RPC Response/error
:Returns: | [result, id] for Responses
:Raises: | RPCFault+derivates for error-packages/faults, RPCParseError, RPCInvalidRPC
"""
if not isinstance(response_data, dict):
raise errors.RPCInvalidRequest("No valid RPC-package.")
if "id" not in response_data:
raise errors.RPCInvalidRequest("""Invalid Response, "id" missing.""")
request_id = response_data['id']
if "jsonrpc" not in response_data:
raise errors.RPCInvalidRequest("""Invalid Response, "jsonrpc" missing.""", request_id)
if not isinstance(response_data["jsonrpc"], (str, unicode)):
raise errors.RPCInvalidRequest("""Invalid Response, "jsonrpc" must be a string.""")
if response_data["jsonrpc"] != "2.0":
raise errors.RPCInvalidRequest("""Invalid jsonrpc version.""", request_id)
error = response_data.get('error', None)
result = response_data.get('result', None)
if error and result:
raise errors.RPCInvalidRequest("""Invalid Response, only "result" OR "error" allowed.""", request_id)
if error:
if not isinstance(error, dict):
raise errors.RPCInvalidRequest("Invalid Response, invalid error-object.", request_id)
if not ("code" in error and "message" in error):
raise errors.RPCInvalidRequest("Invalid Response, invalid error-object.", request_id)
error_data = error.get("data", None)
if error['code'] in errors.ERROR_CODE_CLASS_MAP:
raise errors.ERROR_CODE_CLASS_MAP[error['code']](error_data, request_id)
else:
error_object = errors.RPCFault(error_data, request_id)
error_object.error_code = error['code']
error_object.message = error['message']
raise error_object
return result, request_id | ['def', '_parse_single_response', '(', 'cls', ',', 'response_data', ')', ':', 'if', 'not', 'isinstance', '(', 'response_data', ',', 'dict', ')', ':', 'raise', 'errors', '.', 'RPCInvalidRequest', '(', '"No valid RPC-package."', ')', 'if', '"id"', 'not', 'in', 'response_data', ':', 'raise', 'errors', '.', 'RPCInvalidRequest', '(', '"""Invalid Response, "id" missing."""', ')', 'request_id', '=', 'response_data', '[', "'id'", ']', 'if', '"jsonrpc"', 'not', 'in', 'response_data', ':', 'raise', 'errors', '.', 'RPCInvalidRequest', '(', '"""Invalid Response, "jsonrpc" missing."""', ',', 'request_id', ')', 'if', 'not', 'isinstance', '(', 'response_data', '[', '"jsonrpc"', ']', ',', '(', 'str', ',', 'unicode', ')', ')', ':', 'raise', 'errors', '.', 'RPCInvalidRequest', '(', '"""Invalid Response, "jsonrpc" must be a string."""', ')', 'if', 'response_data', '[', '"jsonrpc"', ']', '!=', '"2.0"', ':', 'raise', 'errors', '.', 'RPCInvalidRequest', '(', '"""Invalid jsonrpc version."""', ',', 'request_id', ')', 'error', '=', 'response_data', '.', 'get', '(', "'error'", ',', 'None', ')', 'result', '=', 'response_data', '.', 'get', '(', "'result'", ',', 'None', ')', 'if', 'error', 'and', 'result', ':', 'raise', 'errors', '.', 'RPCInvalidRequest', '(', '"""Invalid Response, only "result" OR "error" allowed."""', ',', 'request_id', ')', 'if', 'error', ':', 'if', 'not', 'isinstance', '(', 'error', ',', 'dict', ')', ':', 'raise', 'errors', '.', 'RPCInvalidRequest', '(', '"Invalid Response, invalid error-object."', ',', 'request_id', ')', 'if', 'not', '(', '"code"', 'in', 'error', 'and', '"message"', 'in', 'error', ')', ':', 'raise', 'errors', '.', 'RPCInvalidRequest', '(', '"Invalid Response, invalid error-object."', ',', 'request_id', ')', 'error_data', '=', 'error', '.', 'get', '(', '"data"', ',', 'None', ')', 'if', 'error', '[', "'code'", ']', 'in', 'errors', '.', 'ERROR_CODE_CLASS_MAP', ':', 'raise', 'errors', '.', 'ERROR_CODE_CLASS_MAP', '[', 'error', '[', "'code'", ']', ']', '(', 'error_data', ',', 'request_id', ')', 'else', ':', 'error_object', '=', 'errors', '.', 'RPCFault', '(', 'error_data', ',', 'request_id', ')', 'error_object', '.', 'error_code', '=', 'error', '[', "'code'", ']', 'error_object', '.', 'message', '=', 'error', '[', "'message'", ']', 'raise', 'error_object', 'return', 'result', ',', 'request_id'] | de-serialize a JSON-RPC Response/error
:Returns: | [result, id] for Responses
:Raises: | RPCFault+derivates for error-packages/faults, RPCParseError, RPCInvalidRPC | ['de', '-', 'serialize', 'a', 'JSON', '-', 'RPC', 'Response', '/', 'error'] | train | https://github.com/dvdotsenko/jsonrpc.py/blob/19673edd77a9518ac5655bd407f6b93ffbb2cafc/jsonrpcparts/serializers.py#L470-L515 |
4,063 | nccgroup/Scout2 | AWSScout2/services/iam.py | IAMConfig.parse_roles | def parse_roles(self, fetched_role, params):
"""
Parse a single IAM role and fetch additional data
"""
role = {}
role['instances_count'] = 'N/A'
# When resuming upon throttling error, skip if already fetched
if fetched_role['RoleName'] in self.roles:
return
api_client = params['api_client']
# Ensure consistent attribute names across resource types
role['id'] = fetched_role.pop('RoleId')
role['name'] = fetched_role.pop('RoleName')
role['arn'] = fetched_role.pop('Arn')
# Get other attributes
get_keys(fetched_role, role, [ 'CreateDate', 'Path'])
# Get role policies
policies = self.__get_inline_policies(api_client, 'role', role['id'], role['name'])
if len(policies):
role['inline_policies'] = policies
role['inline_policies_count'] = len(policies)
# Get instance profiles
profiles = handle_truncated_response(api_client.list_instance_profiles_for_role, {'RoleName': role['name']}, ['InstanceProfiles'])
manage_dictionary(role, 'instance_profiles', {})
for profile in profiles['InstanceProfiles']:
manage_dictionary(role['instance_profiles'], profile['InstanceProfileId'], {})
role['instance_profiles'][profile['InstanceProfileId']]['arn'] = profile['Arn']
role['instance_profiles'][profile['InstanceProfileId']]['name'] = profile['InstanceProfileName']
# Get trust relationship
role['assume_role_policy'] = {}
role['assume_role_policy']['PolicyDocument'] = fetched_role.pop('AssumeRolePolicyDocument')
# Save role
self.roles[role['id']] = role | python | def parse_roles(self, fetched_role, params):
"""
Parse a single IAM role and fetch additional data
"""
role = {}
role['instances_count'] = 'N/A'
# When resuming upon throttling error, skip if already fetched
if fetched_role['RoleName'] in self.roles:
return
api_client = params['api_client']
# Ensure consistent attribute names across resource types
role['id'] = fetched_role.pop('RoleId')
role['name'] = fetched_role.pop('RoleName')
role['arn'] = fetched_role.pop('Arn')
# Get other attributes
get_keys(fetched_role, role, [ 'CreateDate', 'Path'])
# Get role policies
policies = self.__get_inline_policies(api_client, 'role', role['id'], role['name'])
if len(policies):
role['inline_policies'] = policies
role['inline_policies_count'] = len(policies)
# Get instance profiles
profiles = handle_truncated_response(api_client.list_instance_profiles_for_role, {'RoleName': role['name']}, ['InstanceProfiles'])
manage_dictionary(role, 'instance_profiles', {})
for profile in profiles['InstanceProfiles']:
manage_dictionary(role['instance_profiles'], profile['InstanceProfileId'], {})
role['instance_profiles'][profile['InstanceProfileId']]['arn'] = profile['Arn']
role['instance_profiles'][profile['InstanceProfileId']]['name'] = profile['InstanceProfileName']
# Get trust relationship
role['assume_role_policy'] = {}
role['assume_role_policy']['PolicyDocument'] = fetched_role.pop('AssumeRolePolicyDocument')
# Save role
self.roles[role['id']] = role | ['def', 'parse_roles', '(', 'self', ',', 'fetched_role', ',', 'params', ')', ':', 'role', '=', '{', '}', 'role', '[', "'instances_count'", ']', '=', "'N/A'", '# When resuming upon throttling error, skip if already fetched', 'if', 'fetched_role', '[', "'RoleName'", ']', 'in', 'self', '.', 'roles', ':', 'return', 'api_client', '=', 'params', '[', "'api_client'", ']', '# Ensure consistent attribute names across resource types', 'role', '[', "'id'", ']', '=', 'fetched_role', '.', 'pop', '(', "'RoleId'", ')', 'role', '[', "'name'", ']', '=', 'fetched_role', '.', 'pop', '(', "'RoleName'", ')', 'role', '[', "'arn'", ']', '=', 'fetched_role', '.', 'pop', '(', "'Arn'", ')', '# Get other attributes', 'get_keys', '(', 'fetched_role', ',', 'role', ',', '[', "'CreateDate'", ',', "'Path'", ']', ')', '# Get role policies', 'policies', '=', 'self', '.', '__get_inline_policies', '(', 'api_client', ',', "'role'", ',', 'role', '[', "'id'", ']', ',', 'role', '[', "'name'", ']', ')', 'if', 'len', '(', 'policies', ')', ':', 'role', '[', "'inline_policies'", ']', '=', 'policies', 'role', '[', "'inline_policies_count'", ']', '=', 'len', '(', 'policies', ')', '# Get instance profiles', 'profiles', '=', 'handle_truncated_response', '(', 'api_client', '.', 'list_instance_profiles_for_role', ',', '{', "'RoleName'", ':', 'role', '[', "'name'", ']', '}', ',', '[', "'InstanceProfiles'", ']', ')', 'manage_dictionary', '(', 'role', ',', "'instance_profiles'", ',', '{', '}', ')', 'for', 'profile', 'in', 'profiles', '[', "'InstanceProfiles'", ']', ':', 'manage_dictionary', '(', 'role', '[', "'instance_profiles'", ']', ',', 'profile', '[', "'InstanceProfileId'", ']', ',', '{', '}', ')', 'role', '[', "'instance_profiles'", ']', '[', 'profile', '[', "'InstanceProfileId'", ']', ']', '[', "'arn'", ']', '=', 'profile', '[', "'Arn'", ']', 'role', '[', "'instance_profiles'", ']', '[', 'profile', '[', "'InstanceProfileId'", ']', ']', '[', "'name'", ']', '=', 'profile', '[', "'InstanceProfileName'", ']', '# Get trust relationship', 'role', '[', "'assume_role_policy'", ']', '=', '{', '}', 'role', '[', "'assume_role_policy'", ']', '[', "'PolicyDocument'", ']', '=', 'fetched_role', '.', 'pop', '(', "'AssumeRolePolicyDocument'", ')', '# Save role', 'self', '.', 'roles', '[', 'role', '[', "'id'", ']', ']', '=', 'role'] | Parse a single IAM role and fetch additional data | ['Parse', 'a', 'single', 'IAM', 'role', 'and', 'fetch', 'additional', 'data'] | train | https://github.com/nccgroup/Scout2/blob/5d86d46d7ed91a92000496189e9cfa6b98243937/AWSScout2/services/iam.py#L205-L237 |
4,064 | benoitkugler/abstractDataLibrary | pyDLib/GUI/app.py | abstractToolBar.set_interface | def set_interface(self, interface):
"""Add update toolbar callback to the interface"""
self.interface = interface
self.interface.callbacks.update_toolbar = self._update
self._update() | python | def set_interface(self, interface):
"""Add update toolbar callback to the interface"""
self.interface = interface
self.interface.callbacks.update_toolbar = self._update
self._update() | ['def', 'set_interface', '(', 'self', ',', 'interface', ')', ':', 'self', '.', 'interface', '=', 'interface', 'self', '.', 'interface', '.', 'callbacks', '.', 'update_toolbar', '=', 'self', '.', '_update', 'self', '.', '_update', '(', ')'] | Add update toolbar callback to the interface | ['Add', 'update', 'toolbar', 'callback', 'to', 'the', 'interface'] | train | https://github.com/benoitkugler/abstractDataLibrary/blob/16be28e99837e40287a63803bbfdf67ac1806b7b/pyDLib/GUI/app.py#L101-L105 |
4,065 | apple/turicreate | src/unity/python/turicreate/data_structures/sframe.py | SFrame.topk | def topk(self, column_name, k=10, reverse=False):
"""
Get top k rows according to the given column. Result is according to and
sorted by `column_name` in the given order (default is descending).
When `k` is small, `topk` is more efficient than `sort`.
Parameters
----------
column_name : string
The column to sort on
k : int, optional
The number of rows to return
reverse : bool, optional
If True, return the top k rows in ascending order, otherwise, in
descending order.
Returns
-------
out : SFrame
an SFrame containing the top k rows sorted by column_name.
See Also
--------
sort
Examples
--------
>>> sf = turicreate.SFrame({'id': range(1000)})
>>> sf['value'] = -sf['id']
>>> sf.topk('id', k=3)
+--------+--------+
| id | value |
+--------+--------+
| 999 | -999 |
| 998 | -998 |
| 997 | -997 |
+--------+--------+
[3 rows x 2 columns]
>>> sf.topk('value', k=3)
+--------+--------+
| id | value |
+--------+--------+
| 1 | -1 |
| 2 | -2 |
| 3 | -3 |
+--------+--------+
[3 rows x 2 columns]
"""
if type(column_name) is not str:
raise TypeError("column_name must be a string")
sf = self[self[column_name].is_topk(k, reverse)]
return sf.sort(column_name, ascending=reverse) | python | def topk(self, column_name, k=10, reverse=False):
"""
Get top k rows according to the given column. Result is according to and
sorted by `column_name` in the given order (default is descending).
When `k` is small, `topk` is more efficient than `sort`.
Parameters
----------
column_name : string
The column to sort on
k : int, optional
The number of rows to return
reverse : bool, optional
If True, return the top k rows in ascending order, otherwise, in
descending order.
Returns
-------
out : SFrame
an SFrame containing the top k rows sorted by column_name.
See Also
--------
sort
Examples
--------
>>> sf = turicreate.SFrame({'id': range(1000)})
>>> sf['value'] = -sf['id']
>>> sf.topk('id', k=3)
+--------+--------+
| id | value |
+--------+--------+
| 999 | -999 |
| 998 | -998 |
| 997 | -997 |
+--------+--------+
[3 rows x 2 columns]
>>> sf.topk('value', k=3)
+--------+--------+
| id | value |
+--------+--------+
| 1 | -1 |
| 2 | -2 |
| 3 | -3 |
+--------+--------+
[3 rows x 2 columns]
"""
if type(column_name) is not str:
raise TypeError("column_name must be a string")
sf = self[self[column_name].is_topk(k, reverse)]
return sf.sort(column_name, ascending=reverse) | ['def', 'topk', '(', 'self', ',', 'column_name', ',', 'k', '=', '10', ',', 'reverse', '=', 'False', ')', ':', 'if', 'type', '(', 'column_name', ')', 'is', 'not', 'str', ':', 'raise', 'TypeError', '(', '"column_name must be a string"', ')', 'sf', '=', 'self', '[', 'self', '[', 'column_name', ']', '.', 'is_topk', '(', 'k', ',', 'reverse', ')', ']', 'return', 'sf', '.', 'sort', '(', 'column_name', ',', 'ascending', '=', 'reverse', ')'] | Get top k rows according to the given column. Result is according to and
sorted by `column_name` in the given order (default is descending).
When `k` is small, `topk` is more efficient than `sort`.
Parameters
----------
column_name : string
The column to sort on
k : int, optional
The number of rows to return
reverse : bool, optional
If True, return the top k rows in ascending order, otherwise, in
descending order.
Returns
-------
out : SFrame
an SFrame containing the top k rows sorted by column_name.
See Also
--------
sort
Examples
--------
>>> sf = turicreate.SFrame({'id': range(1000)})
>>> sf['value'] = -sf['id']
>>> sf.topk('id', k=3)
+--------+--------+
| id | value |
+--------+--------+
| 999 | -999 |
| 998 | -998 |
| 997 | -997 |
+--------+--------+
[3 rows x 2 columns]
>>> sf.topk('value', k=3)
+--------+--------+
| id | value |
+--------+--------+
| 1 | -1 |
| 2 | -2 |
| 3 | -3 |
+--------+--------+
[3 rows x 2 columns] | ['Get', 'top', 'k', 'rows', 'according', 'to', 'the', 'given', 'column', '.', 'Result', 'is', 'according', 'to', 'and', 'sorted', 'by', 'column_name', 'in', 'the', 'given', 'order', '(', 'default', 'is', 'descending', ')', '.', 'When', 'k', 'is', 'small', 'topk', 'is', 'more', 'efficient', 'than', 'sort', '.'] | train | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/data_structures/sframe.py#L2710-L2766 |
4,066 | Autodesk/pyccc | pyccc/python.py | PackagedFunction.prepare_namespace | def prepare_namespace(self, func):
"""
Prepares the function to be run after deserializing it.
Re-associates any previously bound variables and modules from the closure
Returns:
callable: ready-to-call function
"""
if self.is_imethod:
to_run = getattr(self.obj, self.imethod_name)
else:
to_run = func
for varname, modulename in self.global_modules.items():
to_run.__globals__[varname] = __import__(modulename)
if self.global_closure:
to_run.__globals__.update(self.global_closure)
if self.global_functions:
to_run.__globals__.update(self.global_functions)
return to_run | python | def prepare_namespace(self, func):
"""
Prepares the function to be run after deserializing it.
Re-associates any previously bound variables and modules from the closure
Returns:
callable: ready-to-call function
"""
if self.is_imethod:
to_run = getattr(self.obj, self.imethod_name)
else:
to_run = func
for varname, modulename in self.global_modules.items():
to_run.__globals__[varname] = __import__(modulename)
if self.global_closure:
to_run.__globals__.update(self.global_closure)
if self.global_functions:
to_run.__globals__.update(self.global_functions)
return to_run | ['def', 'prepare_namespace', '(', 'self', ',', 'func', ')', ':', 'if', 'self', '.', 'is_imethod', ':', 'to_run', '=', 'getattr', '(', 'self', '.', 'obj', ',', 'self', '.', 'imethod_name', ')', 'else', ':', 'to_run', '=', 'func', 'for', 'varname', ',', 'modulename', 'in', 'self', '.', 'global_modules', '.', 'items', '(', ')', ':', 'to_run', '.', '__globals__', '[', 'varname', ']', '=', '__import__', '(', 'modulename', ')', 'if', 'self', '.', 'global_closure', ':', 'to_run', '.', '__globals__', '.', 'update', '(', 'self', '.', 'global_closure', ')', 'if', 'self', '.', 'global_functions', ':', 'to_run', '.', '__globals__', '.', 'update', '(', 'self', '.', 'global_functions', ')', 'return', 'to_run'] | Prepares the function to be run after deserializing it.
Re-associates any previously bound variables and modules from the closure
Returns:
callable: ready-to-call function | ['Prepares', 'the', 'function', 'to', 'be', 'run', 'after', 'deserializing', 'it', '.', 'Re', '-', 'associates', 'any', 'previously', 'bound', 'variables', 'and', 'modules', 'from', 'the', 'closure'] | train | https://github.com/Autodesk/pyccc/blob/011698e78d49a83ac332e0578a4a2a865b75ef8d/pyccc/python.py#L292-L311 |
4,067 | jazzband/django-axes | axes/attempts.py | get_user_attempts | def get_user_attempts(request: AxesHttpRequest, credentials: dict = None) -> QuerySet:
"""
Get valid user attempts that match the given request and credentials.
"""
attempts = filter_user_attempts(request, credentials)
if settings.AXES_COOLOFF_TIME is None:
log.debug('AXES: Getting all access attempts from database because no AXES_COOLOFF_TIME is configured')
return attempts
threshold = get_cool_off_threshold(request.axes_attempt_time)
log.debug('AXES: Getting access attempts that are newer than %s', threshold)
return attempts.filter(attempt_time__gte=threshold) | python | def get_user_attempts(request: AxesHttpRequest, credentials: dict = None) -> QuerySet:
"""
Get valid user attempts that match the given request and credentials.
"""
attempts = filter_user_attempts(request, credentials)
if settings.AXES_COOLOFF_TIME is None:
log.debug('AXES: Getting all access attempts from database because no AXES_COOLOFF_TIME is configured')
return attempts
threshold = get_cool_off_threshold(request.axes_attempt_time)
log.debug('AXES: Getting access attempts that are newer than %s', threshold)
return attempts.filter(attempt_time__gte=threshold) | ['def', 'get_user_attempts', '(', 'request', ':', 'AxesHttpRequest', ',', 'credentials', ':', 'dict', '=', 'None', ')', '->', 'QuerySet', ':', 'attempts', '=', 'filter_user_attempts', '(', 'request', ',', 'credentials', ')', 'if', 'settings', '.', 'AXES_COOLOFF_TIME', 'is', 'None', ':', 'log', '.', 'debug', '(', "'AXES: Getting all access attempts from database because no AXES_COOLOFF_TIME is configured'", ')', 'return', 'attempts', 'threshold', '=', 'get_cool_off_threshold', '(', 'request', '.', 'axes_attempt_time', ')', 'log', '.', 'debug', '(', "'AXES: Getting access attempts that are newer than %s'", ',', 'threshold', ')', 'return', 'attempts', '.', 'filter', '(', 'attempt_time__gte', '=', 'threshold', ')'] | Get valid user attempts that match the given request and credentials. | ['Get', 'valid', 'user', 'attempts', 'that', 'match', 'the', 'given', 'request', 'and', 'credentials', '.'] | train | https://github.com/jazzband/django-axes/blob/3e215a174030e43e7ab8c2a79c395eb0eeddc667/axes/attempts.py#L45-L58 |
4,068 | RPi-Distro/python-sense-hat | sense_hat/sense_hat.py | SenseHat.get_pressure | def get_pressure(self):
"""
Returns the pressure in Millibars
"""
self._init_pressure() # Ensure pressure sensor is initialised
pressure = 0
data = self._pressure.pressureRead()
if (data[0]): # Pressure valid
pressure = data[1]
return pressure | python | def get_pressure(self):
"""
Returns the pressure in Millibars
"""
self._init_pressure() # Ensure pressure sensor is initialised
pressure = 0
data = self._pressure.pressureRead()
if (data[0]): # Pressure valid
pressure = data[1]
return pressure | ['def', 'get_pressure', '(', 'self', ')', ':', 'self', '.', '_init_pressure', '(', ')', '# Ensure pressure sensor is initialised', 'pressure', '=', '0', 'data', '=', 'self', '.', '_pressure', '.', 'pressureRead', '(', ')', 'if', '(', 'data', '[', '0', ']', ')', ':', '# Pressure valid', 'pressure', '=', 'data', '[', '1', ']', 'return', 'pressure'] | Returns the pressure in Millibars | ['Returns', 'the', 'pressure', 'in', 'Millibars'] | train | https://github.com/RPi-Distro/python-sense-hat/blob/9a37f0923ce8dbde69514c3b8d58d30de01c9ee7/sense_hat/sense_hat.py#L616-L626 |
4,069 | enkore/i3pystatus | i3pystatus/calendar/google.py | Google.refresh_events | def refresh_events(self):
"""
Retrieve the next N events from Google.
"""
now = datetime.datetime.now(tz=pytz.UTC)
try:
now, later = self.get_timerange_formatted(now)
events_result = self.service.events().list(
calendarId='primary',
timeMin=now,
timeMax=later,
maxResults=10,
singleEvents=True,
orderBy='startTime',
timeZone='utc'
).execute()
self.events.clear()
for event in events_result.get('items', []):
self.events.append(GoogleCalendarEvent(event))
except HttpError as e:
if e.resp.status in (500, 503):
self.logger.warn("GoogleCalendar received %s while retrieving events" % e.resp.status)
else:
raise | python | def refresh_events(self):
"""
Retrieve the next N events from Google.
"""
now = datetime.datetime.now(tz=pytz.UTC)
try:
now, later = self.get_timerange_formatted(now)
events_result = self.service.events().list(
calendarId='primary',
timeMin=now,
timeMax=later,
maxResults=10,
singleEvents=True,
orderBy='startTime',
timeZone='utc'
).execute()
self.events.clear()
for event in events_result.get('items', []):
self.events.append(GoogleCalendarEvent(event))
except HttpError as e:
if e.resp.status in (500, 503):
self.logger.warn("GoogleCalendar received %s while retrieving events" % e.resp.status)
else:
raise | ['def', 'refresh_events', '(', 'self', ')', ':', 'now', '=', 'datetime', '.', 'datetime', '.', 'now', '(', 'tz', '=', 'pytz', '.', 'UTC', ')', 'try', ':', 'now', ',', 'later', '=', 'self', '.', 'get_timerange_formatted', '(', 'now', ')', 'events_result', '=', 'self', '.', 'service', '.', 'events', '(', ')', '.', 'list', '(', 'calendarId', '=', "'primary'", ',', 'timeMin', '=', 'now', ',', 'timeMax', '=', 'later', ',', 'maxResults', '=', '10', ',', 'singleEvents', '=', 'True', ',', 'orderBy', '=', "'startTime'", ',', 'timeZone', '=', "'utc'", ')', '.', 'execute', '(', ')', 'self', '.', 'events', '.', 'clear', '(', ')', 'for', 'event', 'in', 'events_result', '.', 'get', '(', "'items'", ',', '[', ']', ')', ':', 'self', '.', 'events', '.', 'append', '(', 'GoogleCalendarEvent', '(', 'event', ')', ')', 'except', 'HttpError', 'as', 'e', ':', 'if', 'e', '.', 'resp', '.', 'status', 'in', '(', '500', ',', '503', ')', ':', 'self', '.', 'logger', '.', 'warn', '(', '"GoogleCalendar received %s while retrieving events"', '%', 'e', '.', 'resp', '.', 'status', ')', 'else', ':', 'raise'] | Retrieve the next N events from Google. | ['Retrieve', 'the', 'next', 'N', 'events', 'from', 'Google', '.'] | train | https://github.com/enkore/i3pystatus/blob/14cfde967cecf79b40e223e35a04600f4c875af7/i3pystatus/calendar/google.py#L105-L128 |
4,070 | wummel/linkchecker | linkcheck/url.py | urlunsplit | def urlunsplit (urlparts):
"""Same as urlparse.urlunsplit but with extra UNC path handling
for Windows OS."""
res = urlparse.urlunsplit(urlparts)
if os.name == 'nt' and urlparts[0] == 'file' and '|' not in urlparts[2]:
# UNC paths must have 4 slashes: 'file:////server/path'
# Depending on the path in urlparts[2], urlparse.urlunsplit()
# left only two or three slashes. This is fixed below
repl = 'file://' if urlparts[2].startswith('//') else 'file:/'
res = res.replace('file:', repl)
return res | python | def urlunsplit (urlparts):
"""Same as urlparse.urlunsplit but with extra UNC path handling
for Windows OS."""
res = urlparse.urlunsplit(urlparts)
if os.name == 'nt' and urlparts[0] == 'file' and '|' not in urlparts[2]:
# UNC paths must have 4 slashes: 'file:////server/path'
# Depending on the path in urlparts[2], urlparse.urlunsplit()
# left only two or three slashes. This is fixed below
repl = 'file://' if urlparts[2].startswith('//') else 'file:/'
res = res.replace('file:', repl)
return res | ['def', 'urlunsplit', '(', 'urlparts', ')', ':', 'res', '=', 'urlparse', '.', 'urlunsplit', '(', 'urlparts', ')', 'if', 'os', '.', 'name', '==', "'nt'", 'and', 'urlparts', '[', '0', ']', '==', "'file'", 'and', "'|'", 'not', 'in', 'urlparts', '[', '2', ']', ':', "# UNC paths must have 4 slashes: 'file:////server/path'", '# Depending on the path in urlparts[2], urlparse.urlunsplit()', '# left only two or three slashes. This is fixed below', 'repl', '=', "'file://'", 'if', 'urlparts', '[', '2', ']', '.', 'startswith', '(', "'//'", ')', 'else', "'file:/'", 'res', '=', 'res', '.', 'replace', '(', "'file:'", ',', 'repl', ')', 'return', 'res'] | Same as urlparse.urlunsplit but with extra UNC path handling
for Windows OS. | ['Same', 'as', 'urlparse', '.', 'urlunsplit', 'but', 'with', 'extra', 'UNC', 'path', 'handling', 'for', 'Windows', 'OS', '.'] | train | https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/linkcheck/url.py#L275-L285 |
4,071 | nitely/django-hooks | hooks/templatehook.py | Hook.register | def register(self, name, func):
"""
Register a new callback.\
When the name/id is not found\
a new hook is created under its name,\
meaning the hook is usually created by\
the first registered callback
:param str name: Hook name
:param callable func: A func reference (callback)
"""
try:
templatehook = self._registry[name]
except KeyError:
templatehook = self._register(name)
templatehook.register(func) | python | def register(self, name, func):
"""
Register a new callback.\
When the name/id is not found\
a new hook is created under its name,\
meaning the hook is usually created by\
the first registered callback
:param str name: Hook name
:param callable func: A func reference (callback)
"""
try:
templatehook = self._registry[name]
except KeyError:
templatehook = self._register(name)
templatehook.register(func) | ['def', 'register', '(', 'self', ',', 'name', ',', 'func', ')', ':', 'try', ':', 'templatehook', '=', 'self', '.', '_registry', '[', 'name', ']', 'except', 'KeyError', ':', 'templatehook', '=', 'self', '.', '_register', '(', 'name', ')', 'templatehook', '.', 'register', '(', 'func', ')'] | Register a new callback.\
When the name/id is not found\
a new hook is created under its name,\
meaning the hook is usually created by\
the first registered callback
:param str name: Hook name
:param callable func: A func reference (callback) | ['Register', 'a', 'new', 'callback', '.', '\\', 'When', 'the', 'name', '/', 'id', 'is', 'not', 'found', '\\', 'a', 'new', 'hook', 'is', 'created', 'under', 'its', 'name', '\\', 'meaning', 'the', 'hook', 'is', 'usually', 'created', 'by', '\\', 'the', 'first', 'registered', 'callback'] | train | https://github.com/nitely/django-hooks/blob/26ea2150c9be110e90b9ee60fbfd1065ac30ab1d/hooks/templatehook.py#L99-L115 |
4,072 | SpriteLink/NIPAP | pynipap/pynipap.py | Pool.get | def get(cls, id):
""" Get the pool with id 'id'.
"""
# cached?
if CACHE:
if id in _cache['Pool']:
log.debug('cache hit for pool %d' % id)
return _cache['Pool'][id]
log.debug('cache miss for pool %d' % id)
try:
pool = Pool.list({'id': id})[0]
except (IndexError, KeyError):
raise NipapNonExistentError('no pool with ID ' + str(id) + ' found')
_cache['Pool'][id] = pool
return pool | python | def get(cls, id):
""" Get the pool with id 'id'.
"""
# cached?
if CACHE:
if id in _cache['Pool']:
log.debug('cache hit for pool %d' % id)
return _cache['Pool'][id]
log.debug('cache miss for pool %d' % id)
try:
pool = Pool.list({'id': id})[0]
except (IndexError, KeyError):
raise NipapNonExistentError('no pool with ID ' + str(id) + ' found')
_cache['Pool'][id] = pool
return pool | ['def', 'get', '(', 'cls', ',', 'id', ')', ':', '# cached?', 'if', 'CACHE', ':', 'if', 'id', 'in', '_cache', '[', "'Pool'", ']', ':', 'log', '.', 'debug', '(', "'cache hit for pool %d'", '%', 'id', ')', 'return', '_cache', '[', "'Pool'", ']', '[', 'id', ']', 'log', '.', 'debug', '(', "'cache miss for pool %d'", '%', 'id', ')', 'try', ':', 'pool', '=', 'Pool', '.', 'list', '(', '{', "'id'", ':', 'id', '}', ')', '[', '0', ']', 'except', '(', 'IndexError', ',', 'KeyError', ')', ':', 'raise', 'NipapNonExistentError', '(', "'no pool with ID '", '+', 'str', '(', 'id', ')', '+', "' found'", ')', '_cache', '[', "'Pool'", ']', '[', 'id', ']', '=', 'pool', 'return', 'pool'] | Get the pool with id 'id'. | ['Get', 'the', 'pool', 'with', 'id', 'id', '.'] | train | https://github.com/SpriteLink/NIPAP/blob/f96069f11ab952d80b13cab06e0528f2d24b3de9/pynipap/pynipap.py#L780-L797 |
4,073 | bitcraft/pyscroll | pyscroll/orthographic.py | BufferedRenderer._flush_tile_queue_blits | def _flush_tile_queue_blits(self, surface):
""" Blit the queued tiles and block until the tile queue is empty
for pygame 1.9.4 +
"""
tw, th = self.data.tile_size
ltw = self._tile_view.left * tw
tth = self._tile_view.top * th
self.data.prepare_tiles(self._tile_view)
blit_list = [(image, (x * tw - ltw, y * th - tth)) for x, y, l, image in self._tile_queue]
surface.blits(blit_list) | python | def _flush_tile_queue_blits(self, surface):
""" Blit the queued tiles and block until the tile queue is empty
for pygame 1.9.4 +
"""
tw, th = self.data.tile_size
ltw = self._tile_view.left * tw
tth = self._tile_view.top * th
self.data.prepare_tiles(self._tile_view)
blit_list = [(image, (x * tw - ltw, y * th - tth)) for x, y, l, image in self._tile_queue]
surface.blits(blit_list) | ['def', '_flush_tile_queue_blits', '(', 'self', ',', 'surface', ')', ':', 'tw', ',', 'th', '=', 'self', '.', 'data', '.', 'tile_size', 'ltw', '=', 'self', '.', '_tile_view', '.', 'left', '*', 'tw', 'tth', '=', 'self', '.', '_tile_view', '.', 'top', '*', 'th', 'self', '.', 'data', '.', 'prepare_tiles', '(', 'self', '.', '_tile_view', ')', 'blit_list', '=', '[', '(', 'image', ',', '(', 'x', '*', 'tw', '-', 'ltw', ',', 'y', '*', 'th', '-', 'tth', ')', ')', 'for', 'x', ',', 'y', ',', 'l', ',', 'image', 'in', 'self', '.', '_tile_queue', ']', 'surface', '.', 'blits', '(', 'blit_list', ')'] | Blit the queued tiles and block until the tile queue is empty
for pygame 1.9.4 + | ['Blit', 'the', 'queued', 'tiles', 'and', 'block', 'until', 'the', 'tile', 'queue', 'is', 'empty'] | train | https://github.com/bitcraft/pyscroll/blob/b41c1016dfefd0e2d83a14a2ce40d7ad298c5b0f/pyscroll/orthographic.py#L508-L520 |
4,074 | ultrabug/py3status | py3status/formatter.py | Formatter.update_placeholders | def update_placeholders(self, format_string, placeholders):
"""
Update a format string renaming placeholders.
"""
# Tokenize the format string and process them
output = []
for token in self.tokens(format_string):
if token.group("key") in placeholders:
output.append(
"{%s%s}" % (placeholders[token.group("key")], token.group("format"))
)
continue
elif token.group("command"):
# update any placeholders used in commands
commands = parse_qsl(token.group("command"), keep_blank_values=True)
# placeholders only used in `if`
if "if" in [x[0] for x in commands]:
items = []
for key, value in commands:
if key == "if":
# we have to rebuild from the parts we have
condition = Condition(value)
variable = condition.variable
if variable in placeholders:
variable = placeholders[variable]
# negation via `!`
not_ = "!" if not condition.default else ""
condition_ = condition.condition or ""
# if there is no condition then there is no
# value
if condition_:
value_ = condition.value
else:
value_ = ""
value = "{}{}{}{}".format(
not_, variable, condition_, value_
)
if value:
items.append("{}={}".format(key, value))
else:
items.append(key)
# we cannot use urlencode because it will escape things
# like `!`
output.append(r"\?{} ".format("&".join(items)))
continue
value = token.group(0)
output.append(value)
return u"".join(output) | python | def update_placeholders(self, format_string, placeholders):
"""
Update a format string renaming placeholders.
"""
# Tokenize the format string and process them
output = []
for token in self.tokens(format_string):
if token.group("key") in placeholders:
output.append(
"{%s%s}" % (placeholders[token.group("key")], token.group("format"))
)
continue
elif token.group("command"):
# update any placeholders used in commands
commands = parse_qsl(token.group("command"), keep_blank_values=True)
# placeholders only used in `if`
if "if" in [x[0] for x in commands]:
items = []
for key, value in commands:
if key == "if":
# we have to rebuild from the parts we have
condition = Condition(value)
variable = condition.variable
if variable in placeholders:
variable = placeholders[variable]
# negation via `!`
not_ = "!" if not condition.default else ""
condition_ = condition.condition or ""
# if there is no condition then there is no
# value
if condition_:
value_ = condition.value
else:
value_ = ""
value = "{}{}{}{}".format(
not_, variable, condition_, value_
)
if value:
items.append("{}={}".format(key, value))
else:
items.append(key)
# we cannot use urlencode because it will escape things
# like `!`
output.append(r"\?{} ".format("&".join(items)))
continue
value = token.group(0)
output.append(value)
return u"".join(output) | ['def', 'update_placeholders', '(', 'self', ',', 'format_string', ',', 'placeholders', ')', ':', '# Tokenize the format string and process them', 'output', '=', '[', ']', 'for', 'token', 'in', 'self', '.', 'tokens', '(', 'format_string', ')', ':', 'if', 'token', '.', 'group', '(', '"key"', ')', 'in', 'placeholders', ':', 'output', '.', 'append', '(', '"{%s%s}"', '%', '(', 'placeholders', '[', 'token', '.', 'group', '(', '"key"', ')', ']', ',', 'token', '.', 'group', '(', '"format"', ')', ')', ')', 'continue', 'elif', 'token', '.', 'group', '(', '"command"', ')', ':', '# update any placeholders used in commands', 'commands', '=', 'parse_qsl', '(', 'token', '.', 'group', '(', '"command"', ')', ',', 'keep_blank_values', '=', 'True', ')', '# placeholders only used in `if`', 'if', '"if"', 'in', '[', 'x', '[', '0', ']', 'for', 'x', 'in', 'commands', ']', ':', 'items', '=', '[', ']', 'for', 'key', ',', 'value', 'in', 'commands', ':', 'if', 'key', '==', '"if"', ':', '# we have to rebuild from the parts we have', 'condition', '=', 'Condition', '(', 'value', ')', 'variable', '=', 'condition', '.', 'variable', 'if', 'variable', 'in', 'placeholders', ':', 'variable', '=', 'placeholders', '[', 'variable', ']', '# negation via `!`', 'not_', '=', '"!"', 'if', 'not', 'condition', '.', 'default', 'else', '""', 'condition_', '=', 'condition', '.', 'condition', 'or', '""', '# if there is no condition then there is no', '# value', 'if', 'condition_', ':', 'value_', '=', 'condition', '.', 'value', 'else', ':', 'value_', '=', '""', 'value', '=', '"{}{}{}{}"', '.', 'format', '(', 'not_', ',', 'variable', ',', 'condition_', ',', 'value_', ')', 'if', 'value', ':', 'items', '.', 'append', '(', '"{}={}"', '.', 'format', '(', 'key', ',', 'value', ')', ')', 'else', ':', 'items', '.', 'append', '(', 'key', ')', '# we cannot use urlencode because it will escape things', '# like `!`', 'output', '.', 'append', '(', 'r"\\?{} "', '.', 'format', '(', '"&"', '.', 'join', '(', 'items', ')', ')', ')', 'continue', 'value', '=', 'token', '.', 'group', '(', '0', ')', 'output', '.', 'append', '(', 'value', ')', 'return', 'u""', '.', 'join', '(', 'output', ')'] | Update a format string renaming placeholders. | ['Update', 'a', 'format', 'string', 'renaming', 'placeholders', '.'] | train | https://github.com/ultrabug/py3status/blob/4c105f1b44f7384ca4f7da5f821a47e468c7dee2/py3status/formatter.py#L105-L153 |
4,075 | andersinno/django-sanitized-dump | sanitized_dump/config.py | Configuration.diff_with_models | def diff_with_models(self):
"""
Return a dict stating the differences between current state of models
and the configuration itself.
TODO: Detect fields that are in conf, but not in models
"""
missing_from_conf = defaultdict(set)
for model in get_models():
db_tables_and_columns = get_db_tables_and_columns_of_model(model)
for (table_name, columns) in db_tables_and_columns.items():
model_strategy = self.strategy.get(table_name)
for column in columns:
if not model_strategy or column not in model_strategy:
missing_from_conf[table_name].add(column)
return missing_from_conf | python | def diff_with_models(self):
"""
Return a dict stating the differences between current state of models
and the configuration itself.
TODO: Detect fields that are in conf, but not in models
"""
missing_from_conf = defaultdict(set)
for model in get_models():
db_tables_and_columns = get_db_tables_and_columns_of_model(model)
for (table_name, columns) in db_tables_and_columns.items():
model_strategy = self.strategy.get(table_name)
for column in columns:
if not model_strategy or column not in model_strategy:
missing_from_conf[table_name].add(column)
return missing_from_conf | ['def', 'diff_with_models', '(', 'self', ')', ':', 'missing_from_conf', '=', 'defaultdict', '(', 'set', ')', 'for', 'model', 'in', 'get_models', '(', ')', ':', 'db_tables_and_columns', '=', 'get_db_tables_and_columns_of_model', '(', 'model', ')', 'for', '(', 'table_name', ',', 'columns', ')', 'in', 'db_tables_and_columns', '.', 'items', '(', ')', ':', 'model_strategy', '=', 'self', '.', 'strategy', '.', 'get', '(', 'table_name', ')', 'for', 'column', 'in', 'columns', ':', 'if', 'not', 'model_strategy', 'or', 'column', 'not', 'in', 'model_strategy', ':', 'missing_from_conf', '[', 'table_name', ']', '.', 'add', '(', 'column', ')', 'return', 'missing_from_conf'] | Return a dict stating the differences between current state of models
and the configuration itself.
TODO: Detect fields that are in conf, but not in models | ['Return', 'a', 'dict', 'stating', 'the', 'differences', 'between', 'current', 'state', 'of', 'models', 'and', 'the', 'configuration', 'itself', '.', 'TODO', ':', 'Detect', 'fields', 'that', 'are', 'in', 'conf', 'but', 'not', 'in', 'models'] | train | https://github.com/andersinno/django-sanitized-dump/blob/185a693d153dd9fb56cdc58382e4744635afc2e7/sanitized_dump/config.py#L60-L75 |
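A short sketch of consuming the mapping that diff_with_models returns; the dict below is a fabricated stand-in, since running the real method needs a configured Django project and a Configuration instance whose construction is not shown in this record.

# Stand-in for the return shape: {db_table_name: set of columns missing from the strategy}.
missing = {"auth_user": {"email", "last_name"}, "shop_order": {"customer_note"}}
for table, columns in sorted(missing.items()):
    print("{}: {}".format(table, ", ".join(sorted(columns))))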
4,076 | dmort27/panphon | panphon/_panphon.py | FeatureTable.segs | def segs(self, word):
"""Returns a list of segments from a word
Args:
word (unicode): input word as Unicode IPA string
Returns:
list: list of strings corresponding to segments found in `word`
"""
return [m.group('all') for m in self.seg_regex.finditer(word)] | python | def segs(self, word):
"""Returns a list of segments from a word
Args:
word (unicode): input word as Unicode IPA string
Returns:
list: list of strings corresponding to segments found in `word`
"""
return [m.group('all') for m in self.seg_regex.finditer(word)] | ['def', 'segs', '(', 'self', ',', 'word', ')', ':', 'return', '[', 'm', '.', 'group', '(', "'all'", ')', 'for', 'm', 'in', 'self', '.', 'seg_regex', '.', 'finditer', '(', 'word', ')', ']'] | Returns a list of segments from a word
Args:
word (unicode): input word as Unicode IPA string
Returns:
list: list of strings corresponding to segments found in `word` | ['Returns', 'a', 'list', 'of', 'segments', 'from', 'a', 'word'] | train | https://github.com/dmort27/panphon/blob/17eaa482e3edb211f3a8138137d76e4b9246d201/panphon/_panphon.py#L243-L252 |
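A minimal usage sketch, assuming panphon's documented FeatureTable() entry point; the sample IPA string is arbitrary.

import panphon

ft = panphon.FeatureTable()  # loads the default IPA feature table
print(ft.segs(u"pʰonetik"))  # e.g. ['pʰ', 'o', 'n', 'e', 't', 'i', 'k']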
4,077 | twilio/twilio-python | twilio/rest/taskrouter/v1/workspace/task_channel.py | TaskChannelList.get | def get(self, sid):
"""
Constructs a TaskChannelContext
:param sid: The sid
:returns: twilio.rest.taskrouter.v1.workspace.task_channel.TaskChannelContext
:rtype: twilio.rest.taskrouter.v1.workspace.task_channel.TaskChannelContext
"""
return TaskChannelContext(self._version, workspace_sid=self._solution['workspace_sid'], sid=sid, ) | python | def get(self, sid):
"""
Constructs a TaskChannelContext
:param sid: The sid
:returns: twilio.rest.taskrouter.v1.workspace.task_channel.TaskChannelContext
:rtype: twilio.rest.taskrouter.v1.workspace.task_channel.TaskChannelContext
"""
return TaskChannelContext(self._version, workspace_sid=self._solution['workspace_sid'], sid=sid, ) | ['def', 'get', '(', 'self', ',', 'sid', ')', ':', 'return', 'TaskChannelContext', '(', 'self', '.', '_version', ',', 'workspace_sid', '=', 'self', '.', '_solution', '[', "'workspace_sid'", ']', ',', 'sid', '=', 'sid', ',', ')'] | Constructs a TaskChannelContext
:param sid: The sid
:returns: twilio.rest.taskrouter.v1.workspace.task_channel.TaskChannelContext
:rtype: twilio.rest.taskrouter.v1.workspace.task_channel.TaskChannelContext | ['Constructs', 'a', 'TaskChannelContext'] | train | https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/taskrouter/v1/workspace/task_channel.py#L137-L146 |
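A hedged sketch of reaching this get() through the REST client; the SIDs and credentials are placeholders, and the accessor chain is inferred from the record rather than verified against a live account.

from twilio.rest import Client

client = Client("ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX", "your_auth_token")
workspace = client.taskrouter.workspaces("WSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX")
context = workspace.task_channels.get("TCXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX")
print(context)  # a TaskChannelContext; fetch() would load the resource itself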
4,078 | openstack/networking-arista | networking_arista/common/db_lib.py | filter_unmanaged_physnets | def filter_unmanaged_physnets(query):
"""Filter ports managed by other ML2 plugins """
config = cfg.CONF.ml2_arista
managed_physnets = config['managed_physnets']
# Filter out ports bound to segments on physnets that we're not
# managing
segment_model = segment_models.NetworkSegment
if managed_physnets:
query = (query
.join_if_necessary(segment_model)
.filter(segment_model.physical_network.in_(
managed_physnets)))
return query | python | def filter_unmanaged_physnets(query):
"""Filter ports managed by other ML2 plugins """
config = cfg.CONF.ml2_arista
managed_physnets = config['managed_physnets']
# Filter out ports bound to segments on physnets that we're not
# managing
segment_model = segment_models.NetworkSegment
if managed_physnets:
query = (query
.join_if_necessary(segment_model)
.filter(segment_model.physical_network.in_(
managed_physnets)))
return query | ['def', 'filter_unmanaged_physnets', '(', 'query', ')', ':', 'config', '=', 'cfg', '.', 'CONF', '.', 'ml2_arista', 'managed_physnets', '=', 'config', '[', "'managed_physnets'", ']', "# Filter out ports bound to segments on physnets that we're not", '# managing', 'segment_model', '=', 'segment_models', '.', 'NetworkSegment', 'if', 'managed_physnets', ':', 'query', '=', '(', 'query', '.', 'join_if_necessary', '(', 'segment_model', ')', '.', 'filter', '(', 'segment_model', '.', 'physical_network', '.', 'in_', '(', 'managed_physnets', ')', ')', ')', 'return', 'query'] | Filter ports managed by other ML2 plugins | ['Filter', 'ports', 'managed', 'by', 'other', 'ML2', 'plugins'] | train | https://github.com/openstack/networking-arista/blob/07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe/networking_arista/common/db_lib.py#L152-L165 |
4,079 | pandas-dev/pandas | pandas/io/sql.py | SQLTable._query_iterator | def _query_iterator(self, result, chunksize, columns, coerce_float=True,
parse_dates=None):
"""Return generator through chunked result set."""
while True:
data = result.fetchmany(chunksize)
if not data:
break
else:
self.frame = DataFrame.from_records(
data, columns=columns, coerce_float=coerce_float)
self._harmonize_columns(parse_dates=parse_dates)
if self.index is not None:
self.frame.set_index(self.index, inplace=True)
yield self.frame | python | def _query_iterator(self, result, chunksize, columns, coerce_float=True,
parse_dates=None):
"""Return generator through chunked result set."""
while True:
data = result.fetchmany(chunksize)
if not data:
break
else:
self.frame = DataFrame.from_records(
data, columns=columns, coerce_float=coerce_float)
self._harmonize_columns(parse_dates=parse_dates)
if self.index is not None:
self.frame.set_index(self.index, inplace=True)
yield self.frame | ['def', '_query_iterator', '(', 'self', ',', 'result', ',', 'chunksize', ',', 'columns', ',', 'coerce_float', '=', 'True', ',', 'parse_dates', '=', 'None', ')', ':', 'while', 'True', ':', 'data', '=', 'result', '.', 'fetchmany', '(', 'chunksize', ')', 'if', 'not', 'data', ':', 'break', 'else', ':', 'self', '.', 'frame', '=', 'DataFrame', '.', 'from_records', '(', 'data', ',', 'columns', '=', 'columns', ',', 'coerce_float', '=', 'coerce_float', ')', 'self', '.', '_harmonize_columns', '(', 'parse_dates', '=', 'parse_dates', ')', 'if', 'self', '.', 'index', 'is', 'not', 'None', ':', 'self', '.', 'frame', '.', 'set_index', '(', 'self', '.', 'index', ',', 'inplace', '=', 'True', ')', 'yield', 'self', '.', 'frame'] | Return generator through chunked result set. | ['Return', 'generator', 'through', 'chunked', 'result', 'set', '.'] | train | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/sql.py#L679-L696 |
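The private generator above backs chunked reads; a small sketch of one public path that exercises it, with a placeholder SQLite database and table name.

import pandas as pd
from sqlalchemy import create_engine

engine = create_engine("sqlite:///example.db")  # placeholder database
for chunk in pd.read_sql_table("trades", engine, chunksize=1000):
    print(len(chunk))  # each chunk arrives as a DataFrame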
4,080 | ucfopen/canvasapi | canvasapi/canvas.py | Canvas.get_outcome | def get_outcome(self, outcome):
"""
Returns the details of the outcome with the given id.
:calls: `GET /api/v1/outcomes/:id \
<https://canvas.instructure.com/doc/api/outcomes.html#method.outcomes_api.show>`_
:param outcome: The outcome object or ID to return.
:type outcome: :class:`canvasapi.outcome.Outcome` or int
:returns: An Outcome object.
:rtype: :class:`canvasapi.outcome.Outcome`
"""
from canvasapi.outcome import Outcome
outcome_id = obj_or_id(outcome, "outcome", (Outcome,))
response = self.__requester.request(
'GET',
'outcomes/{}'.format(outcome_id)
)
return Outcome(self.__requester, response.json()) | python | def get_outcome(self, outcome):
"""
Returns the details of the outcome with the given id.
:calls: `GET /api/v1/outcomes/:id \
<https://canvas.instructure.com/doc/api/outcomes.html#method.outcomes_api.show>`_
:param outcome: The outcome object or ID to return.
:type outcome: :class:`canvasapi.outcome.Outcome` or int
:returns: An Outcome object.
:rtype: :class:`canvasapi.outcome.Outcome`
"""
from canvasapi.outcome import Outcome
outcome_id = obj_or_id(outcome, "outcome", (Outcome,))
response = self.__requester.request(
'GET',
'outcomes/{}'.format(outcome_id)
)
return Outcome(self.__requester, response.json()) | ['def', 'get_outcome', '(', 'self', ',', 'outcome', ')', ':', 'from', 'canvasapi', '.', 'outcome', 'import', 'Outcome', 'outcome_id', '=', 'obj_or_id', '(', 'outcome', ',', '"outcome"', ',', '(', 'Outcome', ',', ')', ')', 'response', '=', 'self', '.', '__requester', '.', 'request', '(', "'GET'", ',', "'outcomes/{}'", '.', 'format', '(', 'outcome_id', ')', ')', 'return', 'Outcome', '(', 'self', '.', '__requester', ',', 'response', '.', 'json', '(', ')', ')'] | Returns the details of the outcome with the given id.
:calls: `GET /api/v1/outcomes/:id \
<https://canvas.instructure.com/doc/api/outcomes.html#method.outcomes_api.show>`_
:param outcome: The outcome object or ID to return.
:type outcome: :class:`canvasapi.outcome.Outcome` or int
:returns: An Outcome object.
:rtype: :class:`canvasapi.outcome.Outcome` | ['Returns', 'the', 'details', 'of', 'the', 'outcome', 'with', 'the', 'given', 'id', '.'] | train | https://github.com/ucfopen/canvasapi/blob/319064b5fc97ba54250af683eb98723ef3f76cf8/canvasapi/canvas.py#L1058-L1078 |
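A minimal usage sketch, assuming the documented Canvas(base_url, api_key) constructor; the URL, key and outcome id are placeholders.

from canvasapi import Canvas

canvas = Canvas("https://canvas.example.edu", "API_KEY")
outcome = canvas.get_outcome(1234)  # accepts an id or an Outcome object
print(outcome)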
4,081 | gmr/tinman | tinman/handlers/mixins.py | ModelAPIMixin.get | def get(self, *args, **kwargs):
"""Handle reading of the model
:param args:
:param kwargs:
"""
# Create the model and fetch its data
self.model = self.get_model(kwargs.get('id'))
result = yield self.model.fetch()
# If model is not found, return 404
if not result:
LOGGER.debug('Not found')
self.not_found()
return
# Stub to check for read permissions
if not self.has_read_permission():
LOGGER.debug('Permission denied')
self.permission_denied()
return
# Add the headers and return the content as JSON
self.add_headers()
self.finish(self.model_json()) | python | def get(self, *args, **kwargs):
"""Handle reading of the model
:param args:
:param kwargs:
"""
# Create the model and fetch its data
self.model = self.get_model(kwargs.get('id'))
result = yield self.model.fetch()
# If model is not found, return 404
if not result:
LOGGER.debug('Not found')
self.not_found()
return
# Stub to check for read permissions
if not self.has_read_permission():
LOGGER.debug('Permission denied')
self.permission_denied()
return
# Add the headers and return the content as JSON
self.add_headers()
self.finish(self.model_json()) | ['def', 'get', '(', 'self', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', '# Create the model and fetch its data', 'self', '.', 'model', '=', 'self', '.', 'get_model', '(', 'kwargs', '.', 'get', '(', "'id'", ')', ')', 'result', '=', 'yield', 'self', '.', 'model', '.', 'fetch', '(', ')', '# If model is not found, return 404', 'if', 'not', 'result', ':', 'LOGGER', '.', 'debug', '(', "'Not found'", ')', 'self', '.', 'not_found', '(', ')', 'return', '# Stub to check for read permissions', 'if', 'not', 'self', '.', 'has_read_permission', '(', ')', ':', 'LOGGER', '.', 'debug', '(', "'Permission denied'", ')', 'self', '.', 'permission_denied', '(', ')', 'return', '# Add the headers and return the content as JSON', 'self', '.', 'add_headers', '(', ')', 'self', '.', 'finish', '(', 'self', '.', 'model_json', '(', ')', ')'] | Handle reading of the model
:param args:
:param kwargs: | ['Handle', 'reading', 'of', 'the', 'model'] | train | https://github.com/gmr/tinman/blob/98f0acd15a228d752caa1864cdf02aaa3d492a9f/tinman/handlers/mixins.py#L258-L283 |
4,082 | softlayer/softlayer-python | SoftLayer/CLI/file/snapshot/disable.py | cli | def cli(env, volume_id, schedule_type):
"""Disables snapshots on the specified schedule for a given volume"""
if (schedule_type not in ['INTERVAL', 'HOURLY', 'DAILY', 'WEEKLY']):
raise exceptions.CLIAbort(
'--schedule_type must be INTERVAL, HOURLY, DAILY, or WEEKLY')
file_manager = SoftLayer.FileStorageManager(env.client)
disabled = file_manager.disable_snapshots(volume_id, schedule_type)
if disabled:
click.echo('%s snapshots have been disabled for volume %s'
% (schedule_type, volume_id)) | python | def cli(env, volume_id, schedule_type):
"""Disables snapshots on the specified schedule for a given volume"""
if (schedule_type not in ['INTERVAL', 'HOURLY', 'DAILY', 'WEEKLY']):
raise exceptions.CLIAbort(
'--schedule_type must be INTERVAL, HOURLY, DAILY, or WEEKLY')
file_manager = SoftLayer.FileStorageManager(env.client)
disabled = file_manager.disable_snapshots(volume_id, schedule_type)
if disabled:
click.echo('%s snapshots have been disabled for volume %s'
% (schedule_type, volume_id)) | ['def', 'cli', '(', 'env', ',', 'volume_id', ',', 'schedule_type', ')', ':', 'if', '(', 'schedule_type', 'not', 'in', '[', "'INTERVAL'", ',', "'HOURLY'", ',', "'DAILY'", ',', "'WEEKLY'", ']', ')', ':', 'raise', 'exceptions', '.', 'CLIAbort', '(', "'--schedule_type must be INTERVAL, HOURLY, DAILY, or WEEKLY'", ')', 'file_manager', '=', 'SoftLayer', '.', 'FileStorageManager', '(', 'env', '.', 'client', ')', 'disabled', '=', 'file_manager', '.', 'disable_snapshots', '(', 'volume_id', ',', 'schedule_type', ')', 'if', 'disabled', ':', 'click', '.', 'echo', '(', "'%s snapshots have been disabled for volume %s'", '%', '(', 'schedule_type', ',', 'volume_id', ')', ')'] | Disables snapshots on the specified schedule for a given volume | ['Disables', 'snapshots', 'on', 'the', 'specified', 'schedule', 'for', 'a', 'given', 'volume'] | train | https://github.com/softlayer/softlayer-python/blob/9f181be08cc3668353b05a6de0cb324f52cff6fa/SoftLayer/CLI/file/snapshot/disable.py#L16-L28 |
4,083 | deepmind/pysc2 | pysc2/lib/point.py | Point.floor | def floor(self):
"""Round `x` and `y` down to integers."""
return Point(int(math.floor(self.x)), int(math.floor(self.y))) | python | def floor(self):
"""Round `x` and `y` down to integers."""
return Point(int(math.floor(self.x)), int(math.floor(self.y))) | ['def', 'floor', '(', 'self', ')', ':', 'return', 'Point', '(', 'int', '(', 'math', '.', 'floor', '(', 'self', '.', 'x', ')', ')', ',', 'int', '(', 'math', '.', 'floor', '(', 'self', '.', 'y', ')', ')', ')'] | Round `x` and `y` down to integers. | ['Round', 'x', 'and', 'y', 'down', 'to', 'integers', '.'] | train | https://github.com/deepmind/pysc2/blob/df4cc4b00f07a2242be9ba153d4a7f4ad2017897/pysc2/lib/point.py#L60-L62 |
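A minimal sketch of Point.floor(), importing from the module path shown in this record.

from pysc2.lib import point

p = point.Point(3.7, 5.2)
q = p.floor()
print(q.x, q.y)  # 3 5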
4,084 | pesaply/sarafu | pesaply.py | pesaplyMM._parse_transactions | def _parse_transactions(self, response):
"""
This method parses the CSV output in `get_transactions`
to generate a usable list of transactions that use native
python data types
"""
transactions = list()
if response:
f = StringIO(response)
reader = csv.DictReader(f)
for line in reader:
txn = {}
txn['date'] = datetime.strptime(line['Date'], '%d/%m/%Y %H:%M:%S')
txn['description'] = line['Description']
txn['amount'] = float(line['Amount'].replace(',', ''))
txn['reference'] = line['Transaction number']
txn['sender'] = line['???transfer.fromOwner???']
txn['recipient'] = line['???transfer.toOwner???']
txn['currency'] = 'TSH'
txn['comment'] = line['Transaction type']
transactions.append(txn)
return transactions | python | def _parse_transactions(self, response):
"""
This method parses the CSV output in `get_transactions`
to generate a usable list of transactions that use native
python data types
"""
transactions = list()
if response:
f = StringIO(response)
reader = csv.DictReader(f)
for line in reader:
txn = {}
txn['date'] = datetime.strptime(line['Date'], '%d/%m/%Y %H:%M:%S')
txn['description'] = line['Description']
txn['amount'] = float(line['Amount'].replace(',', ''))
txn['reference'] = line['Transaction number']
txn['sender'] = line['???transfer.fromOwner???']
txn['recipient'] = line['???transfer.toOwner???']
txn['currency'] = 'TSH'
txn['comment'] = line['Transaction type']
transactions.append(txn)
return transactions | ['def', '_parse_transactions', '(', 'self', ',', 'response', ')', ':', 'transactions', '=', 'list', '(', ')', 'if', 'response', ':', 'f', '=', 'StringIO', '(', 'response', ')', 'reader', '=', 'csv', '.', 'DictReader', '(', 'f', ')', 'for', 'line', 'in', 'reader', ':', 'txn', '=', '{', '}', 'txn', '[', "'date'", ']', '=', 'datetime', '.', 'strptime', '(', 'line', '[', "'Date'", ']', ',', "'%d/%m/%Y %H:%M:%S'", ')', 'txn', '[', "'description'", ']', '=', 'line', '[', "'Description'", ']', 'txn', '[', "'amount'", ']', '=', 'float', '(', 'line', '[', "'Amount'", ']', '.', 'replace', '(', "','", ',', "''", ')', ')', 'txn', '[', "'reference'", ']', '=', 'line', '[', "'Transaction number'", ']', 'txn', '[', "'sender'", ']', '=', 'line', '[', "'???transfer.fromOwner???'", ']', 'txn', '[', "'recipient'", ']', '=', 'line', '[', "'???transfer.toOwner???'", ']', 'txn', '[', "'currency'", ']', '=', "'TSH'", 'txn', '[', "'comment'", ']', '=', 'line', '[', "'Transaction type'", ']', 'transactions', '.', 'append', '(', 'txn', ')', 'return', 'transactions'] | This method parses the CSV output in `get_transactions`
to generate a usable list of transactions that use native
python data types | ['This', 'method', 'parses', 'the', 'CSV', 'output', 'in', 'get_transactions', 'to', 'generate', 'a', 'usable', 'list', 'of', 'transactions', 'that', 'use', 'native', 'python', 'data', 'types'] | train | https://github.com/pesaply/sarafu/blob/8c1296d48427a6cf17ffc5600d100b49acc9c5b7/pesaply.py#L253-L278 |
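Illustrative only: the shape of one parsed transaction dict, with fabricated placeholder values (exercising the real parser needs a live pesaply session and its CSV export).

from datetime import datetime

txn = {
    "date": datetime(2019, 2, 1, 10, 15, 0),
    "description": "Airtime purchase",
    "amount": 1500.0,
    "reference": "TX123",
    "sender": "Alice",
    "recipient": "Bob",
    "currency": "TSH",
    "comment": "Payment",
}
print(txn["amount"], txn["currency"])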
4,085 | WojciechMula/pyahocorasick | py/pyahocorasick.py | Trie.add_word | def add_word(self, word, value):
"""
Adds word and associated value.
If word already exists, its value is replaced.
"""
if not word:
return
node = self.root
for c in word:
try:
node = node.children[c]
except KeyError:
n = TrieNode(c)
node.children[c] = n
node = n
node.output = value | python | def add_word(self, word, value):
"""
Adds word and associated value.
If word already exists, its value is replaced.
"""
if not word:
return
node = self.root
for c in word:
try:
node = node.children[c]
except KeyError:
n = TrieNode(c)
node.children[c] = n
node = n
node.output = value | ['def', 'add_word', '(', 'self', ',', 'word', ',', 'value', ')', ':', 'if', 'not', 'word', ':', 'return', 'node', '=', 'self', '.', 'root', 'for', 'c', 'in', 'word', ':', 'try', ':', 'node', '=', 'node', '.', 'children', '[', 'c', ']', 'except', 'KeyError', ':', 'n', '=', 'TrieNode', '(', 'c', ')', 'node', '.', 'children', '[', 'c', ']', '=', 'n', 'node', '=', 'n', 'node', '.', 'output', '=', 'value'] | Adds word and associated value.
If word already exists, its value is replaced. | ['Adds', 'word', 'and', 'associated', 'value', '.'] | train | https://github.com/WojciechMula/pyahocorasick/blob/53842f783fbe3fa77d53cde1ac251b23c3cbed02/py/pyahocorasick.py#L151-L169 |
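A minimal sketch against the pure-Python Trie in this record; the module name is inferred from the file path py/pyahocorasick.py (it is not the compiled ahocorasick extension), and the inserted words are arbitrary.

from pyahocorasick import Trie

trie = Trie()
trie.add_word("he", 1)
trie.add_word("hers", 2)  # adding an existing word would replace its value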
4,086 | reingart/pyafipws | wsremcarne.py | WSRemCarne.ConsultarTiposContingencia | def ConsultarTiposContingencia(self, sep="||"):
"Obtener el código y descripción para cada tipo de contingencia que puede reportar"
ret = self.client.consultarTiposContingencia(
authRequest={
'token': self.Token, 'sign': self.Sign,
'cuitRepresentada': self.Cuit, },
)['consultarTiposContingenciaReturn']
self.__analizar_errores(ret)
array = ret.get('arrayTiposContingencia', [])
lista = [it['codigoDescripcion'] for it in array]
return [(u"%s {codigo} %s {descripcion} %s" % (sep, sep, sep)).format(**it) if sep else it for it in lista] | python | def ConsultarTiposContingencia(self, sep="||"):
"Obtener el código y descripción para cada tipo de contingencia que puede reportar"
ret = self.client.consultarTiposContingencia(
authRequest={
'token': self.Token, 'sign': self.Sign,
'cuitRepresentada': self.Cuit, },
)['consultarTiposContingenciaReturn']
self.__analizar_errores(ret)
array = ret.get('arrayTiposContingencia', [])
lista = [it['codigoDescripcion'] for it in array]
return [(u"%s {codigo} %s {descripcion} %s" % (sep, sep, sep)).format(**it) if sep else it for it in lista] | ['def', 'ConsultarTiposContingencia', '(', 'self', ',', 'sep', '=', '"||"', ')', ':', 'ret', '=', 'self', '.', 'client', '.', 'consultarTiposContingencia', '(', 'authRequest', '=', '{', "'token'", ':', 'self', '.', 'Token', ',', "'sign'", ':', 'self', '.', 'Sign', ',', "'cuitRepresentada'", ':', 'self', '.', 'Cuit', ',', '}', ',', ')', '[', "'consultarTiposContingenciaReturn'", ']', 'self', '.', '__analizar_errores', '(', 'ret', ')', 'array', '=', 'ret', '.', 'get', '(', "'arrayTiposContingencia'", ',', '[', ']', ')', 'lista', '=', '[', 'it', '[', "'codigoDescripcion'", ']', 'for', 'it', 'in', 'array', ']', 'return', '[', '(', 'u"%s {codigo} %s {descripcion} %s"', '%', '(', 'sep', ',', 'sep', ',', 'sep', ')', ')', '.', 'format', '(', '*', '*', 'it', ')', 'if', 'sep', 'else', 'it', 'for', 'it', 'in', 'lista', ']'] | Obtener el código y descripción para cada tipo de contingencia que puede reportar | ['Obtener', 'el', 'código', 'y', 'descripción', 'para', 'cada', 'tipo', 'de', 'contingencia', 'que', 'puede', 'reportar'] | train | https://github.com/reingart/pyafipws/blob/ee87cfe4ac12285ab431df5fec257f103042d1ab/wsremcarne.py#L346-L356 |
4,087 | QUANTAXIS/QUANTAXIS | QUANTAXIS/QAFetch/QATdx.py | QA_fetch_get_option_50etf_contract_time_to_market | def QA_fetch_get_option_50etf_contract_time_to_market():
'''
#🛠todo 获取期权合约的上市日期 ? 暂时没有。
:return: list Series
'''
result = QA_fetch_get_option_list('tdx')
# pprint.pprint(result)
# category market code name desc code
'''
fix here :
See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
result['meaningful_name'] = None
C:\work_new\QUANTAXIS\QUANTAXIS\QAFetch\QATdx.py:1468: SettingWithCopyWarning:
A value is trying to be set on a copy of a slice from a DataFrame.
Try using .loc[row_indexer,col_indexer] = value instead
'''
# df = pd.DataFrame()
rows = []
result['meaningful_name'] = None
for idx in result.index:
# pprint.pprint((idx))
strCategory = result.loc[idx, "category"]
strMarket = result.loc[idx, "market"]
strCode = result.loc[idx, "code"] # 10001215
strName = result.loc[idx, 'name'] # 510050C9M03200
strDesc = result.loc[idx, 'desc'] # 10001215
if strName.startswith("510050"):
# print(strCategory,' ', strMarket, ' ', strCode, ' ', strName, ' ', strDesc, )
if strName.startswith("510050C"):
putcall = '50ETF,认购期权'
elif strName.startswith("510050P"):
putcall = '50ETF,认沽期权'
else:
putcall = "Unkown code name : " + strName
expireMonth = strName[7:8]
if expireMonth == 'A':
expireMonth = "10月"
elif expireMonth == 'B':
expireMonth = "11月"
elif expireMonth == 'C':
expireMonth = "12月"
else:
expireMonth = expireMonth + '月'
# 第12位期初设为“M”,并根据合约调整次数按照“A”至“Z”依序变更,如变更为“A”表示期权合约发生首次调整,变更为“B”表示期权合约发生第二次调整,依此类推;
# fix here : M ??
if strName[8:9] == "M":
adjust = "未调整"
elif strName[8:9] == 'A':
adjust = " 第1次调整"
elif strName[8:9] == 'B':
adjust = " 第2调整"
elif strName[8:9] == 'C':
adjust = " 第3次调整"
elif strName[8:9] == 'D':
adjust = " 第4次调整"
elif strName[8:9] == 'E':
adjust = " 第5次调整"
elif strName[8:9] == 'F':
adjust = " 第6次调整"
elif strName[8:9] == 'G':
adjust = " 第7次调整"
elif strName[8:9] == 'H':
adjust = " 第8次调整"
elif strName[8:9] == 'I':
adjust = " 第9次调整"
elif strName[8:9] == 'J':
adjust = " 第10次调整"
else:
adjust = " 第10次以上的调整,调整代码 %s" + strName[8:9]
executePrice = strName[9:]
result.loc[idx, 'meaningful_name'] = '%s,到期月份:%s,%s,行权价:%s' % (
putcall, expireMonth, adjust, executePrice)
row = result.loc[idx]
rows.append(row)
return rows | python | def QA_fetch_get_option_50etf_contract_time_to_market():
'''
#🛠todo 获取期权合约的上市日期 ? 暂时没有。
:return: list Series
'''
result = QA_fetch_get_option_list('tdx')
# pprint.pprint(result)
# category market code name desc code
'''
fix here :
See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
result['meaningful_name'] = None
C:\work_new\QUANTAXIS\QUANTAXIS\QAFetch\QATdx.py:1468: SettingWithCopyWarning:
A value is trying to be set on a copy of a slice from a DataFrame.
Try using .loc[row_indexer,col_indexer] = value instead
'''
# df = pd.DataFrame()
rows = []
result['meaningful_name'] = None
for idx in result.index:
# pprint.pprint((idx))
strCategory = result.loc[idx, "category"]
strMarket = result.loc[idx, "market"]
strCode = result.loc[idx, "code"] # 10001215
strName = result.loc[idx, 'name'] # 510050C9M03200
strDesc = result.loc[idx, 'desc'] # 10001215
if strName.startswith("510050"):
# print(strCategory,' ', strMarket, ' ', strCode, ' ', strName, ' ', strDesc, )
if strName.startswith("510050C"):
putcall = '50ETF,认购期权'
elif strName.startswith("510050P"):
putcall = '50ETF,认沽期权'
else:
putcall = "Unkown code name : " + strName
expireMonth = strName[7:8]
if expireMonth == 'A':
expireMonth = "10月"
elif expireMonth == 'B':
expireMonth = "11月"
elif expireMonth == 'C':
expireMonth = "12月"
else:
expireMonth = expireMonth + '月'
# 第12位期初设为“M”,并根据合约调整次数按照“A”至“Z”依序变更,如变更为“A”表示期权合约发生首次调整,变更为“B”表示期权合约发生第二次调整,依此类推;
# fix here : M ??
if strName[8:9] == "M":
adjust = "未调整"
elif strName[8:9] == 'A':
adjust = " 第1次调整"
elif strName[8:9] == 'B':
adjust = " 第2调整"
elif strName[8:9] == 'C':
adjust = " 第3次调整"
elif strName[8:9] == 'D':
adjust = " 第4次调整"
elif strName[8:9] == 'E':
adjust = " 第5次调整"
elif strName[8:9] == 'F':
adjust = " 第6次调整"
elif strName[8:9] == 'G':
adjust = " 第7次调整"
elif strName[8:9] == 'H':
adjust = " 第8次调整"
elif strName[8:9] == 'I':
adjust = " 第9次调整"
elif strName[8:9] == 'J':
adjust = " 第10次调整"
else:
adjust = " 第10次以上的调整,调整代码 %s" + strName[8:9]
executePrice = strName[9:]
result.loc[idx, 'meaningful_name'] = '%s,到期月份:%s,%s,行权价:%s' % (
putcall, expireMonth, adjust, executePrice)
row = result.loc[idx]
rows.append(row)
return rows | ['def', 'QA_fetch_get_option_50etf_contract_time_to_market', '(', ')', ':', 'result', '=', 'QA_fetch_get_option_list', '(', "'tdx'", ')', '# pprint.pprint(result)', '# category market code name desc code', "'''\n fix here : \n See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy\n result['meaningful_name'] = None\n C:\\work_new\\QUANTAXIS\\QUANTAXIS\\QAFetch\\QATdx.py:1468: SettingWithCopyWarning: \n A value is trying to be set on a copy of a slice from a DataFrame.\n Try using .loc[row_indexer,col_indexer] = value instead\n '''", '# df = pd.DataFrame()', 'rows', '=', '[', ']', 'result', '[', "'meaningful_name'", ']', '=', 'None', 'for', 'idx', 'in', 'result', '.', 'index', ':', '# pprint.pprint((idx))', 'strCategory', '=', 'result', '.', 'loc', '[', 'idx', ',', '"category"', ']', 'strMarket', '=', 'result', '.', 'loc', '[', 'idx', ',', '"market"', ']', 'strCode', '=', 'result', '.', 'loc', '[', 'idx', ',', '"code"', ']', '# 10001215', 'strName', '=', 'result', '.', 'loc', '[', 'idx', ',', "'name'", ']', '# 510050C9M03200', 'strDesc', '=', 'result', '.', 'loc', '[', 'idx', ',', "'desc'", ']', '# 10001215', 'if', 'strName', '.', 'startswith', '(', '"510050"', ')', ':', "# print(strCategory,' ', strMarket, ' ', strCode, ' ', strName, ' ', strDesc, )", 'if', 'strName', '.', 'startswith', '(', '"510050C"', ')', ':', 'putcall', '=', "'50ETF,认购期权'", 'elif', 'strName', '.', 'startswith', '(', '"510050P"', ')', ':', 'putcall', '=', "'50ETF,认沽期权'", 'else', ':', 'putcall', '=', '"Unkown code name : " +', 's', 'rName', 'expireMonth', '=', 'strName', '[', '7', ':', '8', ']', 'if', 'expireMonth', '==', "'A'", ':', 'expireMonth', '=', '"10月"', 'elif', 'expireMonth', '==', "'B'", ':', 'expireMonth', '=', '"11月"', 'elif', 'expireMonth', '==', "'C'", ':', 'expireMonth', '=', '"12月"', 'else', ':', 'expireMonth', '=', 'expireMonth', '+', "'月'", '# 第12位期初设为“M”,并根据合约调整次数按照“A”至“Z”依序变更,如变更为“A”表示期权合约发生首次调整,变更为“B”表示期权合约发生第二次调整,依此类推;', '# fix here : M ??', 'if', 'strName', '[', '8', ':', '9', ']', '==', '"M"', ':', 'adjust', '=', '"未调整"', 'elif', 'strName', '[', '8', ':', '9', ']', '==', "'A'", ':', 'adjust', '=', '" 第1次调整"', 'elif', 'strName', '[', '8', ':', '9', ']', '==', "'B'", ':', 'adjust', '=', '" 第2调整"', 'elif', 'strName', '[', '8', ':', '9', ']', '==', "'C'", ':', 'adjust', '=', '" 第3次调整"', 'elif', 'strName', '[', '8', ':', '9', ']', '==', "'D'", ':', 'adjust', '=', '" 第4次调整"', 'elif', 'strName', '[', '8', ':', '9', ']', '==', "'E'", ':', 'adjust', '=', '" 第5次调整"', 'elif', 'strName', '[', '8', ':', '9', ']', '==', "'F'", ':', 'adjust', '=', '" 第6次调整"', 'elif', 'strName', '[', '8', ':', '9', ']', '==', "'G'", ':', 'adjust', '=', '" 第7次调整"', 'elif', 'strName', '[', '8', ':', '9', ']', '==', "'H'", ':', 'adjust', '=', '" 第8次调整"', 'elif', 'strName', '[', '8', ':', '9', ']', '==', "'I'", ':', 'adjust', '=', '" 第9次调整"', 'elif', 'strName', '[', '8', ':', '9', ']', '==', "'J'", ':', 'adjust', '=', '" 第10次调整"', 'else', ':', 'adjust', '=', '" 第10次以上的调整,调整代码 %s" + strName[8:9]', '', '', '', '', '', '', '', 'executePrice', '=', 'strName', '[', '9', ':', ']', 'result', '.', 'loc', '[', 'idx', ',', "'meaningful_name'", ']', '=', "'%s,到期月份:%s,%s,行权价:%s' % (", '', '', 'putcall', ',', 'expireMonth', ',', 'adjust', ',', 'executePrice', ')', 'row', '=', 'result', '.', 'loc', '[', 'idx', ']', 'rows', '.', 'append', '(', 'row', ')', 'return', 'rows'] | #🛠todo 获取期权合约的上市日期 ? 暂时没有。
:return: list Series | ['#🛠todo', '获取期权合约的上市日期', '?', '暂时没有。', ':', 'return', ':', 'list', 'Series'] | train | https://github.com/QUANTAXIS/QUANTAXIS/blob/bb1fe424e4108b62a1f712b81a05cf829297a5c0/QUANTAXIS/QAFetch/QATdx.py#L1649-L1730 |
4,088 | eaton-lab/toytree | toytree/TreeParser.py | Newick2TreeNode.newick_from_string | def newick_from_string(self):
"Reads a newick string in the New Hampshire format."
# split on parentheses to traverse hierarchical tree structure
for chunk in self.data.split("(")[1:]:
# add child to make this node a parent.
self.current_parent = (
self.root if self.current_parent is None else
self.current_parent.add_child()
)
# get all parenth endings from this parenth start
subchunks = [ch.strip() for ch in chunk.split(",")]
if subchunks[-1] != '' and not subchunks[-1].endswith(';'):
raise NewickError(
'Broken newick structure at: {}'.format(chunk))
# Every closing parenthesis will close a node and go up one level.
for idx, leaf in enumerate(subchunks):
if leaf.strip() == '' and idx == len(subchunks) - 1:
continue
closing_nodes = leaf.split(")")
# parse features and apply to the node object
self.apply_node_data(closing_nodes[0], "leaf")
# next contain closing nodes and data about the internal nodes.
if len(closing_nodes) > 1:
for closing_internal in closing_nodes[1:]:
closing_internal = closing_internal.rstrip(";")
# read internal node data and go up one level
self.apply_node_data(closing_internal, "internal")
self.current_parent = self.current_parent.up
return self.root | python | def newick_from_string(self):
"Reads a newick string in the New Hampshire format."
# split on parentheses to traverse hierarchical tree structure
for chunk in self.data.split("(")[1:]:
# add child to make this node a parent.
self.current_parent = (
self.root if self.current_parent is None else
self.current_parent.add_child()
)
# get all parenth endings from this parenth start
subchunks = [ch.strip() for ch in chunk.split(",")]
if subchunks[-1] != '' and not subchunks[-1].endswith(';'):
raise NewickError(
'Broken newick structure at: {}'.format(chunk))
# Every closing parenthesis will close a node and go up one level.
for idx, leaf in enumerate(subchunks):
if leaf.strip() == '' and idx == len(subchunks) - 1:
continue
closing_nodes = leaf.split(")")
# parse features and apply to the node object
self.apply_node_data(closing_nodes[0], "leaf")
# next contain closing nodes and data about the internal nodes.
if len(closing_nodes) > 1:
for closing_internal in closing_nodes[1:]:
closing_internal = closing_internal.rstrip(";")
# read internal node data and go up one level
self.apply_node_data(closing_internal, "internal")
self.current_parent = self.current_parent.up
return self.root | ['def', 'newick_from_string', '(', 'self', ')', ':', '# split on parentheses to traverse hierarchical tree structure', 'for', 'chunk', 'in', 'self', '.', 'data', '.', 'split', '(', '"("', ')', '[', '1', ':', ']', ':', '# add child to make this node a parent.', 'self', '.', 'current_parent', '=', '(', 'self', '.', 'root', 'if', 'self', '.', 'current_parent', 'is', 'None', 'else', 'self', '.', 'current_parent', '.', 'add_child', '(', ')', ')', '# get all parenth endings from this parenth start', 'subchunks', '=', '[', 'ch', '.', 'strip', '(', ')', 'for', 'ch', 'in', 'chunk', '.', 'split', '(', '","', ')', ']', 'if', 'subchunks', '[', '-', '1', ']', '!=', "''", 'and', 'not', 'subchunks', '[', '-', '1', ']', '.', 'endswith', '(', "';'", ')', ':', 'raise', 'NewickError', '(', "'Broken newick structure at: {}'", '.', 'format', '(', 'chunk', ')', ')', '# Every closing parenthesis will close a node and go up one level.', 'for', 'idx', ',', 'leaf', 'in', 'enumerate', '(', 'subchunks', ')', ':', 'if', 'leaf', '.', 'strip', '(', ')', '==', "''", 'and', 'idx', '==', 'len', '(', 'subchunks', ')', '-', '1', ':', 'continue', 'closing_nodes', '=', 'leaf', '.', 'split', '(', '")"', ')', '# parse features and apply to the node object', 'self', '.', 'apply_node_data', '(', 'closing_nodes', '[', '0', ']', ',', '"leaf"', ')', '# next contain closing nodes and data about the internal nodes.', 'if', 'len', '(', 'closing_nodes', ')', '>', '1', ':', 'for', 'closing_internal', 'in', 'closing_nodes', '[', '1', ':', ']', ':', 'closing_internal', '=', 'closing_internal', '.', 'rstrip', '(', '";"', ')', '# read internal node data and go up one level', 'self', '.', 'apply_node_data', '(', 'closing_internal', ',', '"internal"', ')', 'self', '.', 'current_parent', '=', 'self', '.', 'current_parent', '.', 'up', 'return', 'self', '.', 'root'] | Reads a newick string in the New Hampshire format. | ['Reads', 'a', 'newick', 'string', 'in', 'the', 'New', 'Hampshire', 'format', '.'] | train | https://github.com/eaton-lab/toytree/blob/0347ed2098acc5f707fadf52a0ecd411a6d1859c/toytree/TreeParser.py#L319-L352 |
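The parser above is normally reached through the package's top-level factory; a small sketch assuming the documented toytree.tree() entry point and an arbitrary newick string.

import toytree

newick = "((a:1,b:1):1,c:2);"
tre = toytree.tree(newick)
print(tre.ntips)  # 3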
4,089 | yamcs/yamcs-python | yamcs-client/yamcs/client.py | YamcsClient.get_user_info | def get_user_info(self):
"""
Get information on the authenticated user.
:rtype: .UserInfo
"""
response = self.get_proto(path='/user')
message = yamcsManagement_pb2.UserInfo()
message.ParseFromString(response.content)
return UserInfo(message) | python | def get_user_info(self):
"""
Get information on the authenticated user.
:rtype: .UserInfo
"""
response = self.get_proto(path='/user')
message = yamcsManagement_pb2.UserInfo()
message.ParseFromString(response.content)
return UserInfo(message) | ['def', 'get_user_info', '(', 'self', ')', ':', 'response', '=', 'self', '.', 'get_proto', '(', 'path', '=', "'/user'", ')', 'message', '=', 'yamcsManagement_pb2', '.', 'UserInfo', '(', ')', 'message', '.', 'ParseFromString', '(', 'response', '.', 'content', ')', 'return', 'UserInfo', '(', 'message', ')'] | Get information on the authenticated user.
:rtype: .UserInfo | ['Get', 'information', 'on', 'the', 'authenticated', 'user', '.'] | train | https://github.com/yamcs/yamcs-python/blob/1082fee8a299010cc44416bbb7518fac0ef08b48/yamcs-client/yamcs/client.py#L193-L202 |
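A minimal usage sketch, assuming the documented YamcsClient constructor; the server address is a placeholder.

from yamcs.client import YamcsClient

client = YamcsClient("localhost:8090")
user = client.get_user_info()
print(user)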
4,090 | nicolargo/glances | glances/plugins/glances_cpu.py | Plugin.msg_curse | def msg_curse(self, args=None, max_width=None):
"""Return the list to display in the UI."""
# Init the return message
ret = []
# Only process if stats exist and plugin not disable
if not self.stats or self.args.percpu or self.is_disable():
return ret
# Build the string message
# If user stat is not here, display only idle / total CPU usage (for
# exemple on Windows OS)
idle_tag = 'user' not in self.stats
# Header
msg = '{}'.format('CPU')
ret.append(self.curse_add_line(msg, "TITLE"))
trend_user = self.get_trend('user')
trend_system = self.get_trend('system')
if trend_user is None or trend_user is None:
trend_cpu = None
else:
trend_cpu = trend_user + trend_system
msg = ' {:4}'.format(self.trend_msg(trend_cpu))
ret.append(self.curse_add_line(msg))
# Total CPU usage
msg = '{:5.1f}%'.format(self.stats['total'])
if idle_tag:
ret.append(self.curse_add_line(
msg, self.get_views(key='total', option='decoration')))
else:
ret.append(self.curse_add_line(msg))
# Nice CPU
if 'nice' in self.stats:
msg = ' {:8}'.format('nice:')
ret.append(self.curse_add_line(msg, optional=self.get_views(key='nice', option='optional')))
msg = '{:5.1f}%'.format(self.stats['nice'])
ret.append(self.curse_add_line(msg, optional=self.get_views(key='nice', option='optional')))
# ctx_switches
if 'ctx_switches' in self.stats:
msg = ' {:8}'.format('ctx_sw:')
ret.append(self.curse_add_line(msg, optional=self.get_views(key='ctx_switches', option='optional')))
msg = '{:>5}'.format(self.auto_unit(int(self.stats['ctx_switches'] // self.stats['time_since_update']),
min_symbol='K'))
ret.append(self.curse_add_line(
msg, self.get_views(key='ctx_switches', option='decoration'),
optional=self.get_views(key='ctx_switches', option='optional')))
# New line
ret.append(self.curse_new_line())
# User CPU
if 'user' in self.stats:
msg = '{:8}'.format('user:')
ret.append(self.curse_add_line(msg))
msg = '{:5.1f}%'.format(self.stats['user'])
ret.append(self.curse_add_line(
msg, self.get_views(key='user', option='decoration')))
elif 'idle' in self.stats:
msg = '{:8}'.format('idle:')
ret.append(self.curse_add_line(msg))
msg = '{:5.1f}%'.format(self.stats['idle'])
ret.append(self.curse_add_line(msg))
# IRQ CPU
if 'irq' in self.stats:
msg = ' {:8}'.format('irq:')
ret.append(self.curse_add_line(msg, optional=self.get_views(key='irq', option='optional')))
msg = '{:5.1f}%'.format(self.stats['irq'])
ret.append(self.curse_add_line(msg, optional=self.get_views(key='irq', option='optional')))
# interrupts
if 'interrupts' in self.stats:
msg = ' {:8}'.format('inter:')
ret.append(self.curse_add_line(msg, optional=self.get_views(key='interrupts', option='optional')))
msg = '{:>5}'.format(int(self.stats['interrupts'] // self.stats['time_since_update']))
ret.append(self.curse_add_line(msg, optional=self.get_views(key='interrupts', option='optional')))
# New line
ret.append(self.curse_new_line())
# System CPU
if 'system' in self.stats and not idle_tag:
msg = '{:8}'.format('system:')
ret.append(self.curse_add_line(msg))
msg = '{:5.1f}%'.format(self.stats['system'])
ret.append(self.curse_add_line(
msg, self.get_views(key='system', option='decoration')))
else:
msg = '{:8}'.format('core:')
ret.append(self.curse_add_line(msg))
msg = '{:>6}'.format(self.stats['nb_log_core'])
ret.append(self.curse_add_line(msg))
# IOWait CPU
if 'iowait' in self.stats:
msg = ' {:8}'.format('iowait:')
ret.append(self.curse_add_line(msg, optional=self.get_views(key='iowait', option='optional')))
msg = '{:5.1f}%'.format(self.stats['iowait'])
ret.append(self.curse_add_line(
msg, self.get_views(key='iowait', option='decoration'),
optional=self.get_views(key='iowait', option='optional')))
# soft_interrupts
if 'soft_interrupts' in self.stats:
msg = ' {:8}'.format('sw_int:')
ret.append(self.curse_add_line(msg, optional=self.get_views(key='soft_interrupts', option='optional')))
msg = '{:>5}'.format(int(self.stats['soft_interrupts'] // self.stats['time_since_update']))
ret.append(self.curse_add_line(msg, optional=self.get_views(key='soft_interrupts', option='optional')))
# New line
ret.append(self.curse_new_line())
# Idle CPU
if 'idle' in self.stats and not idle_tag:
msg = '{:8}'.format('idle:')
ret.append(self.curse_add_line(msg))
msg = '{:5.1f}%'.format(self.stats['idle'])
ret.append(self.curse_add_line(msg))
# Steal CPU usage
if 'steal' in self.stats:
msg = ' {:8}'.format('steal:')
ret.append(self.curse_add_line(msg, optional=self.get_views(key='steal', option='optional')))
msg = '{:5.1f}%'.format(self.stats['steal'])
ret.append(self.curse_add_line(
msg, self.get_views(key='steal', option='decoration'),
optional=self.get_views(key='steal', option='optional')))
# syscalls
# syscalls: number of system calls since boot. Always set to 0 on Linux. (do not display)
if 'syscalls' in self.stats and not LINUX:
msg = ' {:8}'.format('syscal:')
ret.append(self.curse_add_line(msg, optional=self.get_views(key='syscalls', option='optional')))
msg = '{:>5}'.format(int(self.stats['syscalls'] // self.stats['time_since_update']))
ret.append(self.curse_add_line(msg, optional=self.get_views(key='syscalls', option='optional')))
# Return the message with decoration
return ret | python | def msg_curse(self, args=None, max_width=None):
"""Return the list to display in the UI."""
# Init the return message
ret = []
# Only process if stats exist and plugin not disable
if not self.stats or self.args.percpu or self.is_disable():
return ret
# Build the string message
# If user stat is not here, display only idle / total CPU usage (for
# exemple on Windows OS)
idle_tag = 'user' not in self.stats
# Header
msg = '{}'.format('CPU')
ret.append(self.curse_add_line(msg, "TITLE"))
trend_user = self.get_trend('user')
trend_system = self.get_trend('system')
if trend_user is None or trend_user is None:
trend_cpu = None
else:
trend_cpu = trend_user + trend_system
msg = ' {:4}'.format(self.trend_msg(trend_cpu))
ret.append(self.curse_add_line(msg))
# Total CPU usage
msg = '{:5.1f}%'.format(self.stats['total'])
if idle_tag:
ret.append(self.curse_add_line(
msg, self.get_views(key='total', option='decoration')))
else:
ret.append(self.curse_add_line(msg))
# Nice CPU
if 'nice' in self.stats:
msg = ' {:8}'.format('nice:')
ret.append(self.curse_add_line(msg, optional=self.get_views(key='nice', option='optional')))
msg = '{:5.1f}%'.format(self.stats['nice'])
ret.append(self.curse_add_line(msg, optional=self.get_views(key='nice', option='optional')))
# ctx_switches
if 'ctx_switches' in self.stats:
msg = ' {:8}'.format('ctx_sw:')
ret.append(self.curse_add_line(msg, optional=self.get_views(key='ctx_switches', option='optional')))
msg = '{:>5}'.format(self.auto_unit(int(self.stats['ctx_switches'] // self.stats['time_since_update']),
min_symbol='K'))
ret.append(self.curse_add_line(
msg, self.get_views(key='ctx_switches', option='decoration'),
optional=self.get_views(key='ctx_switches', option='optional')))
# New line
ret.append(self.curse_new_line())
# User CPU
if 'user' in self.stats:
msg = '{:8}'.format('user:')
ret.append(self.curse_add_line(msg))
msg = '{:5.1f}%'.format(self.stats['user'])
ret.append(self.curse_add_line(
msg, self.get_views(key='user', option='decoration')))
elif 'idle' in self.stats:
msg = '{:8}'.format('idle:')
ret.append(self.curse_add_line(msg))
msg = '{:5.1f}%'.format(self.stats['idle'])
ret.append(self.curse_add_line(msg))
# IRQ CPU
if 'irq' in self.stats:
msg = ' {:8}'.format('irq:')
ret.append(self.curse_add_line(msg, optional=self.get_views(key='irq', option='optional')))
msg = '{:5.1f}%'.format(self.stats['irq'])
ret.append(self.curse_add_line(msg, optional=self.get_views(key='irq', option='optional')))
# interrupts
if 'interrupts' in self.stats:
msg = ' {:8}'.format('inter:')
ret.append(self.curse_add_line(msg, optional=self.get_views(key='interrupts', option='optional')))
msg = '{:>5}'.format(int(self.stats['interrupts'] // self.stats['time_since_update']))
ret.append(self.curse_add_line(msg, optional=self.get_views(key='interrupts', option='optional')))
# New line
ret.append(self.curse_new_line())
# System CPU
if 'system' in self.stats and not idle_tag:
msg = '{:8}'.format('system:')
ret.append(self.curse_add_line(msg))
msg = '{:5.1f}%'.format(self.stats['system'])
ret.append(self.curse_add_line(
msg, self.get_views(key='system', option='decoration')))
else:
msg = '{:8}'.format('core:')
ret.append(self.curse_add_line(msg))
msg = '{:>6}'.format(self.stats['nb_log_core'])
ret.append(self.curse_add_line(msg))
# IOWait CPU
if 'iowait' in self.stats:
msg = ' {:8}'.format('iowait:')
ret.append(self.curse_add_line(msg, optional=self.get_views(key='iowait', option='optional')))
msg = '{:5.1f}%'.format(self.stats['iowait'])
ret.append(self.curse_add_line(
msg, self.get_views(key='iowait', option='decoration'),
optional=self.get_views(key='iowait', option='optional')))
# soft_interrupts
if 'soft_interrupts' in self.stats:
msg = ' {:8}'.format('sw_int:')
ret.append(self.curse_add_line(msg, optional=self.get_views(key='soft_interrupts', option='optional')))
msg = '{:>5}'.format(int(self.stats['soft_interrupts'] // self.stats['time_since_update']))
ret.append(self.curse_add_line(msg, optional=self.get_views(key='soft_interrupts', option='optional')))
# New line
ret.append(self.curse_new_line())
# Idle CPU
if 'idle' in self.stats and not idle_tag:
msg = '{:8}'.format('idle:')
ret.append(self.curse_add_line(msg))
msg = '{:5.1f}%'.format(self.stats['idle'])
ret.append(self.curse_add_line(msg))
# Steal CPU usage
if 'steal' in self.stats:
msg = ' {:8}'.format('steal:')
ret.append(self.curse_add_line(msg, optional=self.get_views(key='steal', option='optional')))
msg = '{:5.1f}%'.format(self.stats['steal'])
ret.append(self.curse_add_line(
msg, self.get_views(key='steal', option='decoration'),
optional=self.get_views(key='steal', option='optional')))
# syscalls
# syscalls: number of system calls since boot. Always set to 0 on Linux. (do not display)
if 'syscalls' in self.stats and not LINUX:
msg = ' {:8}'.format('syscal:')
ret.append(self.curse_add_line(msg, optional=self.get_views(key='syscalls', option='optional')))
msg = '{:>5}'.format(int(self.stats['syscalls'] // self.stats['time_since_update']))
ret.append(self.curse_add_line(msg, optional=self.get_views(key='syscalls', option='optional')))
# Return the message with decoration
return ret | ['def', 'msg_curse', '(', 'self', ',', 'args', '=', 'None', ',', 'max_width', '=', 'None', ')', ':', '# Init the return message', 'ret', '=', '[', ']', '# Only process if stats exist and plugin not disable', 'if', 'not', 'self', '.', 'stats', 'or', 'self', '.', 'args', '.', 'percpu', 'or', 'self', '.', 'is_disable', '(', ')', ':', 'return', 'ret', '# Build the string message', '# If user stat is not here, display only idle / total CPU usage (for', '# exemple on Windows OS)', 'idle_tag', '=', "'user'", 'not', 'in', 'self', '.', 'stats', '# Header', 'msg', '=', "'{}'", '.', 'format', '(', "'CPU'", ')', 'ret', '.', 'append', '(', 'self', '.', 'curse_add_line', '(', 'msg', ',', '"TITLE"', ')', ')', 'trend_user', '=', 'self', '.', 'get_trend', '(', "'user'", ')', 'trend_system', '=', 'self', '.', 'get_trend', '(', "'system'", ')', 'if', 'trend_user', 'is', 'None', 'or', 'trend_user', 'is', 'None', ':', 'trend_cpu', '=', 'None', 'else', ':', 'trend_cpu', '=', 'trend_user', '+', 'trend_system', 'msg', '=', "' {:4}'", '.', 'format', '(', 'self', '.', 'trend_msg', '(', 'trend_cpu', ')', ')', 'ret', '.', 'append', '(', 'self', '.', 'curse_add_line', '(', 'msg', ')', ')', '# Total CPU usage', 'msg', '=', "'{:5.1f}%'", '.', 'format', '(', 'self', '.', 'stats', '[', "'total'", ']', ')', 'if', 'idle_tag', ':', 'ret', '.', 'append', '(', 'self', '.', 'curse_add_line', '(', 'msg', ',', 'self', '.', 'get_views', '(', 'key', '=', "'total'", ',', 'option', '=', "'decoration'", ')', ')', ')', 'else', ':', 'ret', '.', 'append', '(', 'self', '.', 'curse_add_line', '(', 'msg', ')', ')', '# Nice CPU', 'if', "'nice'", 'in', 'self', '.', 'stats', ':', 'msg', '=', "' {:8}'", '.', 'format', '(', "'nice:'", ')', 'ret', '.', 'append', '(', 'self', '.', 'curse_add_line', '(', 'msg', ',', 'optional', '=', 'self', '.', 'get_views', '(', 'key', '=', "'nice'", ',', 'option', '=', "'optional'", ')', ')', ')', 'msg', '=', "'{:5.1f}%'", '.', 'format', '(', 'self', '.', 'stats', '[', "'nice'", ']', ')', 'ret', '.', 'append', '(', 'self', '.', 'curse_add_line', '(', 'msg', ',', 'optional', '=', 'self', '.', 'get_views', '(', 'key', '=', "'nice'", ',', 'option', '=', "'optional'", ')', ')', ')', '# ctx_switches', 'if', "'ctx_switches'", 'in', 'self', '.', 'stats', ':', 'msg', '=', "' {:8}'", '.', 'format', '(', "'ctx_sw:'", ')', 'ret', '.', 'append', '(', 'self', '.', 'curse_add_line', '(', 'msg', ',', 'optional', '=', 'self', '.', 'get_views', '(', 'key', '=', "'ctx_switches'", ',', 'option', '=', "'optional'", ')', ')', ')', 'msg', '=', "'{:>5}'", '.', 'format', '(', 'self', '.', 'auto_unit', '(', 'int', '(', 'self', '.', 'stats', '[', "'ctx_switches'", ']', '//', 'self', '.', 'stats', '[', "'time_since_update'", ']', ')', ',', 'min_symbol', '=', "'K'", ')', ')', 'ret', '.', 'append', '(', 'self', '.', 'curse_add_line', '(', 'msg', ',', 'self', '.', 'get_views', '(', 'key', '=', "'ctx_switches'", ',', 'option', '=', "'decoration'", ')', ',', 'optional', '=', 'self', '.', 'get_views', '(', 'key', '=', "'ctx_switches'", ',', 'option', '=', "'optional'", ')', ')', ')', '# New line', 'ret', '.', 'append', '(', 'self', '.', 'curse_new_line', '(', ')', ')', '# User CPU', 'if', "'user'", 'in', 'self', '.', 'stats', ':', 'msg', '=', "'{:8}'", '.', 'format', '(', "'user:'", ')', 'ret', '.', 'append', '(', 'self', '.', 'curse_add_line', '(', 'msg', ')', ')', 'msg', '=', "'{:5.1f}%'", '.', 'format', '(', 'self', '.', 'stats', '[', "'user'", ']', ')', 'ret', '.', 'append', '(', 'self', '.', 'curse_add_line', '(', 'msg', ',', 
'self', '.', 'get_views', '(', 'key', '=', "'user'", ',', 'option', '=', "'decoration'", ')', ')', ')', 'elif', "'idle'", 'in', 'self', '.', 'stats', ':', 'msg', '=', "'{:8}'", '.', 'format', '(', "'idle:'", ')', 'ret', '.', 'append', '(', 'self', '.', 'curse_add_line', '(', 'msg', ')', ')', 'msg', '=', "'{:5.1f}%'", '.', 'format', '(', 'self', '.', 'stats', '[', "'idle'", ']', ')', 'ret', '.', 'append', '(', 'self', '.', 'curse_add_line', '(', 'msg', ')', ')', '# IRQ CPU', 'if', "'irq'", 'in', 'self', '.', 'stats', ':', 'msg', '=', "' {:8}'", '.', 'format', '(', "'irq:'", ')', 'ret', '.', 'append', '(', 'self', '.', 'curse_add_line', '(', 'msg', ',', 'optional', '=', 'self', '.', 'get_views', '(', 'key', '=', "'irq'", ',', 'option', '=', "'optional'", ')', ')', ')', 'msg', '=', "'{:5.1f}%'", '.', 'format', '(', 'self', '.', 'stats', '[', "'irq'", ']', ')', 'ret', '.', 'append', '(', 'self', '.', 'curse_add_line', '(', 'msg', ',', 'optional', '=', 'self', '.', 'get_views', '(', 'key', '=', "'irq'", ',', 'option', '=', "'optional'", ')', ')', ')', '# interrupts', 'if', "'interrupts'", 'in', 'self', '.', 'stats', ':', 'msg', '=', "' {:8}'", '.', 'format', '(', "'inter:'", ')', 'ret', '.', 'append', '(', 'self', '.', 'curse_add_line', '(', 'msg', ',', 'optional', '=', 'self', '.', 'get_views', '(', 'key', '=', "'interrupts'", ',', 'option', '=', "'optional'", ')', ')', ')', 'msg', '=', "'{:>5}'", '.', 'format', '(', 'int', '(', 'self', '.', 'stats', '[', "'interrupts'", ']', '//', 'self', '.', 'stats', '[', "'time_since_update'", ']', ')', ')', 'ret', '.', 'append', '(', 'self', '.', 'curse_add_line', '(', 'msg', ',', 'optional', '=', 'self', '.', 'get_views', '(', 'key', '=', "'interrupts'", ',', 'option', '=', "'optional'", ')', ')', ')', '# New line', 'ret', '.', 'append', '(', 'self', '.', 'curse_new_line', '(', ')', ')', '# System CPU', 'if', "'system'", 'in', 'self', '.', 'stats', 'and', 'not', 'idle_tag', ':', 'msg', '=', "'{:8}'", '.', 'format', '(', "'system:'", ')', 'ret', '.', 'append', '(', 'self', '.', 'curse_add_line', '(', 'msg', ')', ')', 'msg', '=', "'{:5.1f}%'", '.', 'format', '(', 'self', '.', 'stats', '[', "'system'", ']', ')', 'ret', '.', 'append', '(', 'self', '.', 'curse_add_line', '(', 'msg', ',', 'self', '.', 'get_views', '(', 'key', '=', "'system'", ',', 'option', '=', "'decoration'", ')', ')', ')', 'else', ':', 'msg', '=', "'{:8}'", '.', 'format', '(', "'core:'", ')', 'ret', '.', 'append', '(', 'self', '.', 'curse_add_line', '(', 'msg', ')', ')', 'msg', '=', "'{:>6}'", '.', 'format', '(', 'self', '.', 'stats', '[', "'nb_log_core'", ']', ')', 'ret', '.', 'append', '(', 'self', '.', 'curse_add_line', '(', 'msg', ')', ')', '# IOWait CPU', 'if', "'iowait'", 'in', 'self', '.', 'stats', ':', 'msg', '=', "' {:8}'", '.', 'format', '(', "'iowait:'", ')', 'ret', '.', 'append', '(', 'self', '.', 'curse_add_line', '(', 'msg', ',', 'optional', '=', 'self', '.', 'get_views', '(', 'key', '=', "'iowait'", ',', 'option', '=', "'optional'", ')', ')', ')', 'msg', '=', "'{:5.1f}%'", '.', 'format', '(', 'self', '.', 'stats', '[', "'iowait'", ']', ')', 'ret', '.', 'append', '(', 'self', '.', 'curse_add_line', '(', 'msg', ',', 'self', '.', 'get_views', '(', 'key', '=', "'iowait'", ',', 'option', '=', "'decoration'", ')', ',', 'optional', '=', 'self', '.', 'get_views', '(', 'key', '=', "'iowait'", ',', 'option', '=', "'optional'", ')', ')', ')', '# soft_interrupts', 'if', "'soft_interrupts'", 'in', 'self', '.', 'stats', ':', 'msg', '=', "' {:8}'", '.', 'format', '(', "'sw_int:'", ')', 
'ret', '.', 'append', '(', 'self', '.', 'curse_add_line', '(', 'msg', ',', 'optional', '=', 'self', '.', 'get_views', '(', 'key', '=', "'soft_interrupts'", ',', 'option', '=', "'optional'", ')', ')', ')', 'msg', '=', "'{:>5}'", '.', 'format', '(', 'int', '(', 'self', '.', 'stats', '[', "'soft_interrupts'", ']', '//', 'self', '.', 'stats', '[', "'time_since_update'", ']', ')', ')', 'ret', '.', 'append', '(', 'self', '.', 'curse_add_line', '(', 'msg', ',', 'optional', '=', 'self', '.', 'get_views', '(', 'key', '=', "'soft_interrupts'", ',', 'option', '=', "'optional'", ')', ')', ')', '# New line', 'ret', '.', 'append', '(', 'self', '.', 'curse_new_line', '(', ')', ')', '# Idle CPU', 'if', "'idle'", 'in', 'self', '.', 'stats', 'and', 'not', 'idle_tag', ':', 'msg', '=', "'{:8}'", '.', 'format', '(', "'idle:'", ')', 'ret', '.', 'append', '(', 'self', '.', 'curse_add_line', '(', 'msg', ')', ')', 'msg', '=', "'{:5.1f}%'", '.', 'format', '(', 'self', '.', 'stats', '[', "'idle'", ']', ')', 'ret', '.', 'append', '(', 'self', '.', 'curse_add_line', '(', 'msg', ')', ')', '# Steal CPU usage', 'if', "'steal'", 'in', 'self', '.', 'stats', ':', 'msg', '=', "' {:8}'", '.', 'format', '(', "'steal:'", ')', 'ret', '.', 'append', '(', 'self', '.', 'curse_add_line', '(', 'msg', ',', 'optional', '=', 'self', '.', 'get_views', '(', 'key', '=', "'steal'", ',', 'option', '=', "'optional'", ')', ')', ')', 'msg', '=', "'{:5.1f}%'", '.', 'format', '(', 'self', '.', 'stats', '[', "'steal'", ']', ')', 'ret', '.', 'append', '(', 'self', '.', 'curse_add_line', '(', 'msg', ',', 'self', '.', 'get_views', '(', 'key', '=', "'steal'", ',', 'option', '=', "'decoration'", ')', ',', 'optional', '=', 'self', '.', 'get_views', '(', 'key', '=', "'steal'", ',', 'option', '=', "'optional'", ')', ')', ')', '# syscalls', '# syscalls: number of system calls since boot. Always set to 0 on Linux. (do not display)', 'if', "'syscalls'", 'in', 'self', '.', 'stats', 'and', 'not', 'LINUX', ':', 'msg', '=', "' {:8}'", '.', 'format', '(', "'syscal:'", ')', 'ret', '.', 'append', '(', 'self', '.', 'curse_add_line', '(', 'msg', ',', 'optional', '=', 'self', '.', 'get_views', '(', 'key', '=', "'syscalls'", ',', 'option', '=', "'optional'", ')', ')', ')', 'msg', '=', "'{:>5}'", '.', 'format', '(', 'int', '(', 'self', '.', 'stats', '[', "'syscalls'", ']', '//', 'self', '.', 'stats', '[', "'time_since_update'", ']', ')', ')', 'ret', '.', 'append', '(', 'self', '.', 'curse_add_line', '(', 'msg', ',', 'optional', '=', 'self', '.', 'get_views', '(', 'key', '=', "'syscalls'", ',', 'option', '=', "'optional'", ')', ')', ')', '# Return the message with decoration', 'return', 'ret'] | Return the list to display in the UI. | ['Return', 'the', 'list', 'to', 'display', 'in', 'the', 'UI', '.'] | train | https://github.com/nicolargo/glances/blob/5bd4d587a736e0d2b03170b56926841d2a3eb7ee/glances/plugins/glances_cpu.py#L213-L342 |
4,091 | sorgerlab/indra | indra/tools/reading/readers.py | get_reader_classes | def get_reader_classes(parent=Reader):
"""Get all childless the descendants of a parent class, recursively."""
children = parent.__subclasses__()
descendants = children[:]
for child in children:
grandchildren = get_reader_classes(child)
if grandchildren:
descendants.remove(child)
descendants.extend(grandchildren)
return descendants | python | def get_reader_classes(parent=Reader):
"""Get all childless the descendants of a parent class, recursively."""
children = parent.__subclasses__()
descendants = children[:]
for child in children:
grandchildren = get_reader_classes(child)
if grandchildren:
descendants.remove(child)
descendants.extend(grandchildren)
return descendants | ['def', 'get_reader_classes', '(', 'parent', '=', 'Reader', ')', ':', 'children', '=', 'parent', '.', '__subclasses__', '(', ')', 'descendants', '=', 'children', '[', ':', ']', 'for', 'child', 'in', 'children', ':', 'grandchildren', '=', 'get_reader_classes', '(', 'child', ')', 'if', 'grandchildren', ':', 'descendants', '.', 'remove', '(', 'child', ')', 'descendants', '.', 'extend', '(', 'grandchildren', ')', 'return', 'descendants'] | Get all childless the descendants of a parent class, recursively. | ['Get', 'all', 'childless', 'the', 'descendants', 'of', 'a', 'parent', 'class', 'recursively', '.'] | train | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/reading/readers.py#L761-L770 |
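Illustrative sketch, not part of the dataset record above: get_reader_classes keeps only the childless descendants of a class, replacing any ancestor that has subclasses with its leaves. The same pattern in a self-contained form, using made-up classes instead of indra's Reader hierarchy:

class Base: pass                     # stand-in for the Reader base class
class Mid(Base): pass                # has a child, so it is replaced by its leaves
class Leaf1(Mid): pass               # childless, kept
class Leaf2(Base): pass              # childless, kept

def get_leaf_subclasses(parent):
    """Return only the childless descendants of `parent`, recursively."""
    children = parent.__subclasses__()
    leaves = children[:]
    for child in children:
        grandchildren = get_leaf_subclasses(child)
        if grandchildren:
            leaves.remove(child)          # drop the non-leaf parent
            leaves.extend(grandchildren)  # keep its leaf descendants instead
    return leaves

print(get_leaf_subclasses(Base))     # e.g. [Leaf2, Leaf1]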
4,092 | kbr/fritzconnection | fritzconnection/fritzstatus.py | FritzStatus.str_max_bit_rate | def str_max_bit_rate(self):
"""
Returns a human readable maximun upstream- and downstream-rate
of the given connection. The rate is given in bits/sec.
"""
upstream, downstream = self.max_bit_rate
return (
fritztools.format_rate(upstream, unit='bits'),
fritztools.format_rate(downstream, unit ='bits')
) | python | def str_max_bit_rate(self):
"""
Returns a human readable maximun upstream- and downstream-rate
of the given connection. The rate is given in bits/sec.
"""
upstream, downstream = self.max_bit_rate
return (
fritztools.format_rate(upstream, unit='bits'),
fritztools.format_rate(downstream, unit ='bits')
) | ['def', 'str_max_bit_rate', '(', 'self', ')', ':', 'upstream', ',', 'downstream', '=', 'self', '.', 'max_bit_rate', 'return', '(', 'fritztools', '.', 'format_rate', '(', 'upstream', ',', 'unit', '=', "'bits'", ')', ',', 'fritztools', '.', 'format_rate', '(', 'downstream', ',', 'unit', '=', "'bits'", ')', ')'] | Returns a human readable maximun upstream- and downstream-rate
of the given connection. The rate is given in bits/sec. | ['Returns', 'a', 'human', 'readable', 'maximun', 'upstream', '-', 'and', 'downstream', '-', 'rate', 'of', 'the', 'given', 'connection', '.', 'The', 'rate', 'is', 'given', 'in', 'bits', '/', 'sec', '.'] | train | https://github.com/kbr/fritzconnection/blob/b183f759ef19dd1652371e912d36cfe34f6639ac/fritzconnection/fritzstatus.py#L147-L156 |
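For context only: the record above relies on fritztools.format_rate, which is not shown in this row. A rough, self-contained stand-in that turns a raw bits-per-second value into a human-readable string (the scaling rule here is an assumption, not the library's exact behaviour):

def format_bit_rate(bits_per_second):
    # Scale by factors of 1000 until the value is small enough to read.
    rate = float(bits_per_second)
    for unit in ('bits/s', 'KBit/s', 'MBit/s', 'GBit/s'):
        if rate < 1000.0 or unit == 'GBit/s':
            return '%.1f %s' % (rate, unit)
        rate /= 1000.0

print(format_bit_rate(11404000))   # '11.4 MBit/s'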
4,093 | econ-ark/HARK | HARK/ConsumptionSaving/ConsIndShockModel.py | solveConsIndShock | def solveConsIndShock(solution_next,IncomeDstn,LivPrb,DiscFac,CRRA,Rfree,PermGroFac,
BoroCnstArt,aXtraGrid,vFuncBool,CubicBool):
'''
Solves a single period consumption-saving problem with CRRA utility and risky
income (subject to permanent and transitory shocks). Can generate a value
function if requested; consumption function can be linear or cubic splines.
Parameters
----------
solution_next : ConsumerSolution
The solution to next period's one period problem.
IncomeDstn : [np.array]
A list containing three arrays of floats, representing a discrete
approximation to the income process between the period being solved
and the one immediately following (in solution_next). Order: event
probabilities, permanent shocks, transitory shocks.
LivPrb : float
Survival probability; likelihood of being alive at the beginning of
the succeeding period.
DiscFac : float
Intertemporal discount factor for future utility.
CRRA : float
Coefficient of relative risk aversion.
Rfree : float
Risk free interest factor on end-of-period assets.
PermGroFac : float
Expected permanent income growth factor at the end of this period.
BoroCnstArt: float or None
Borrowing constraint for the minimum allowable assets to end the
period with. If it is less than the natural borrowing constraint,
then it is irrelevant; BoroCnstArt=None indicates no artificial bor-
rowing constraint.
aXtraGrid: np.array
Array of "extra" end-of-period asset values-- assets above the
absolute minimum acceptable level.
vFuncBool: boolean
An indicator for whether the value function should be computed and
included in the reported solution.
CubicBool: boolean
Indicator for whether the solver should use cubic or linear interpolation.
Returns
-------
solution_now : ConsumerSolution
The solution to the single period consumption-saving problem. Includes
a consumption function cFunc (using cubic or linear splines), a marginal
value function vPfunc, a minimum acceptable level of normalized market
resources mNrmMin, normalized human wealth hNrm, and bounding MPCs MPCmin
and MPCmax. It might also have a value function vFunc and marginal mar-
ginal value function vPPfunc.
'''
# Use the basic solver if user doesn't want cubic splines or the value function
if (not CubicBool) and (not vFuncBool):
solver = ConsIndShockSolverBasic(solution_next,IncomeDstn,LivPrb,DiscFac,CRRA,
Rfree,PermGroFac,BoroCnstArt,aXtraGrid,vFuncBool,
CubicBool)
else: # Use the "advanced" solver if either is requested
solver = ConsIndShockSolver(solution_next,IncomeDstn,LivPrb,DiscFac,CRRA,Rfree,
PermGroFac,BoroCnstArt,aXtraGrid,vFuncBool,CubicBool)
solver.prepareToSolve() # Do some preparatory work
solution_now = solver.solve() # Solve the period
return solution_now | python | def solveConsIndShock(solution_next,IncomeDstn,LivPrb,DiscFac,CRRA,Rfree,PermGroFac,
BoroCnstArt,aXtraGrid,vFuncBool,CubicBool):
'''
Solves a single period consumption-saving problem with CRRA utility and risky
income (subject to permanent and transitory shocks). Can generate a value
function if requested; consumption function can be linear or cubic splines.
Parameters
----------
solution_next : ConsumerSolution
The solution to next period's one period problem.
IncomeDstn : [np.array]
A list containing three arrays of floats, representing a discrete
approximation to the income process between the period being solved
and the one immediately following (in solution_next). Order: event
probabilities, permanent shocks, transitory shocks.
LivPrb : float
Survival probability; likelihood of being alive at the beginning of
the succeeding period.
DiscFac : float
Intertemporal discount factor for future utility.
CRRA : float
Coefficient of relative risk aversion.
Rfree : float
Risk free interest factor on end-of-period assets.
PermGroFac : float
Expected permanent income growth factor at the end of this period.
BoroCnstArt: float or None
Borrowing constraint for the minimum allowable assets to end the
period with. If it is less than the natural borrowing constraint,
then it is irrelevant; BoroCnstArt=None indicates no artificial bor-
rowing constraint.
aXtraGrid: np.array
Array of "extra" end-of-period asset values-- assets above the
absolute minimum acceptable level.
vFuncBool: boolean
An indicator for whether the value function should be computed and
included in the reported solution.
CubicBool: boolean
Indicator for whether the solver should use cubic or linear interpolation.
Returns
-------
solution_now : ConsumerSolution
The solution to the single period consumption-saving problem. Includes
a consumption function cFunc (using cubic or linear splines), a marginal
value function vPfunc, a minimum acceptable level of normalized market
resources mNrmMin, normalized human wealth hNrm, and bounding MPCs MPCmin
and MPCmax. It might also have a value function vFunc and marginal mar-
ginal value function vPPfunc.
'''
# Use the basic solver if user doesn't want cubic splines or the value function
if (not CubicBool) and (not vFuncBool):
solver = ConsIndShockSolverBasic(solution_next,IncomeDstn,LivPrb,DiscFac,CRRA,
Rfree,PermGroFac,BoroCnstArt,aXtraGrid,vFuncBool,
CubicBool)
else: # Use the "advanced" solver if either is requested
solver = ConsIndShockSolver(solution_next,IncomeDstn,LivPrb,DiscFac,CRRA,Rfree,
PermGroFac,BoroCnstArt,aXtraGrid,vFuncBool,CubicBool)
solver.prepareToSolve() # Do some preparatory work
solution_now = solver.solve() # Solve the period
return solution_now | ['def', 'solveConsIndShock', '(', 'solution_next', ',', 'IncomeDstn', ',', 'LivPrb', ',', 'DiscFac', ',', 'CRRA', ',', 'Rfree', ',', 'PermGroFac', ',', 'BoroCnstArt', ',', 'aXtraGrid', ',', 'vFuncBool', ',', 'CubicBool', ')', ':', "# Use the basic solver if user doesn't want cubic splines or the value function", 'if', '(', 'not', 'CubicBool', ')', 'and', '(', 'not', 'vFuncBool', ')', ':', 'solver', '=', 'ConsIndShockSolverBasic', '(', 'solution_next', ',', 'IncomeDstn', ',', 'LivPrb', ',', 'DiscFac', ',', 'CRRA', ',', 'Rfree', ',', 'PermGroFac', ',', 'BoroCnstArt', ',', 'aXtraGrid', ',', 'vFuncBool', ',', 'CubicBool', ')', 'else', ':', '# Use the "advanced" solver if either is requested', 'solver', '=', 'ConsIndShockSolver', '(', 'solution_next', ',', 'IncomeDstn', ',', 'LivPrb', ',', 'DiscFac', ',', 'CRRA', ',', 'Rfree', ',', 'PermGroFac', ',', 'BoroCnstArt', ',', 'aXtraGrid', ',', 'vFuncBool', ',', 'CubicBool', ')', 'solver', '.', 'prepareToSolve', '(', ')', '# Do some preparatory work', 'solution_now', '=', 'solver', '.', 'solve', '(', ')', '# Solve the period', 'return', 'solution_now'] | Solves a single period consumption-saving problem with CRRA utility and risky
income (subject to permanent and transitory shocks). Can generate a value
function if requested; consumption function can be linear or cubic splines.
Parameters
----------
solution_next : ConsumerSolution
The solution to next period's one period problem.
IncomeDstn : [np.array]
A list containing three arrays of floats, representing a discrete
approximation to the income process between the period being solved
and the one immediately following (in solution_next). Order: event
probabilities, permanent shocks, transitory shocks.
LivPrb : float
Survival probability; likelihood of being alive at the beginning of
the succeeding period.
DiscFac : float
Intertemporal discount factor for future utility.
CRRA : float
Coefficient of relative risk aversion.
Rfree : float
Risk free interest factor on end-of-period assets.
PermGroFac : float
Expected permanent income growth factor at the end of this period.
BoroCnstArt: float or None
Borrowing constraint for the minimum allowable assets to end the
period with. If it is less than the natural borrowing constraint,
then it is irrelevant; BoroCnstArt=None indicates no artificial bor-
rowing constraint.
aXtraGrid: np.array
Array of "extra" end-of-period asset values-- assets above the
absolute minimum acceptable level.
vFuncBool: boolean
An indicator for whether the value function should be computed and
included in the reported solution.
CubicBool: boolean
Indicator for whether the solver should use cubic or linear interpolation.
Returns
-------
solution_now : ConsumerSolution
The solution to the single period consumption-saving problem. Includes
a consumption function cFunc (using cubic or linear splines), a marginal
value function vPfunc, a minimum acceptable level of normalized market
resources mNrmMin, normalized human wealth hNrm, and bounding MPCs MPCmin
and MPCmax. It might also have a value function vFunc and marginal mar-
ginal value function vPPfunc. | ['Solves', 'a', 'single', 'period', 'consumption', '-', 'saving', 'problem', 'with', 'CRRA', 'utility', 'and', 'risky', 'income', '(', 'subject', 'to', 'permanent', 'and', 'transitory', 'shocks', ')', '.', 'Can', 'generate', 'a', 'value', 'function', 'if', 'requested', ';', 'consumption', 'function', 'can', 'be', 'linear', 'or', 'cubic', 'splines', '.'] | train | https://github.com/econ-ark/HARK/blob/3d184153a189e618a87c9540df1cd12044039cc5/HARK/ConsumptionSaving/ConsIndShockModel.py#L1183-L1244 |
4,094 | awslabs/sockeye | sockeye/training.py | EarlyStoppingTrainer._save_training_state | def _save_training_state(self, train_iter: data_io.BaseParallelSampleIter):
"""
Saves current training state.
"""
# Create temporary directory for storing the state of the optimization process
training_state_dirname = os.path.join(self.model.output_dir, C.TRAINING_STATE_TEMP_DIRNAME)
if not os.path.exists(training_state_dirname):
os.mkdir(training_state_dirname)
# (1) Parameters: link current file
params_base_fname = C.PARAMS_NAME % self.state.checkpoint
params_file = os.path.join(training_state_dirname, C.TRAINING_STATE_PARAMS_NAME)
if os.path.exists(params_file):
os.unlink(params_file)
os.symlink(os.path.join("..", params_base_fname), params_file)
# (2) Optimizer states
opt_state_fname = os.path.join(training_state_dirname, C.OPT_STATES_LAST)
self.model.save_optimizer_states(opt_state_fname)
# (3) Data iterator
train_iter.save_state(os.path.join(training_state_dirname, C.BUCKET_ITER_STATE_NAME))
# (4) Random generators
# RNG states: python's random and np.random provide functions for
# storing the state, mxnet does not, but inside our code mxnet's RNG is
# not used AFAIK
with open(os.path.join(training_state_dirname, C.RNG_STATE_NAME), "wb") as fp:
pickle.dump(random.getstate(), fp)
pickle.dump(np.random.get_state(), fp)
# (5) Training state
self.state.save(os.path.join(training_state_dirname, C.TRAINING_STATE_NAME))
# (6) Learning rate scheduler
with open(os.path.join(training_state_dirname, C.SCHEDULER_STATE_NAME), "wb") as fp:
pickle.dump(self.optimizer_config.lr_scheduler, fp)
# First we rename the existing directory to minimize the risk of state
# loss if the process is aborted during deletion (which will be slower
# than directory renaming)
delete_training_state_dirname = os.path.join(self.model.output_dir, C.TRAINING_STATE_TEMP_DELETENAME)
if os.path.exists(self.training_state_dirname):
os.rename(self.training_state_dirname, delete_training_state_dirname)
os.rename(training_state_dirname, self.training_state_dirname)
if os.path.exists(delete_training_state_dirname):
shutil.rmtree(delete_training_state_dirname) | python | def _save_training_state(self, train_iter: data_io.BaseParallelSampleIter):
"""
Saves current training state.
"""
# Create temporary directory for storing the state of the optimization process
training_state_dirname = os.path.join(self.model.output_dir, C.TRAINING_STATE_TEMP_DIRNAME)
if not os.path.exists(training_state_dirname):
os.mkdir(training_state_dirname)
# (1) Parameters: link current file
params_base_fname = C.PARAMS_NAME % self.state.checkpoint
params_file = os.path.join(training_state_dirname, C.TRAINING_STATE_PARAMS_NAME)
if os.path.exists(params_file):
os.unlink(params_file)
os.symlink(os.path.join("..", params_base_fname), params_file)
# (2) Optimizer states
opt_state_fname = os.path.join(training_state_dirname, C.OPT_STATES_LAST)
self.model.save_optimizer_states(opt_state_fname)
# (3) Data iterator
train_iter.save_state(os.path.join(training_state_dirname, C.BUCKET_ITER_STATE_NAME))
# (4) Random generators
# RNG states: python's random and np.random provide functions for
# storing the state, mxnet does not, but inside our code mxnet's RNG is
# not used AFAIK
with open(os.path.join(training_state_dirname, C.RNG_STATE_NAME), "wb") as fp:
pickle.dump(random.getstate(), fp)
pickle.dump(np.random.get_state(), fp)
# (5) Training state
self.state.save(os.path.join(training_state_dirname, C.TRAINING_STATE_NAME))
# (6) Learning rate scheduler
with open(os.path.join(training_state_dirname, C.SCHEDULER_STATE_NAME), "wb") as fp:
pickle.dump(self.optimizer_config.lr_scheduler, fp)
# First we rename the existing directory to minimize the risk of state
# loss if the process is aborted during deletion (which will be slower
# than directory renaming)
delete_training_state_dirname = os.path.join(self.model.output_dir, C.TRAINING_STATE_TEMP_DELETENAME)
if os.path.exists(self.training_state_dirname):
os.rename(self.training_state_dirname, delete_training_state_dirname)
os.rename(training_state_dirname, self.training_state_dirname)
if os.path.exists(delete_training_state_dirname):
shutil.rmtree(delete_training_state_dirname) | ['def', '_save_training_state', '(', 'self', ',', 'train_iter', ':', 'data_io', '.', 'BaseParallelSampleIter', ')', ':', '# Create temporary directory for storing the state of the optimization process', 'training_state_dirname', '=', 'os', '.', 'path', '.', 'join', '(', 'self', '.', 'model', '.', 'output_dir', ',', 'C', '.', 'TRAINING_STATE_TEMP_DIRNAME', ')', 'if', 'not', 'os', '.', 'path', '.', 'exists', '(', 'training_state_dirname', ')', ':', 'os', '.', 'mkdir', '(', 'training_state_dirname', ')', '# (1) Parameters: link current file', 'params_base_fname', '=', 'C', '.', 'PARAMS_NAME', '%', 'self', '.', 'state', '.', 'checkpoint', 'params_file', '=', 'os', '.', 'path', '.', 'join', '(', 'training_state_dirname', ',', 'C', '.', 'TRAINING_STATE_PARAMS_NAME', ')', 'if', 'os', '.', 'path', '.', 'exists', '(', 'params_file', ')', ':', 'os', '.', 'unlink', '(', 'params_file', ')', 'os', '.', 'symlink', '(', 'os', '.', 'path', '.', 'join', '(', '".."', ',', 'params_base_fname', ')', ',', 'params_file', ')', '# (2) Optimizer states', 'opt_state_fname', '=', 'os', '.', 'path', '.', 'join', '(', 'training_state_dirname', ',', 'C', '.', 'OPT_STATES_LAST', ')', 'self', '.', 'model', '.', 'save_optimizer_states', '(', 'opt_state_fname', ')', '# (3) Data iterator', 'train_iter', '.', 'save_state', '(', 'os', '.', 'path', '.', 'join', '(', 'training_state_dirname', ',', 'C', '.', 'BUCKET_ITER_STATE_NAME', ')', ')', '# (4) Random generators', "# RNG states: python's random and np.random provide functions for", "# storing the state, mxnet does not, but inside our code mxnet's RNG is", '# not used AFAIK', 'with', 'open', '(', 'os', '.', 'path', '.', 'join', '(', 'training_state_dirname', ',', 'C', '.', 'RNG_STATE_NAME', ')', ',', '"wb"', ')', 'as', 'fp', ':', 'pickle', '.', 'dump', '(', 'random', '.', 'getstate', '(', ')', ',', 'fp', ')', 'pickle', '.', 'dump', '(', 'np', '.', 'random', '.', 'get_state', '(', ')', ',', 'fp', ')', '# (5) Training state', 'self', '.', 'state', '.', 'save', '(', 'os', '.', 'path', '.', 'join', '(', 'training_state_dirname', ',', 'C', '.', 'TRAINING_STATE_NAME', ')', ')', '# (6) Learning rate scheduler', 'with', 'open', '(', 'os', '.', 'path', '.', 'join', '(', 'training_state_dirname', ',', 'C', '.', 'SCHEDULER_STATE_NAME', ')', ',', '"wb"', ')', 'as', 'fp', ':', 'pickle', '.', 'dump', '(', 'self', '.', 'optimizer_config', '.', 'lr_scheduler', ',', 'fp', ')', '# First we rename the existing directory to minimize the risk of state', '# loss if the process is aborted during deletion (which will be slower', '# than directory renaming)', 'delete_training_state_dirname', '=', 'os', '.', 'path', '.', 'join', '(', 'self', '.', 'model', '.', 'output_dir', ',', 'C', '.', 'TRAINING_STATE_TEMP_DELETENAME', ')', 'if', 'os', '.', 'path', '.', 'exists', '(', 'self', '.', 'training_state_dirname', ')', ':', 'os', '.', 'rename', '(', 'self', '.', 'training_state_dirname', ',', 'delete_training_state_dirname', ')', 'os', '.', 'rename', '(', 'training_state_dirname', ',', 'self', '.', 'training_state_dirname', ')', 'if', 'os', '.', 'path', '.', 'exists', '(', 'delete_training_state_dirname', ')', ':', 'shutil', '.', 'rmtree', '(', 'delete_training_state_dirname', ')'] | Saves current training state. | ['Saves', 'current', 'training', 'state', '.'] | train | https://github.com/awslabs/sockeye/blob/5d64a1ee1ef3cbba17c6d1d94bc061020c43f6ab/sockeye/training.py#L1093-L1139 |
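The comments in the record above describe a rename-then-delete swap: the old state directory is first renamed out of the way, because a rename is fast, and the slow recursive delete only happens once the new state is in place. A minimal stand-alone sketch of that pattern, with hypothetical directory names rather than sockeye's constants:

import os
import shutil

def swap_state_dir(output_dir):
    tmp_dir = os.path.join(output_dir, 'training_state.tmp')       # freshly written state
    live_dir = os.path.join(output_dir, 'training_state')          # the directory readers expect
    trash_dir = os.path.join(output_dir, 'training_state.delete')  # old state, pending removal
    if os.path.exists(live_dir):
        os.rename(live_dir, trash_dir)   # cheap rename instead of a slow delete
    os.rename(tmp_dir, live_dir)         # promote the freshly written state (also just a rename)
    if os.path.exists(trash_dir):
        shutil.rmtree(trash_dir)         # only now pay for the recursive delete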
4,095 | Telefonica/toolium | toolium/driver_wrapper.py | DriverWrapper.should_reuse_driver | def should_reuse_driver(self, scope, test_passed, context=None):
"""Check if the driver should be reused
:param scope: execution scope (function, module, class or session)
:param test_passed: True if the test has passed
:param context: behave context
:returns: True if the driver should be reused
"""
reuse_driver = self.config.getboolean_optional('Driver', 'reuse_driver')
reuse_driver_session = self.config.getboolean_optional('Driver', 'reuse_driver_session')
restart_driver_after_failure = (self.config.getboolean_optional('Driver', 'restart_driver_after_failure') or
self.config.getboolean_optional('Driver', 'restart_driver_fail'))
if context and scope == 'function':
reuse_driver = reuse_driver or (hasattr(context, 'reuse_driver_from_tags')
and context.reuse_driver_from_tags)
return (((reuse_driver and scope == 'function') or (reuse_driver_session and scope != 'session'))
and (test_passed or not restart_driver_after_failure)) | python | def should_reuse_driver(self, scope, test_passed, context=None):
"""Check if the driver should be reused
:param scope: execution scope (function, module, class or session)
:param test_passed: True if the test has passed
:param context: behave context
:returns: True if the driver should be reused
"""
reuse_driver = self.config.getboolean_optional('Driver', 'reuse_driver')
reuse_driver_session = self.config.getboolean_optional('Driver', 'reuse_driver_session')
restart_driver_after_failure = (self.config.getboolean_optional('Driver', 'restart_driver_after_failure') or
self.config.getboolean_optional('Driver', 'restart_driver_fail'))
if context and scope == 'function':
reuse_driver = reuse_driver or (hasattr(context, 'reuse_driver_from_tags')
and context.reuse_driver_from_tags)
return (((reuse_driver and scope == 'function') or (reuse_driver_session and scope != 'session'))
and (test_passed or not restart_driver_after_failure)) | ['def', 'should_reuse_driver', '(', 'self', ',', 'scope', ',', 'test_passed', ',', 'context', '=', 'None', ')', ':', 'reuse_driver', '=', 'self', '.', 'config', '.', 'getboolean_optional', '(', "'Driver'", ',', "'reuse_driver'", ')', 'reuse_driver_session', '=', 'self', '.', 'config', '.', 'getboolean_optional', '(', "'Driver'", ',', "'reuse_driver_session'", ')', 'restart_driver_after_failure', '=', '(', 'self', '.', 'config', '.', 'getboolean_optional', '(', "'Driver'", ',', "'restart_driver_after_failure'", ')', 'or', 'self', '.', 'config', '.', 'getboolean_optional', '(', "'Driver'", ',', "'restart_driver_fail'", ')', ')', 'if', 'context', 'and', 'scope', '==', "'function'", ':', 'reuse_driver', '=', 'reuse_driver', 'or', '(', 'hasattr', '(', 'context', ',', "'reuse_driver_from_tags'", ')', 'and', 'context', '.', 'reuse_driver_from_tags', ')', 'return', '(', '(', '(', 'reuse_driver', 'and', 'scope', '==', "'function'", ')', 'or', '(', 'reuse_driver_session', 'and', 'scope', '!=', "'session'", ')', ')', 'and', '(', 'test_passed', 'or', 'not', 'restart_driver_after_failure', ')', ')'] | Check if the driver should be reused
:param scope: execution scope (function, module, class or session)
:param test_passed: True if the test has passed
:param context: behave context
:returns: True if the driver should be reused | ['Check', 'if', 'the', 'driver', 'should', 'be', 'reused'] | train | https://github.com/Telefonica/toolium/blob/56847c243b3a98876df74c184b75e43f8810e475/toolium/driver_wrapper.py#L325-L341 |
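Stripped of the config lookups, the decision in the record above reduces to two boolean conditions. The stand-alone sketch below uses plain arguments in place of toolium's config object; the names are illustrative only:

def should_reuse(scope, test_passed, reuse_driver, reuse_session, restart_after_failure):
    reuse_in_scope = (reuse_driver and scope == 'function') or (reuse_session and scope != 'session')
    return reuse_in_scope and (test_passed or not restart_after_failure)

print(should_reuse('function', True, True, False, False))   # True: driver kept for the next test
print(should_reuse('function', False, True, False, True))   # False: a failed test forces a fresh driver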
4,096 | googlefonts/glyphsLib | Lib/glyphsLib/builder/builders.py | GlyphsBuilder._fake_designspace | def _fake_designspace(self, ufos):
"""Build a fake designspace with the given UFOs as sources, so that all
builder functions can rely on the presence of a designspace.
"""
designspace = designspaceLib.DesignSpaceDocument()
ufo_to_location = defaultdict(dict)
# Make weight and width axis if relevant
for info_key, axis_def in zip(
("openTypeOS2WeightClass", "openTypeOS2WidthClass"),
(WEIGHT_AXIS_DEF, WIDTH_AXIS_DEF),
):
axis = designspace.newAxisDescriptor()
axis.tag = axis_def.tag
axis.name = axis_def.name
mapping = []
for ufo in ufos:
user_loc = getattr(ufo.info, info_key)
if user_loc is not None:
design_loc = class_to_value(axis_def.tag, user_loc)
mapping.append((user_loc, design_loc))
ufo_to_location[ufo][axis_def.name] = design_loc
mapping = sorted(set(mapping))
if len(mapping) > 1:
axis.map = mapping
axis.minimum = min([user_loc for user_loc, _ in mapping])
axis.maximum = max([user_loc for user_loc, _ in mapping])
axis.default = min(
axis.maximum, max(axis.minimum, axis_def.default_user_loc)
)
designspace.addAxis(axis)
for ufo in ufos:
source = designspace.newSourceDescriptor()
source.font = ufo
source.familyName = ufo.info.familyName
source.styleName = ufo.info.styleName
# source.name = '%s %s' % (source.familyName, source.styleName)
source.path = ufo.path
source.location = ufo_to_location[ufo]
designspace.addSource(source)
# UFO-level skip list lib keys are usually ignored, except when we don't have a
# Designspace file to start from. If they exist in the UFOs, promote them to a
# Designspace-level lib key. However, to avoid accidents, expect the list to
# exist in none or be the same in all UFOs.
if any("public.skipExportGlyphs" in ufo.lib for ufo in ufos):
skip_export_glyphs = {
frozenset(ufo.lib.get("public.skipExportGlyphs", [])) for ufo in ufos
}
if len(skip_export_glyphs) == 1:
designspace.lib["public.skipExportGlyphs"] = sorted(
next(iter(skip_export_glyphs))
)
else:
raise ValueError(
"The `public.skipExportGlyphs` list of all UFOs must either not "
"exist or be the same in every UFO."
)
return designspace | python | def _fake_designspace(self, ufos):
"""Build a fake designspace with the given UFOs as sources, so that all
builder functions can rely on the presence of a designspace.
"""
designspace = designspaceLib.DesignSpaceDocument()
ufo_to_location = defaultdict(dict)
# Make weight and width axis if relevant
for info_key, axis_def in zip(
("openTypeOS2WeightClass", "openTypeOS2WidthClass"),
(WEIGHT_AXIS_DEF, WIDTH_AXIS_DEF),
):
axis = designspace.newAxisDescriptor()
axis.tag = axis_def.tag
axis.name = axis_def.name
mapping = []
for ufo in ufos:
user_loc = getattr(ufo.info, info_key)
if user_loc is not None:
design_loc = class_to_value(axis_def.tag, user_loc)
mapping.append((user_loc, design_loc))
ufo_to_location[ufo][axis_def.name] = design_loc
mapping = sorted(set(mapping))
if len(mapping) > 1:
axis.map = mapping
axis.minimum = min([user_loc for user_loc, _ in mapping])
axis.maximum = max([user_loc for user_loc, _ in mapping])
axis.default = min(
axis.maximum, max(axis.minimum, axis_def.default_user_loc)
)
designspace.addAxis(axis)
for ufo in ufos:
source = designspace.newSourceDescriptor()
source.font = ufo
source.familyName = ufo.info.familyName
source.styleName = ufo.info.styleName
# source.name = '%s %s' % (source.familyName, source.styleName)
source.path = ufo.path
source.location = ufo_to_location[ufo]
designspace.addSource(source)
# UFO-level skip list lib keys are usually ignored, except when we don't have a
# Designspace file to start from. If they exist in the UFOs, promote them to a
# Designspace-level lib key. However, to avoid accidents, expect the list to
# exist in none or be the same in all UFOs.
if any("public.skipExportGlyphs" in ufo.lib for ufo in ufos):
skip_export_glyphs = {
frozenset(ufo.lib.get("public.skipExportGlyphs", [])) for ufo in ufos
}
if len(skip_export_glyphs) == 1:
designspace.lib["public.skipExportGlyphs"] = sorted(
next(iter(skip_export_glyphs))
)
else:
raise ValueError(
"The `public.skipExportGlyphs` list of all UFOs must either not "
"exist or be the same in every UFO."
)
return designspace | ['def', '_fake_designspace', '(', 'self', ',', 'ufos', ')', ':', 'designspace', '=', 'designspaceLib', '.', 'DesignSpaceDocument', '(', ')', 'ufo_to_location', '=', 'defaultdict', '(', 'dict', ')', '# Make weight and width axis if relevant', 'for', 'info_key', ',', 'axis_def', 'in', 'zip', '(', '(', '"openTypeOS2WeightClass"', ',', '"openTypeOS2WidthClass"', ')', ',', '(', 'WEIGHT_AXIS_DEF', ',', 'WIDTH_AXIS_DEF', ')', ',', ')', ':', 'axis', '=', 'designspace', '.', 'newAxisDescriptor', '(', ')', 'axis', '.', 'tag', '=', 'axis_def', '.', 'tag', 'axis', '.', 'name', '=', 'axis_def', '.', 'name', 'mapping', '=', '[', ']', 'for', 'ufo', 'in', 'ufos', ':', 'user_loc', '=', 'getattr', '(', 'ufo', '.', 'info', ',', 'info_key', ')', 'if', 'user_loc', 'is', 'not', 'None', ':', 'design_loc', '=', 'class_to_value', '(', 'axis_def', '.', 'tag', ',', 'user_loc', ')', 'mapping', '.', 'append', '(', '(', 'user_loc', ',', 'design_loc', ')', ')', 'ufo_to_location', '[', 'ufo', ']', '[', 'axis_def', '.', 'name', ']', '=', 'design_loc', 'mapping', '=', 'sorted', '(', 'set', '(', 'mapping', ')', ')', 'if', 'len', '(', 'mapping', ')', '>', '1', ':', 'axis', '.', 'map', '=', 'mapping', 'axis', '.', 'minimum', '=', 'min', '(', '[', 'user_loc', 'for', 'user_loc', ',', '_', 'in', 'mapping', ']', ')', 'axis', '.', 'maximum', '=', 'max', '(', '[', 'user_loc', 'for', 'user_loc', ',', '_', 'in', 'mapping', ']', ')', 'axis', '.', 'default', '=', 'min', '(', 'axis', '.', 'maximum', ',', 'max', '(', 'axis', '.', 'minimum', ',', 'axis_def', '.', 'default_user_loc', ')', ')', 'designspace', '.', 'addAxis', '(', 'axis', ')', 'for', 'ufo', 'in', 'ufos', ':', 'source', '=', 'designspace', '.', 'newSourceDescriptor', '(', ')', 'source', '.', 'font', '=', 'ufo', 'source', '.', 'familyName', '=', 'ufo', '.', 'info', '.', 'familyName', 'source', '.', 'styleName', '=', 'ufo', '.', 'info', '.', 'styleName', "# source.name = '%s %s' % (source.familyName, source.styleName)", 'source', '.', 'path', '=', 'ufo', '.', 'path', 'source', '.', 'location', '=', 'ufo_to_location', '[', 'ufo', ']', 'designspace', '.', 'addSource', '(', 'source', ')', "# UFO-level skip list lib keys are usually ignored, except when we don't have a", '# Designspace file to start from. If they exist in the UFOs, promote them to a', '# Designspace-level lib key. However, to avoid accidents, expect the list to', '# exist in none or be the same in all UFOs.', 'if', 'any', '(', '"public.skipExportGlyphs"', 'in', 'ufo', '.', 'lib', 'for', 'ufo', 'in', 'ufos', ')', ':', 'skip_export_glyphs', '=', '{', 'frozenset', '(', 'ufo', '.', 'lib', '.', 'get', '(', '"public.skipExportGlyphs"', ',', '[', ']', ')', ')', 'for', 'ufo', 'in', 'ufos', '}', 'if', 'len', '(', 'skip_export_glyphs', ')', '==', '1', ':', 'designspace', '.', 'lib', '[', '"public.skipExportGlyphs"', ']', '=', 'sorted', '(', 'next', '(', 'iter', '(', 'skip_export_glyphs', ')', ')', ')', 'else', ':', 'raise', 'ValueError', '(', '"The `public.skipExportGlyphs` list of all UFOs must either not "', '"exist or be the same in every UFO."', ')', 'return', 'designspace'] | Build a fake designspace with the given UFOs as sources, so that all
builder functions can rely on the presence of a designspace. | ['Build', 'a', 'fake', 'designspace', 'with', 'the', 'given', 'UFOs', 'as', 'sources', 'so', 'that', 'all', 'builder', 'functions', 'can', 'rely', 'on', 'the', 'presence', 'of', 'a', 'designspace', '.'] | train | https://github.com/googlefonts/glyphsLib/blob/9c12dc70c8d13f08d92b824e6710f6e3bb5037bb/Lib/glyphsLib/builder/builders.py#L640-L702 |
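The skipExportGlyphs handling at the end of the record above hinges on one trick: hashing each per-source list as a frozenset, so ordering differences do not count as conflicts. The same check in isolation, applied to plain dictionaries rather than UFO objects:

libs = [
    {'public.skipExportGlyphs': ['b', 'a']},   # hypothetical per-source lib dicts
    {'public.skipExportGlyphs': ['a', 'b']},
]

if any('public.skipExportGlyphs' in lib for lib in libs):
    variants = {frozenset(lib.get('public.skipExportGlyphs', [])) for lib in libs}
    if len(variants) == 1:
        promoted = sorted(next(iter(variants)))   # ['a', 'b']: identical everywhere, safe to promote
    else:
        raise ValueError('public.skipExportGlyphs differs between sources')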
4,097 | ask/carrot | carrot/backends/pystomp.py | Message.ack | def ack(self):
"""Acknowledge this message as being processed.,
This will remove the message from the queue.
:raises MessageStateError: If the message has already been
acknowledged/requeued/rejected.
"""
if self.acknowledged:
raise self.MessageStateError(
"Message already acknowledged with state: %s" % self._state)
self.backend.ack(self._frame)
self._state = "ACK" | python | def ack(self):
"""Acknowledge this message as being processed.,
This will remove the message from the queue.
:raises MessageStateError: If the message has already been
acknowledged/requeued/rejected.
"""
if self.acknowledged:
raise self.MessageStateError(
"Message already acknowledged with state: %s" % self._state)
self.backend.ack(self._frame)
self._state = "ACK" | ['def', 'ack', '(', 'self', ')', ':', 'if', 'self', '.', 'acknowledged', ':', 'raise', 'self', '.', 'MessageStateError', '(', '"Message already acknowledged with state: %s"', '%', 'self', '.', '_state', ')', 'self', '.', 'backend', '.', 'ack', '(', 'self', '.', '_frame', ')', 'self', '.', '_state', '=', '"ACK"'] | Acknowledge this message as being processed.,
This will remove the message from the queue.
:raises MessageStateError: If the message has already been
acknowledged/requeued/rejected. | ['Acknowledge', 'this', 'message', 'as', 'being', 'processed', '.', 'This', 'will', 'remove', 'the', 'message', 'from', 'the', 'queue', '.'] | train | https://github.com/ask/carrot/blob/5889a25cd2e274642071c9bba39772f4b3e3d9da/carrot/backends/pystomp.py#L54-L66 |
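The guard in ack() above is a small state machine: acknowledge once, then refuse further state changes. A self-contained sketch of the same idea, with a dummy backend standing in for carrot's STOMP backend:

class MessageStateError(Exception):
    pass

class DummyBackend:
    def ack(self, frame):
        pass   # a real backend would acknowledge the frame on the broker

class DemoMessage:
    def __init__(self, backend, frame):
        self.backend, self._frame = backend, frame
        self._state = 'RECEIVED'

    @property
    def acknowledged(self):
        return self._state in ('ACK', 'REJECTED', 'REQUEUED')

    def ack(self):
        if self.acknowledged:
            raise MessageStateError('already acknowledged with state: %s' % self._state)
        self.backend.ack(self._frame)   # remove the message from the queue
        self._state = 'ACK'

msg = DemoMessage(DummyBackend(), frame=object())
msg.ack()      # first call succeeds
# msg.ack()    # a second call would raise MessageStateError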
4,098 | david-cortes/costsensitive | costsensitive/__init__.py | WeightedOneVsRest.predict | def predict(self, X):
"""
Predict the less costly class for a given observation
Parameters
----------
X : array (n_samples, n_features)
Data for which to predict minimum cost label.
Returns
-------
y_hat : array (n_samples,)
Label with expected minimum cost for each observation.
"""
X = _check_2d_inp(X)
return np.argmax(self.decision_function(X), axis=1) | python | def predict(self, X):
"""
Predict the less costly class for a given observation
Parameters
----------
X : array (n_samples, n_features)
Data for which to predict minimum cost label.
Returns
-------
y_hat : array (n_samples,)
Label with expected minimum cost for each observation.
"""
X = _check_2d_inp(X)
return np.argmax(self.decision_function(X), axis=1) | ['def', 'predict', '(', 'self', ',', 'X', ')', ':', 'X', '=', '_check_2d_inp', '(', 'X', ')', 'return', 'np', '.', 'argmax', '(', 'self', '.', 'decision_function', '(', 'X', ')', ',', 'axis', '=', '1', ')'] | Predict the less costly class for a given observation
Parameters
----------
X : array (n_samples, n_features)
Data for which to predict minimum cost label.
Returns
-------
y_hat : array (n_samples,)
Label with expected minimum cost for each observation. | ['Predict', 'the', 'less', 'costly', 'class', 'for', 'a', 'given', 'observation', 'Parameters', '----------', 'X', ':', 'array', '(', 'n_samples', 'n_features', ')', 'Data', 'for', 'which', 'to', 'predict', 'minimum', 'cost', 'label', '.', 'Returns', '-------', 'y_hat', ':', 'array', '(', 'n_samples', ')', 'Label', 'with', 'expected', 'minimum', 'cost', 'for', 'each', 'observation', '.'] | train | https://github.com/david-cortes/costsensitive/blob/355fbf20397ce673ce9e22048b6c52dbeeb354cc/costsensitive/__init__.py#L802-L817 |
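The final line of predict() above is a row-wise argmax over per-class scores. A tiny numpy illustration with made-up score values (in the real class the scores come from decision_function of the fitted one-vs-rest classifiers):

import numpy as np

scores = np.array([[0.2, 0.7, 0.1],     # observation 0 -> class 1
                   [0.9, 0.05, 0.05]])  # observation 1 -> class 0
y_hat = np.argmax(scores, axis=1)
print(y_hat)                            # [1 0]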
4,099 | hyde/fswrap | fswrap.py | FS.file_or_folder | def file_or_folder(path):
"""
Returns a File or Folder object that would represent the given path.
"""
target = unicode(path)
return Folder(target) if os.path.isdir(target) else File(target) | python | def file_or_folder(path):
"""
Returns a File or Folder object that would represent the given path.
"""
target = unicode(path)
return Folder(target) if os.path.isdir(target) else File(target) | ['def', 'file_or_folder', '(', 'path', ')', ':', 'target', '=', 'unicode', '(', 'path', ')', 'return', 'Folder', '(', 'target', ')', 'if', 'os', '.', 'path', '.', 'isdir', '(', 'target', ')', 'else', 'File', '(', 'target', ')'] | Returns a File or Folder object that would represent the given path. | ['Returns', 'a', 'File', 'or', 'Folder', 'object', 'that', 'would', 'represent', 'the', 'given', 'path', '.'] | train | https://github.com/hyde/fswrap/blob/41e4ad6f7e9ba73eabe61bd97847cd284e3edbd2/fswrap.py#L157-L162 |