Column                       Type            Min      Max
Unnamed: 0                   int64           0        10k
repository_name              stringlengths   7        54
func_path_in_repository      stringlengths   5        223
func_name                    stringlengths   1        134
whole_func_string            stringlengths   100      30.3k
language                     stringclasses   1 value
func_code_string             stringlengths   100      30.3k
func_code_tokens             stringlengths   138      33.2k
func_documentation_string    stringlengths   1        15k
func_documentation_tokens    stringlengths   5        5.14k
split_name                   stringclasses   1 value
func_code_url                stringlengths   91       315
5,700
willhardy/django-seo
rollyourown/seo/options.py
Options._register_elements
def _register_elements(self, elements):
    """ Takes elements from the metadata class and creates a base model
    for all backend models.
    """
    self.elements = elements

    for key, obj in elements.items():
        obj.contribute_to_class(self.metadata, key)

    # Create the common Django fields
    fields = {}
    for key, obj in elements.items():
        if obj.editable:
            field = obj.get_field()
            if not field.help_text:
                if key in self.bulk_help_text:
                    field.help_text = self.bulk_help_text[key]
            fields[key] = field

    # 0. Abstract base model with common fields
    base_meta = type('Meta', (), self.original_meta)

    class BaseMeta(base_meta):
        abstract = True
        app_label = 'seo'

    fields['Meta'] = BaseMeta

    # Do we need this?
    fields['__module__'] = __name__  # attrs['__module__']

    self.MetadataBaseModel = type('%sBase' % self.name,
                                  (models.Model,), fields)
python
Takes elements from the metadata class and creates a base model for all backend models.
train
https://github.com/willhardy/django-seo/blob/3089686a3c490091315860979ad15ef2527c3e3e/rollyourown/seo/options.py#L38-L64
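The interesting move in this record is the three-argument form of type(), which builds the abstract base model class at runtime. Below is a minimal, framework-free sketch of that mechanism; the attribute names are placeholders, not django-seo's real fields.

class Base(object):
    pass

# Build a class at runtime from a name, a tuple of bases, and an attribute
# dict, the same call _register_elements makes with models.Model as the base.
fields = {
    'title': 'placeholder attribute',   # stands in for a Django model field
    '__module__': __name__,             # mirrors fields['__module__'] above
}
MetadataBase = type('MetadataBase', (Base,), fields)

assert MetadataBase.__name__ == 'MetadataBase'
assert MetadataBase.title == 'placeholder attribute'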
5,701
juju/python-libjuju
juju/utils.py
generate_user_controller_access_token
def generate_user_controller_access_token(username, controller_endpoints,
                                          secret_key, controller_name):
    """ Implement in python what is currently done in GO
    https://github.com/juju/juju/blob/a5ab92ec9b7f5da3678d9ac603fe52d45af24412/cmd/juju/user/utils.go#L16

    :param username: name of the user to register
    :param controller_endpoints: juju controller endpoints list in the format <ip>:<port>
    :param secret_key: base64 encoded string of the secret-key generated by juju
    :param controller_name: name of the controller to register to.
    """
    # Secret key is returned as base64 encoded string in:
    # https://websockets.readthedocs.io/en/stable/_modules/websockets/protocol.html#WebSocketCommonProtocol.recv
    # Decoding it before marshalling into the ASN.1 message
    secret_key = base64.b64decode(secret_key)
    addr = Addrs()
    for endpoint in controller_endpoints:
        addr.append(endpoint)

    registration_string = RegistrationInfo()
    registration_string.setComponentByPosition(0, char.PrintableString(username))
    registration_string.setComponentByPosition(1, addr)
    registration_string.setComponentByPosition(2, univ.OctetString(secret_key))
    registration_string.setComponentByPosition(3, char.PrintableString(controller_name))
    registration_string = encode(registration_string)
    remainder = len(registration_string) % 3
    if remainder:  # pad only when not already a multiple of 3 bytes
        registration_string += b"\0" * (3 - remainder)
    return base64.urlsafe_b64encode(registration_string)
python
Implement in python what is currently done in GO https://github.com/juju/juju/blob/a5ab92ec9b7f5da3678d9ac603fe52d45af24412/cmd/juju/user/utils.go#L16 :param username: name of the user to register :param controller_endpoints: juju controller endpoints list in the format <ip>:<port> :param secret_key: base64 encoded string of the secret-key generated by juju :param controller_name: name of the controller to register to.
train
https://github.com/juju/python-libjuju/blob/58f0011f4c57cd68830258952fa952eaadca6b38/juju/utils.py#L139-L165
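The padding step exists because base64 maps every 3 input bytes to exactly 4 output characters, so a payload whose length is a multiple of 3 encodes with no '=' characters. A standalone illustration:

import base64

payload = b"\x01\x02\x03\x04"   # 4 bytes would normally encode with '=' padding
remainder = len(payload) % 3
if remainder:
    payload += b"\0" * (3 - remainder)   # pad to a multiple of 3 bytes

token = base64.urlsafe_b64encode(payload)
assert b"=" not in token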
5,702
svenkreiss/pysparkling
pysparkling/fileio/file.py
File.resolve_filenames
def resolve_filenames(all_expr):
    """resolve expression for a filename

    :param all_expr: A comma separated list of expressions. The expressions
        can contain the wildcard characters ``*`` and ``?``. It also
        resolves Spark datasets to the paths of the individual partitions
        (i.e. ``my_data`` gets resolved to
        ``[my_data/part-00000, my_data/part-00001]``).

    :returns: A list of file names.
    :rtype: list
    """
    files = []
    for expr in all_expr.split(','):
        expr = expr.strip()
        files += fs.get_fs(expr).resolve_filenames(expr)

    log.debug('Filenames: {0}'.format(files))
    return files
python
resolve expression for a filename :param all_expr: A comma separated list of expressions. The expressions can contain the wildcard characters ``*`` and ``?``. It also resolves Spark datasets to the paths of the individual partitions (i.e. ``my_data`` gets resolved to ``[my_data/part-00000, my_data/part-00001]``). :returns: A list of file names. :rtype: list
train
https://github.com/svenkreiss/pysparkling/blob/596d0ef2793100f7115efe228ff9bfc17beaa08d/pysparkling/fileio/file.py#L24-L42
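The comma-splitting half of the function can be reproduced locally by swapping the pysparkling filesystem layer for glob; a sketch under that substitution, where glob stands in for fs.get_fs(...).resolve_filenames:

import glob

def resolve_local(all_expr):
    # Same split/strip/accumulate loop as above, with glob doing the lookup.
    files = []
    for expr in all_expr.split(','):
        files += glob.glob(expr.strip())
    return files

# resolve_local('my_data/part-*, *.csv') returns all matching local paths.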
5,703
databricks/spark-sklearn
python/spark_sklearn/converter.py
Converter._toSparkGLM
def _toSparkGLM(self, model):
    """ Private method for converting a GLM to a Spark model
    TODO: Add model parameters as well.
    """
    skl_cls = type(model)
    py_cls = self._skl2spark_classes[skl_cls].py
    jvm_cls_name = self._skl2spark_classes[skl_cls].jvm
    intercept = model.intercept_
    weights = model.coef_
    if len(np.shape(weights)) == 1 \
            or (len(np.shape(weights)) == 2 and np.shape(weights)[0] == 1):
        # Binary classification
        uid = _randomUID(skl_cls)
        _java_model = _new_java_obj(self.sc, jvm_cls_name, uid,
                                    Vectors.dense(weights), float(intercept))
        return py_cls(_java_model)
    elif len(np.shape(weights)) == 2 and skl_cls == SKL_LogisticRegression:
        # Multiclass label
        raise ValueError("Converter.toSpark cannot convert a multiclass sklearn Logistic"
                         " Regression model to Spark because Spark does not yet support"
                         " multiclass. Given model is for %d classes."
                         % np.shape(weights)[0])
    else:
        # str() both operands so building the message cannot itself raise.
        raise Exception("Converter.toSpark experienced unknown error when trying to"
                        " convert a model of type: " + str(type(model)) + " "
                        + str(len(np.shape(weights))))
python
Private method for converting a GLM to a Spark model TODO: Add model parameters as well.
train
https://github.com/databricks/spark-sklearn/blob/cbde36f6311b73d967e2ec8a97040dfd71eca579/python/spark_sklearn/converter.py#L71-L94
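The branching keys entirely off np.shape(model.coef_): a 1-D vector or a (1, n) matrix is treated as binary classification, while (k, n) with k > 1 is multiclass. The check in isolation:

import numpy as np

for w in (np.zeros(5), np.zeros((1, 5)), np.zeros((3, 5))):
    shape = np.shape(w)
    is_binary = len(shape) == 1 or (len(shape) == 2 and shape[0] == 1)
    print(shape, 'binary' if is_binary else 'multiclass')
# (5,) binary / (1, 5) binary / (3, 5) multiclass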
5,704
moderngl/moderngl
examples/window/__init__.py
parse_args
def parse_args(args=None):
    """Parse arguments from sys.argv"""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-w', '--window',
        default="pyqt5",
        choices=find_window_classes(),
        help='Name for the window type to use',
    )
    parser.add_argument(
        '-fs', '--fullscreen',
        action="store_true",
        help='Open the window in fullscreen mode',
    )
    parser.add_argument(
        '-vs', '--vsync',
        type=str2bool,
        default="1",
        help="Enable or disable vsync",
    )
    parser.add_argument(
        '-s', '--samples',
        type=int,
        default=4,
        help="Specify the desired number of samples to use for multisampling",
    )
    parser.add_argument(
        '-c', '--cursor',
        type=str2bool,
        default="true",
        help="Enable or disable displaying the mouse cursor",
    )
    return parser.parse_args(args or sys.argv[1:])
python
Parse arguments from sys.argv
train
https://github.com/moderngl/moderngl/blob/a8f5dce8dc72ae84a2f9523887fb5f6b620049b9/examples/window/__init__.py#L65-L99
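str2bool is defined elsewhere in the examples/window module; the sketch below is a plausible implementation of the coercion the --vsync and --cursor options rely on, an assumption rather than the project's exact code.

import argparse

def str2bool(value):
    # Hypothetical stand-in for the module's real converter.
    if value.lower() in ('1', 'true', 'yes', 'on'):
        return True
    if value.lower() in ('0', 'false', 'no', 'off'):
        return False
    raise argparse.ArgumentTypeError('boolean value expected, got %r' % value)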
5,705
iotile/coretools
iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Executor.py
Executor.scan
def scan(self, scanner, node_list):
    """Scan a list of this Executor's files (targets or sources) for
    implicit dependencies and update all of the targets with them.
    This essentially short-circuits an N*M scan of the sources for
    each individual target, which is a hell of a lot more efficient.
    """
    env = self.get_build_env()
    path = self.get_build_scanner_path
    kw = self.get_kw()

    # TODO(batch): scan by batches
    deps = []
    for node in node_list:
        node.disambiguate()
        deps.extend(node.get_implicit_deps(env, scanner, path, kw))

    deps.extend(self.get_implicit_deps())

    for tgt in self.get_all_targets():
        tgt.add_to_implicit(deps)
python
Scan a list of this Executor's files (targets or sources) for implicit dependencies and update all of the targets with them. This essentially short-circuits an N*M scan of the sources for each individual target, which is a hell of a lot more efficient.
train
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Executor.py#L487-L507
5,706
eqcorrscan/EQcorrscan
eqcorrscan/utils/pre_processing.py
_fill_gaps
def _fill_gaps(tr):
    """
    Interpolate through gaps and work-out where gaps are.

    :param tr: Gappy trace (e.g. tr.data is np.ma.MaskedArray)
    :type tr: `obspy.core.stream.Trace`

    :return: gaps, trace, where gaps is a list of dict
    """
    tr = tr.split()
    gaps = tr.get_gaps()
    tr = tr.detrend().merge(fill_value=0)[0]
    gaps = [{'starttime': gap[4], 'endtime': gap[5]} for gap in gaps]
    return gaps, tr
python
Interpolate through gaps and work-out where gaps are. :param tr: Gappy trace (e.g. tr.data is np.ma.MaskedArray) :type tr: `obspy.core.stream.Trace` :return: gaps, trace, where gaps is a list of dict
train
https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/utils/pre_processing.py#L621-L634
5,707
vanheeringen-lab/gimmemotifs
gimmemotifs/tools.py
Hms.parse
def parse(self, fo):
    """
    Convert HMS output to motifs

    Parameters
    ----------
    fo : file-like
        File object containing HMS output.

    Returns
    -------
    motifs : list
        List of Motif instances.
    """
    motifs = []
    m = [[float(x) for x in fo.readline().strip().split(" ")]
         for i in range(4)]
    matrix = [[m[0][i], m[1][i], m[2][i], m[3][i]]
              for i in range(len(m[0]))]
    motifs = [Motif(matrix)]
    motifs[-1].id = self.name
    return motifs
python
Convert HMS output to motifs Parameters ---------- fo : file-like File object containing HMS output. Returns ------- motifs : list List of Motif instances.
train
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/tools.py#L633-L653
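The matrix construction is a transpose: four whitespace-separated rows (presumably one per nucleotide) become one column per motif position. The same reshaping, shown with zip for comparison:

rows = [[0.1, 0.2], [0.3, 0.4], [0.5, 0.6], [0.7, 0.8]]   # 4 rows x 2 positions
matrix = [[rows[0][i], rows[1][i], rows[2][i], rows[3][i]]
          for i in range(len(rows[0]))]
assert matrix == [list(col) for col in zip(*rows)]
print(matrix)   # [[0.1, 0.3, 0.5, 0.7], [0.2, 0.4, 0.6, 0.8]]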
5,708
inasafe/inasafe
safe/messaging/item/message_element.py
MessageElement._is_qstring
def _is_qstring(message):
    """Check if it's a QString without adding any dep to PyQt5."""
    my_class = str(message.__class__)
    my_class_name = my_class.replace('<class \'', '').replace('\'>', '')
    if my_class_name == 'PyQt5.QtCore.QString':
        return True
    return False
python
Check if it's a QString without adding any dep to PyQt5.
train
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/messaging/item/message_element.py#L57-L64
5,709
SmartTeleMax/iktomi
iktomi/unstable/db/sqla/replication.py
replicate_filter
def replicate_filter(sources, model, cache=None):
    '''Replicates the list of objects to other class and returns their
    reflections'''
    targets = [replicate_no_merge(source, model, cache=cache)
               for source in sources]
    # Some objects may not be available in target DB (not published), so we
    # have to exclude None from the list.
    return [target for target in targets if target is not None]
python
Replicates the list of objects to other class and returns their reflections
train
https://github.com/SmartTeleMax/iktomi/blob/80bc0f1408d63efe7f5844367d1f6efba44b35f2/iktomi/unstable/db/sqla/replication.py#L217-L224
5,710
frejanordsiek/GeminiMotorDrive
GeminiMotorDrive/compilers/move_sequence.py
get_sequence_time
def get_sequence_time(cycles, unit_converter=None, eres=None):
    """ Calculates the time the move sequence will take to complete.

    Calculates the amount of time it will take to complete the given
    move sequence. Types of motion supported are moves from one
    position to another (the motion will always come to a stop before
    doing the next motion), waiting a given interval of time till
    starting the next move, and looping over a sequence of moves.

    Parameters
    ----------
    cycles : list of dicts
        The ``list`` of cycles of motion to do one after another. See
        ``compile_sequence`` for format.
    unit_converter : UnitConverter, optional
        ``GeminiMotorDrive.utilities.UnitConverter`` to use to convert
        the units in `cycles` to motor units. ``None`` indicates that
        they are already in motor units.
    eres : int
        Encoder resolution. Only relevant if `unit_converter` is
        ``None``.

    Returns
    -------
    time : float
        Time the move sequence will take in seconds.

    See Also
    --------
    compile_sequence
    GeminiMotorDrive.utilities.UnitConverter
    move_time

    """
    # If we are doing unit conversion, then that is equivalent to motor
    # units but with eres equal to one.
    if unit_converter is not None:
        eres = 1
    # Starting with 0 time, steadily add the time of each movement.
    tme = 0.0
    # Go through each cycle and collect times.
    for cycle in cycles:
        # Add all the wait times.
        tme += cycle['iterations'] * sum(cycle['wait_times'])
        # Add the time for each individual move.
        for move in cycle['moves']:
            tme += cycle['iterations'] \
                * move_time(move, eres=eres)
    # Done.
    return tme
python
Calculates the time the move sequence will take to complete. Calculates the amount of time it will take to complete the given move sequence. Types of motion supported are moves from one position to another (the motion will always come to a stop before doing the next motion), waiting a given interval of time till starting the next move, and looping over a sequence of moves. Parameters ---------- cycles : list of dicts The ``list`` of cycles of motion to do one after another. See ``compile_sequence`` for format. unit_converter : UnitConverter, optional ``GeminiMotorDrive.utilities.UnitConverter`` to use to convert the units in `cycles` to motor units. ``None`` indicates that they are already in motor units. eres : int Encoder resolution. Only relevant if `unit_converter` is ``None``. Returns ------- time : float Time the move sequence will take in seconds. See Also -------- compile_sequence GeminiMotorDrive.utilities.UnitConverter move_time
train
https://github.com/frejanordsiek/GeminiMotorDrive/blob/8de347ffb91228fbfe3832098b4996fa0141d8f1/GeminiMotorDrive/compilers/move_sequence.py#L268-L318
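A hypothetical usage sketch of the timing loop, with move_time stubbed out. The cycle keys 'iterations', 'wait_times' and 'moves' come from the docstring above; the 'duration' key is invented purely for the stub.

def move_time(move, eres=1):
    # Stub standing in for GeminiMotorDrive's real kinematics calculation.
    return move['duration']

cycles = [{'iterations': 2,
           'wait_times': [0.5, 0.5],
           'moves': [{'duration': 1.0}, {'duration': 2.0}]}]

tme = 0.0
for cycle in cycles:
    tme += cycle['iterations'] * sum(cycle['wait_times'])
    for move in cycle['moves']:
        tme += cycle['iterations'] * move_time(move)

print(tme)   # 8.0 = 2*(0.5+0.5) + 2*(1.0+2.0)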
5,711
bsmurphy/PyKrige
pykrige/core.py
_variogram_residuals
def _variogram_residuals(params, x, y, variogram_function, weight):
    """Function used in variogram model estimation. Returns residuals
    between calculated variogram and actual data (lags/semivariance).
    Called by _calculate_variogram_model.

    Parameters
    ----------
    params: list or 1D array
        parameters for calculating the model variogram
    x: ndarray
        lags (distances) at which to evaluate the model variogram
    y: ndarray
        experimental semivariances at the specified lags
    variogram_function: callable
        the actual function that evaluates the model variogram
    weight: bool
        flag for implementing the crude weighting routine, used in order
        to fit smaller lags better

    Returns
    -------
    resid: 1d array
        residuals, dimension same as y
    """

    # this crude weighting routine can be used to better fit the model
    # variogram to the experimental variogram at smaller lags...
    # the weights are calculated from a logistic function, so weights at small
    # lags are ~1 and weights at the longest lags are ~0;
    # the center of the logistic weighting is hard-coded to be at 70% of the
    # distance from the shortest lag to the largest lag
    if weight:
        drange = np.amax(x) - np.amin(x)
        k = 2.1972 / (0.1 * drange)
        x0 = 0.7 * drange + np.amin(x)
        weights = 1. / (1. + np.exp(-k * (x0 - x)))
        weights /= np.sum(weights)
        resid = (variogram_function(params, x) - y) * weights
    else:
        resid = variogram_function(params, x) - y

    return resid
python
Function used in variogram model estimation. Returns residuals between calculated variogram and actual data (lags/semivariance). Called by _calculate_variogram_model. Parameters ---------- params: list or 1D array parameters for calculating the model variogram x: ndarray lags (distances) at which to evaluate the model variogram y: ndarray experimental semivariances at the specified lags variogram_function: callable the actual function that evaluates the model variogram weight: bool flag for implementing the crude weighting routine, used in order to fit smaller lags better Returns ------- resid: 1d array residuals, dimension same as y
train
https://github.com/bsmurphy/PyKrige/blob/a4db3003b0b5688658c12faeb95a5a8b2b14b433/pykrige/core.py#L487-L528
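The logistic weights can be inspected standalone; with the constants above, they sit near 1 for the shortest lags and fall toward 0 past 70% of the lag range:

import numpy as np

x = np.linspace(1.0, 100.0, 5)           # example lags
drange = np.amax(x) - np.amin(x)
k = 2.1972 / (0.1 * drange)
x0 = 0.7 * drange + np.amin(x)
weights = 1. / (1. + np.exp(-k * (x0 - x)))
weights /= np.sum(weights)
print(weights)   # monotonically decreasing: short lags dominate the fit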
5,712
ldomic/lintools
lintools/analysis/pistacking.py
PiStacking.make_table
def make_table(self):
    """Make numpy array from timeseries data."""
    num_records = int(np.sum([1 for frame in self.timeseries]))
    dtype = [
        ("frame", float), ("time", float), ("proteinring", list),
        ("ligand_ring_ids", list), ("distance", float), ("angle", float),
        ("offset", float), ("type", "|U4"), ("resid", int),
        ("resname", "|U4"), ("segid", "|U8")
    ]
    out = np.empty((num_records,), dtype=dtype)
    cursor = 0
    for contact in self.timeseries:
        out[cursor] = (contact.frame, contact.time, contact.proteinring,
                       contact.ligandring, contact.distance, contact.angle,
                       contact.offset, contact.type, contact.resid,
                       contact.resname, contact.segid)
        cursor += 1
    return out.view(np.recarray)
python
Make numpy array from timeseries data.
train
https://github.com/ldomic/lintools/blob/d825a4a7b35f3f857d3b81b46c9aee72b0ec697a/lintools/analysis/pistacking.py#L186-L198
5,713
ladybug-tools/ladybug
ladybug/location.py
Location.ep_style_location_string
def ep_style_location_string(self):
    """Return EnergyPlus's location string."""
    return "Site:Location,\n " + \
        self.city + ',\n ' + \
        str(self.latitude) + ', !Latitude\n ' + \
        str(self.longitude) + ', !Longitude\n ' + \
        str(self.time_zone) + ', !Time Zone\n ' + \
        str(self.elevation) + '; !Elevation'
python
Return EnergyPlus's location string.
train
https://github.com/ladybug-tools/ladybug/blob/c08b7308077a48d5612f644943f92d5b5dade583/ladybug/location.py#L165-L172
5,714
pyvisa/pyvisa
pyvisa/ctwrapper/functions.py
in_8
def in_8(library, session, space, offset, extended=False):
    """Reads in an 8-bit value from the specified memory space and offset.

    Corresponds to viIn8* function of the VISA library.

    :param library: the visa library wrapped by ctypes.
    :param session: Unique logical identifier to a session.
    :param space: Specifies the address space. (Constants.*SPACE*)
    :param offset: Offset (in bytes) of the address or register from which to read.
    :param extended: Use 64 bits offset independent of the platform.
    :return: Data read from memory, return value of the library call.
    :rtype: int, :class:`pyvisa.constants.StatusCode`
    """
    value_8 = ViUInt8()
    if extended:
        ret = library.viIn8Ex(session, space, offset, byref(value_8))
    else:
        ret = library.viIn8(session, space, offset, byref(value_8))
    return value_8.value, ret
python
Reads in an 8-bit value from the specified memory space and offset. Corresponds to viIn8* function of the VISA library. :param library: the visa library wrapped by ctypes. :param session: Unique logical identifier to a session. :param space: Specifies the address space. (Constants.*SPACE*) :param offset: Offset (in bytes) of the address or register from which to read. :param extended: Use 64 bits offset independent of the platform. :return: Data read from memory, return value of the library call. :rtype: int, :class:`pyvisa.constants.StatusCode`
train
https://github.com/pyvisa/pyvisa/blob/b8b2d4371e1f00782856aa9176ff1ced6bcb3798/pyvisa/ctwrapper/functions.py#L640-L658
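The byref out-parameter idiom here is plain ctypes; stripped of VISA, it looks like the POSIX-only sketch below, which uses libc's sscanf to fill an integer through a pointer.

import ctypes

libc = ctypes.CDLL(None)            # POSIX: handle to the already-linked libc
value = ctypes.c_int()
libc.sscanf(b'42', b'%d', ctypes.byref(value))
print(value.value)                  # 42, read back after the C call returns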
5,715
juju/charm-helpers
charmhelpers/contrib/network/ufw.py
service
def service(name, action):
    """
    Open/close access to a service

    :param name: could be a service name defined in `/etc/services` or a
                 port number.
    :param action: `open` or `close`
    """
    if action == 'open':
        subprocess.check_output(['ufw', 'allow', str(name)],
                                universal_newlines=True)
    elif action == 'close':
        subprocess.check_output(['ufw', 'delete', 'allow', str(name)],
                                universal_newlines=True)
    else:
        raise UFWError(("'{}' not supported, use 'open' "
                        "or 'close'").format(action))
python
Open/close access to a service :param name: could be a service name defined in `/etc/services` or a port number. :param action: `open` or `close`
train
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/network/ufw.py#L323-L339
5,716
panosl/django-currencies
currencies/management/commands/_yahoofinance.py
CurrencyHandler.get_rate
def get_rate(self, code):
    """
    Helper function to access the rates structure
    Returns a dict containing name, price, symbol (the code), timestamp,
    type, utctime & volume
    """
    rateslist = self.rates['list']['resources']
    for rate in rateslist:
        rateobj = rate['resource']['fields']
        if rateobj['symbol'].startswith(code):
            return rateobj
    raise RuntimeError("%s: %s not found" % (self.name, code))
python
Helper function to access the rates structure Returns a dict containing name, price, symbol (the code), timestamp, type, utctime & volume
train
https://github.com/panosl/django-currencies/blob/8d4c6c202ad7c4cc06263ab2c1b1f969bbe99acd/currencies/management/commands/_yahoofinance.py#L173-L183
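A hand-made stand-in for the payload shape the method walks (rates['list']['resources'][i]['resource']['fields']), with invented symbols and prices:

rates = {'list': {'resources': [
    {'resource': {'fields': {'symbol': 'EURUSD=X', 'price': '1.08'}}},
    {'resource': {'fields': {'symbol': 'GBPUSD=X', 'price': '1.27'}}},
]}}

code = 'GBP'
for rate in rates['list']['resources']:
    fields = rate['resource']['fields']
    if fields['symbol'].startswith(code):
        print(fields)    # {'symbol': 'GBPUSD=X', 'price': '1.27'}
        break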
5,717
stephenmcd/django-socketio
django_socketio/channels.py
SocketIOChannelProxy.unsubscribe
def unsubscribe(self, channel):
    """
    Remove the channel from this socket's channels, and from the
    list of subscribed session IDs for the channel. Return False
    if not subscribed, otherwise True.
    """
    try:
        CHANNELS[channel].remove(self.socket.session.session_id)
        self.channels.remove(channel)
    except ValueError:
        return False
    return True
python
Remove the channel from this socket's channels, and from the list of subscribed session IDs for the channel. Return False if not subscribed, otherwise True.
train
https://github.com/stephenmcd/django-socketio/blob/b704f912551829a3bcf15872ba0e1baf81dea106/django_socketio/channels.py#L34-L45
5,718
scanny/python-pptx
pptx/shapes/connector.py
Connector._move_begin_to_cxn
def _move_begin_to_cxn(self, shape, cxn_pt_idx):
    """
    Move the begin point of this connector to coordinates of the
    connection point of *shape* specified by *cxn_pt_idx*.
    """
    x, y, cx, cy = shape.left, shape.top, shape.width, shape.height
    self.begin_x, self.begin_y = {
        0: (int(x + cx/2), y),
        1: (x, int(y + cy/2)),
        2: (int(x + cx/2), y + cy),
        3: (x + cx, int(y + cy/2)),
    }[cxn_pt_idx]
python
Move the begin point of this connector to coordinates of the connection point of *shape* specified by *cxn_pt_idx*.
train
https://github.com/scanny/python-pptx/blob/d6ab8234f8b03953d2f831ff9394b1852db34130/pptx/shapes/connector.py#L267-L278
5,719
googleapis/google-cloud-python
bigquery/google/cloud/bigquery/table.py
_table_arg_to_table_ref
def _table_arg_to_table_ref(value, default_project=None):
    """Helper to convert a string or Table to TableReference.

    This function keeps TableReference and other kinds of objects unchanged.
    """
    if isinstance(value, six.string_types):
        value = TableReference.from_string(value, default_project=default_project)
    if isinstance(value, (Table, TableListItem)):
        value = value.reference
    return value
python
Helper to convert a string or Table to TableReference. This function keeps TableReference and other kinds of objects unchanged.
train
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/bigquery/google/cloud/bigquery/table.py#L1928-L1937
5,720
last-partizan/pytils
pytils/numeral.py
in_words_float
def in_words_float(amount, _gender=FEMALE):
    """
    Float in words

    @param amount: float numeral
    @type amount: C{float} or C{Decimal}

    @return: in-words representation of float numeral
    @rtype: C{unicode}

    @raise ValueError: when amount is negative
    """
    check_positive(amount)
    pts = []
    # convert the integer part
    pts.append(sum_string(int(amount), 2, (u"целая", u"целых", u"целых")))
    # now the part after the decimal point
    remainder = _get_float_remainder(amount)
    signs = len(str(remainder)) - 1
    pts.append(sum_string(int(remainder), 2, FRACTIONS[signs]))
    return u" ".join(pts)
python
Float in words @param amount: float numeral @type amount: C{float} or C{Decimal} @return: in-words representation of float numeral @rtype: C{unicode} @raise ValueError: when amount is negative
train
https://github.com/last-partizan/pytils/blob/1c570a32b15e564bc68587b8207e32d464e61d08/pytils/numeral.py#L263-L286
5,721
kylef/refract.py
refract/elements/array.py
Array.index
def index(self, element: Element) -> int:
    """
    Return the index in the array of the first item whose value is
    element. It is an error if there is no such item.

    >>> element = String('hello')
    >>> array = Array(content=[element])
    >>> array.index(element)
    0
    """
    from refract.refraction import refract
    return self.content.index(refract(element))
python
Return the index in the array of the first item whose value is element. It is an error if there is no such item. >>> element = String('hello') >>> array = Array(content=[element]) >>> array.index(element) 0
['Return', 'the', 'index', 'in', 'the', 'array', 'of', 'the', 'first', 'item', 'whose', 'value', 'is', 'element', '.', 'It', 'is', 'an', 'error', 'if', 'there', 'is', 'no', 'such', 'item', '.']
train
https://github.com/kylef/refract.py/blob/f58ddf619038b580ab50c2e7f867d59d153eabbb/refract/elements/array.py#L82-L94
5,722
googlefonts/ufo2ft
Lib/ufo2ft/filters/flattenComponents.py
_flattenComponent
def _flattenComponent(glyphSet, component): """Returns a list of tuples (baseGlyph, transform) of a nested component.""" glyph = glyphSet[component.baseGlyph] if not glyph.components: transformation = Transform(*component.transformation) return [(component.baseGlyph, transformation)] all_flattened_components = [] for nested in glyph.components: flattened_components = _flattenComponent(glyphSet, nested) for i, (_, tr) in enumerate(flattened_components): tr = tr.transform(component.transformation) flattened_components[i] = (flattened_components[i][0], tr) all_flattened_components.extend(flattened_components) return all_flattened_components
python
def _flattenComponent(glyphSet, component): """Returns a list of tuples (baseGlyph, transform) of a nested component.""" glyph = glyphSet[component.baseGlyph] if not glyph.components: transformation = Transform(*component.transformation) return [(component.baseGlyph, transformation)] all_flattened_components = [] for nested in glyph.components: flattened_components = _flattenComponent(glyphSet, nested) for i, (_, tr) in enumerate(flattened_components): tr = tr.transform(component.transformation) flattened_components[i] = (flattened_components[i][0], tr) all_flattened_components.extend(flattened_components) return all_flattened_components
['def', '_flattenComponent', '(', 'glyphSet', ',', 'component', ')', ':', 'glyph', '=', 'glyphSet', '[', 'component', '.', 'baseGlyph', ']', 'if', 'not', 'glyph', '.', 'components', ':', 'transformation', '=', 'Transform', '(', '*', 'component', '.', 'transformation', ')', 'return', '[', '(', 'component', '.', 'baseGlyph', ',', 'transformation', ')', ']', 'all_flattened_components', '=', '[', ']', 'for', 'nested', 'in', 'glyph', '.', 'components', ':', 'flattened_components', '=', '_flattenComponent', '(', 'glyphSet', ',', 'nested', ')', 'for', 'i', ',', '(', '_', ',', 'tr', ')', 'in', 'enumerate', '(', 'flattened_components', ')', ':', 'tr', '=', 'tr', '.', 'transform', '(', 'component', '.', 'transformation', ')', 'flattened_components', '[', 'i', ']', '=', '(', 'flattened_components', '[', 'i', ']', '[', '0', ']', ',', 'tr', ')', 'all_flattened_components', '.', 'extend', '(', 'flattened_components', ')', 'return', 'all_flattened_components']
Returns a list of tuples (baseGlyph, transform) of a nested component.
['Returns', 'a', 'list', 'of', 'tuples', '(', 'baseGlyph', 'transform', ')', 'of', 'a', 'nested', 'component', '.']
train
https://github.com/googlefonts/ufo2ft/blob/915b986558e87bee288765d9218cc1cd4ebf7f4c/Lib/ufo2ft/filters/flattenComponents.py#L37-L52
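A toy demonstration of the recursion above, using SimpleNamespace stand-ins for UFO glyph and component objects (real objects carry more attributes). Only translations are used, so the composition order of the fontTools Transform does not affect the result:

    from types import SimpleNamespace
    from ufo2ft.filters.flattenComponents import _flattenComponent

    # "a" is a leaf glyph; "acomb" nests "a", shifted up by 100 units.
    glyphSet = {
        "a": SimpleNamespace(components=[]),
        "acomb": SimpleNamespace(components=[
            SimpleNamespace(baseGlyph="a", transformation=(1, 0, 0, 1, 0, 100)),
        ]),
    }

    # A component pointing at "acomb", itself shifted right by 50 units.
    comp = SimpleNamespace(baseGlyph="acomb", transformation=(1, 0, 0, 1, 50, 0))
    print(_flattenComponent(glyphSet, comp))
    # expected: [('a', <Transform [1 0 0 1 50 100]>)]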
5,723
QUANTAXIS/QUANTAXIS
QUANTAXIS/QAData/base_datastruct.py
_quotation_base.get_bar
def get_bar(self, code, time): """ Get the data for a single bar, returned as a Series. Raises ValueError if the bar does not exist. """ try: return self.data.loc[(pd.Timestamp(time), code)] except: raise ValueError( 'DATASTRUCT CURRENTLY CANNOT FIND THIS BAR WITH {} {}'.format( code, time ) )
python
def get_bar(self, code, time): """ Get the data for a single bar, returned as a Series. Raises ValueError if the bar does not exist. """ try: return self.data.loc[(pd.Timestamp(time), code)] except: raise ValueError( 'DATASTRUCT CURRENTLY CANNOT FIND THIS BAR WITH {} {}'.format( code, time ) )
['def', 'get_bar', '(', 'self', ',', 'code', ',', 'time', ')', ':', 'try', ':', 'return', 'self', '.', 'data', '.', 'loc', '[', '(', 'pd', '.', 'Timestamp', '(', 'time', ')', ',', 'code', ')', ']', 'except', ':', 'raise', 'ValueError', '(', "'DATASTRUCT CURRENTLY CANNOT FIND THIS BAR WITH {} {}'", '.', 'format', '(', 'code', ',', 'time', ')', ')']
Get the data for a single bar, returned as a Series. Raises ValueError if the bar does not exist.
['Get', 'the', 'data', 'for', 'a', 'single', 'bar', 'returned', 'as', 'a', 'Series', 'Raises', 'ValueError', 'if', 'the', 'bar', 'does', 'not', 'exist']
train
https://github.com/QUANTAXIS/QUANTAXIS/blob/bb1fe424e4108b62a1f712b81a05cf829297a5c0/QUANTAXIS/QAData/base_datastruct.py#L1264-L1278
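The lookup itself is a plain pandas MultiIndex access; a self-contained sketch of the same pattern, without QUANTAXIS, mirroring the (datetime, code) index that get_bar assumes:

    import pandas as pd

    idx = pd.MultiIndex.from_tuples(
        [(pd.Timestamp("2019-01-02"), "000001"),
         (pd.Timestamp("2019-01-03"), "000001")],
        names=["datetime", "code"],
    )
    data = pd.DataFrame({"close": [10.0, 10.5]}, index=idx)

    bar = data.loc[(pd.Timestamp("2019-01-03"), "000001")]  # one bar, as a Series
    print(bar["close"])  # 10.5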
5,724
mozilla/configman
configman/value_sources/for_configobj.py
ConfigObjWithIncludes._expand_files
def _expand_files(self, file_name, original_path, indent=""): """This recursive function accepts a file name, opens the file and then spools the contents of the file into a list, examining each line as it does so. If it detects a line beginning with "+include", it assumes the string immediately following is a file name. Recursing, the new file is opened and its contents are spooled into the accumulating list.""" expanded_file_contents = [] with open(file_name) as f: for a_line in f: match = ConfigObjWithIncludes._include_re.match(a_line) if match: include_file = match.group(2) include_file = os.path.join( original_path, include_file ) new_lines = self._expand_files( include_file, os.path.dirname(include_file), indent + match.group(1) ) expanded_file_contents.extend(new_lines) else: expanded_file_contents.append(indent + a_line.rstrip()) return expanded_file_contents
python
def _expand_files(self, file_name, original_path, indent=""): """This recursive function accepts a file name, opens the file and then spools the contents of the file into a list, examining each line as it does so. If it detects a line beginning with "+include", it assumes the string immediately following is a file name. Recursing, the new file is opened and its contents are spooled into the accumulating list.""" expanded_file_contents = [] with open(file_name) as f: for a_line in f: match = ConfigObjWithIncludes._include_re.match(a_line) if match: include_file = match.group(2) include_file = os.path.join( original_path, include_file ) new_lines = self._expand_files( include_file, os.path.dirname(include_file), indent + match.group(1) ) expanded_file_contents.extend(new_lines) else: expanded_file_contents.append(indent + a_line.rstrip()) return expanded_file_contents
['def', '_expand_files', '(', 'self', ',', 'file_name', ',', 'original_path', ',', 'indent', '=', '""', ')', ':', 'expanded_file_contents', '=', '[', ']', 'with', 'open', '(', 'file_name', ')', 'as', 'f', ':', 'for', 'a_line', 'in', 'f', ':', 'match', '=', 'ConfigObjWithIncludes', '.', '_include_re', '.', 'match', '(', 'a_line', ')', 'if', 'match', ':', 'include_file', '=', 'match', '.', 'group', '(', '2', ')', 'include_file', '=', 'os', '.', 'path', '.', 'join', '(', 'original_path', ',', 'include_file', ')', 'new_lines', '=', 'self', '.', '_expand_files', '(', 'include_file', ',', 'os', '.', 'path', '.', 'dirname', '(', 'include_file', ')', ',', 'indent', '+', 'match', '.', 'group', '(', '1', ')', ')', 'expanded_file_contents', '.', 'extend', '(', 'new_lines', ')', 'else', ':', 'expanded_file_contents', '.', 'append', '(', 'indent', '+', 'a_line', '.', 'rstrip', '(', ')', ')', 'return', 'expanded_file_contents']
This recursive function accepts a file name, opens the file and then spools the contents of the file into a list, examining each line as it does so. If it detects a line beginning with "+include", it assumes the string immediately following is a file name. Recursing, the new file is opened and its contents are spooled into the accumulating list.
['This', 'recursive', 'function', 'accepts', 'a', 'file', 'name', 'opens', 'the', 'file', 'and', 'then', 'spools', 'the', 'contents', 'of', 'the', 'file', 'into', 'a', 'list', 'examining', 'each', 'line', 'as', 'it', 'does', 'so', '.', 'If', 'it', 'detects', 'a', 'line', 'beginning', 'with', '+', 'include', 'it', 'assumes', 'the', 'string', 'immediately', 'following', 'is', 'a', 'file', 'name', '.', 'Recursing', 'the', 'new', 'file', 'is', 'opened', 'and', 'its', 'contents', 'are', 'spooled', 'into', 'the', 'accumulating', 'list', '.']
train
https://github.com/mozilla/configman/blob/83159fed61cc4cbbe5a4a6a00d3acad8a0c39c96/configman/value_sources/for_configobj.py#L73-L98
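A hedged sketch of the "+include" directive in action. It assumes ConfigObjWithIncludes is constructed like a stock ConfigObj and resolves include paths relative to the including file, as the method above suggests; the file names are hypothetical.

    import os
    import tempfile

    from configman.value_sources.for_configobj import ConfigObjWithIncludes

    tmp = tempfile.mkdtemp()
    with open(os.path.join(tmp, "common.ini"), "w") as f:
        f.write("host = localhost\n")
    with open(os.path.join(tmp, "base.ini"), "w") as f:
        f.write("[database]\n    +include common.ini\n")

    # The "+include" line is replaced by common.ini's lines, with the
    # indentation that preceded the directive prepended to each one.
    config = ConfigObjWithIncludes(os.path.join(tmp, "base.ini"))
    print(config["database"]["host"])  # localhost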
5,725
datosgobar/pydatajson
pydatajson/documentation.py
distribution_to_markdown
def distribution_to_markdown(distribution): """Generates markdown text from the metadata of a `distribution`. Args: distribution (dict): Dictionary with the metadata of a `distribution`. Returns: str: Text describing a `distribution`. """ text_template = """ ### {title} {description} #### Campos del recurso {fields} """ if "field" in distribution: fields = "- " + \ "\n- ".join(map(field_to_markdown, distribution["field"])) else: fields = "" text = text_template.format( title=distribution["title"], description=distribution.get("description", ""), fields=fields ) return text
python
def distribution_to_markdown(distribution): """Generates markdown text from the metadata of a `distribution`. Args: distribution (dict): Dictionary with the metadata of a `distribution`. Returns: str: Text describing a `distribution`. """ text_template = """ ### {title} {description} #### Campos del recurso {fields} """ if "field" in distribution: fields = "- " + \ "\n- ".join(map(field_to_markdown, distribution["field"])) else: fields = "" text = text_template.format( title=distribution["title"], description=distribution.get("description", ""), fields=fields ) return text
['def', 'distribution_to_markdown', '(', 'distribution', ')', ':', 'text_template', '=', '"""\n### {title}\n\n{description}\n\n#### Campos del recurso\n\n{fields}\n"""', 'if', '"field"', 'in', 'distribution', ':', 'fields', '=', '"- "', '+', '"\\n- "', '.', 'join', '(', 'map', '(', 'field_to_markdown', ',', 'distribution', '[', '"field"', ']', ')', ')', 'else', ':', 'fields', '=', '""', 'text', '=', 'text_template', '.', 'format', '(', 'title', '=', 'distribution', '[', '"title"', ']', ',', 'description', '=', 'distribution', '.', 'get', '(', '"description"', ',', '""', ')', ',', 'fields', '=', 'fields', ')', 'return', 'text']
Generates markdown text from the metadata of a `distribution`. Args: distribution (dict): Dictionary with the metadata of a `distribution`. Returns: str: Text describing a `distribution`.
['Generates', 'markdown', 'text', 'from', 'the', 'metadata', 'of', 'a', 'distribution', '.']
train
https://github.com/datosgobar/pydatajson/blob/3141082ffbaa295e2deaf6ffbbc5a59f5859960e/pydatajson/documentation.py#L49-L82
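A minimal call sketch, assuming the function is importable as pydatajson.documentation.distribution_to_markdown (per the file path above); the keys inside each "field" entry are assumptions about what field_to_markdown consumes, not confirmed by this record.

    from pydatajson.documentation import distribution_to_markdown

    distribution = {
        "title": "Indice de precios",
        "description": "Serie mensual del indice.",
        "field": [
            {"title": "fecha", "description": "Fecha de la medicion"},
            {"title": "valor", "description": "Valor del indice"},
        ],
    }
    print(distribution_to_markdown(distribution))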
5,726
yahoo/TensorFlowOnSpark
tensorflowonspark/TFSparkNode.py
TFNodeContext.export_saved_model
def export_saved_model(self, sess, export_dir, tag_set, signatures): """Convenience function to access ``TFNode.export_saved_model`` directly from this object instance.""" TFNode.export_saved_model(sess, export_dir, tag_set, signatures)
python
def export_saved_model(self, sess, export_dir, tag_set, signatures): """Convenience function to access ``TFNode.export_saved_model`` directly from this object instance.""" TFNode.export_saved_model(sess, export_dir, tag_set, signatures)
['def', 'export_saved_model', '(', 'self', ',', 'sess', ',', 'export_dir', ',', 'tag_set', ',', 'signatures', ')', ':', 'TFNode', '.', 'export_saved_model', '(', 'sess', ',', 'export_dir', ',', 'tag_set', ',', 'signatures', ')']
Convenience function to access ``TFNode.export_saved_model`` directly from this object instance.
['Convenience', 'function', 'to', 'access', 'TFNode', '.', 'export_saved_model', 'directly', 'from', 'this', 'object', 'instance', '.']
train
https://github.com/yahoo/TensorFlowOnSpark/blob/5e4b6c185ab722fd0104ede0377e1149ea8d6f7c/tensorflowonspark/TFSparkNode.py#L65-L67
5,727
swevm/scaleio-py
scaleiopy/api/scaleio/cluster/sds.py
Sds.get_sds_by_name
def get_sds_by_name(self,name): """ Get ScaleIO SDS object by its name :param name: Name of SDS :return: ScaleIO SDS object :raise KeyError: No SDS with specified name found :rtype: SDS object """ for sds in self.sds: if sds.name == name: return sds raise KeyError("SDS of that name not found")
python
def get_sds_by_name(self,name): """ Get ScaleIO SDS object by its name :param name: Name of SDS :return: ScaleIO SDS object :raise KeyError: No SDS with specified name found :rtype: SDS object """ for sds in self.sds: if sds.name == name: return sds raise KeyError("SDS of that name not found")
['def', 'get_sds_by_name', '(', 'self', ',', 'name', ')', ':', 'for', 'sds', 'in', 'self', '.', 'sds', ':', 'if', 'sds', '.', 'name', '==', 'name', ':', 'return', 'sds', 'raise', 'KeyError', '(', '"SDS of that name not found"', ')']
Get ScaleIO SDS object by its name :param name: Name of SDS :return: ScaleIO SDS object :raise KeyError: No SDS with specified name found :rtype: SDS object
['Get', 'ScaleIO', 'SDS', 'object', 'by', 'its', 'name', ':', 'param', 'name', ':', 'Name', 'of', 'SDS', ':', 'return', ':', 'ScaleIO', 'SDS', 'object', ':', 'raise', 'KeyError', ':', 'No', 'SDS', 'with', 'specified', 'name', 'found', ':', 'rtype', ':', 'SDS', 'object']
train
https://github.com/swevm/scaleio-py/blob/d043a0137cb925987fd5c895a3210968ce1d9028/scaleiopy/api/scaleio/cluster/sds.py#L91-L102
5,728
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_firmware_ext.py
brocade_firmware_ext.show_firmware_version_output_show_firmware_version_control_processor_chipset
def show_firmware_version_output_show_firmware_version_control_processor_chipset(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") show_firmware_version = ET.Element("show_firmware_version") config = show_firmware_version output = ET.SubElement(show_firmware_version, "output") show_firmware_version = ET.SubElement(output, "show-firmware-version") control_processor_chipset = ET.SubElement(show_firmware_version, "control-processor-chipset") control_processor_chipset.text = kwargs.pop('control_processor_chipset') callback = kwargs.pop('callback', self._callback) return callback(config)
python
def show_firmware_version_output_show_firmware_version_control_processor_chipset(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") show_firmware_version = ET.Element("show_firmware_version") config = show_firmware_version output = ET.SubElement(show_firmware_version, "output") show_firmware_version = ET.SubElement(output, "show-firmware-version") control_processor_chipset = ET.SubElement(show_firmware_version, "control-processor-chipset") control_processor_chipset.text = kwargs.pop('control_processor_chipset') callback = kwargs.pop('callback', self._callback) return callback(config)
['def', 'show_firmware_version_output_show_firmware_version_control_processor_chipset', '(', 'self', ',', '*', '*', 'kwargs', ')', ':', 'config', '=', 'ET', '.', 'Element', '(', '"config"', ')', 'show_firmware_version', '=', 'ET', '.', 'Element', '(', '"show_firmware_version"', ')', 'config', '=', 'show_firmware_version', 'output', '=', 'ET', '.', 'SubElement', '(', 'show_firmware_version', ',', '"output"', ')', 'show_firmware_version', '=', 'ET', '.', 'SubElement', '(', 'output', ',', '"show-firmware-version"', ')', 'control_processor_chipset', '=', 'ET', '.', 'SubElement', '(', 'show_firmware_version', ',', '"control-processor-chipset"', ')', 'control_processor_chipset', '.', 'text', '=', 'kwargs', '.', 'pop', '(', "'control_processor_chipset'", ')', 'callback', '=', 'kwargs', '.', 'pop', '(', "'callback'", ',', 'self', '.', '_callback', ')', 'return', 'callback', '(', 'config', ')']
Auto Generated Code
['Auto', 'Generated', 'Code']
train
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_firmware_ext.py#L123-L135
5,729
pyviz/holoviews
holoviews/plotting/mpl/util.py
fix_aspect
def fix_aspect(fig, nrows, ncols, title=None, extra_artists=[], vspace=0.2, hspace=0.2): """ Calculate heights and widths of axes and adjust the size of the figure to match the aspect. """ fig.canvas.draw() w, h = fig.get_size_inches() # Compute maximum height and width of each row and columns rows = resolve_rows([[ax] for ax in fig.axes]) rs, cs = len(rows), max([len(r) for r in rows]) heights = [[] for i in range(cs)] widths = [[] for i in range(rs)] for r, row in enumerate(rows): for c, ax in enumerate(row): bbox = ax.get_tightbbox(fig.canvas.get_renderer()) heights[c].append(bbox.height) widths[r].append(bbox.width) height = (max([sum(c) for c in heights])) + nrows*vspace*fig.dpi width = (max([sum(r) for r in widths])) + ncols*hspace*fig.dpi # Compute aspect and set new size (in inches) aspect = height/width offset = 0 if title and title.get_text(): offset = title.get_window_extent().height/fig.dpi fig.set_size_inches(w, (w*aspect)+offset) # Redraw and adjust title position if defined fig.canvas.draw() if title and title.get_text(): extra_artists = [a for a in extra_artists if a is not title] bbox = get_tight_bbox(fig, extra_artists) top = bbox.intervaly[1] if title and title.get_text(): title.set_y((top/(w*aspect)))
python
def fix_aspect(fig, nrows, ncols, title=None, extra_artists=[], vspace=0.2, hspace=0.2): """ Calculate heights and widths of axes and adjust the size of the figure to match the aspect. """ fig.canvas.draw() w, h = fig.get_size_inches() # Compute maximum height and width of each row and columns rows = resolve_rows([[ax] for ax in fig.axes]) rs, cs = len(rows), max([len(r) for r in rows]) heights = [[] for i in range(cs)] widths = [[] for i in range(rs)] for r, row in enumerate(rows): for c, ax in enumerate(row): bbox = ax.get_tightbbox(fig.canvas.get_renderer()) heights[c].append(bbox.height) widths[r].append(bbox.width) height = (max([sum(c) for c in heights])) + nrows*vspace*fig.dpi width = (max([sum(r) for r in widths])) + ncols*hspace*fig.dpi # Compute aspect and set new size (in inches) aspect = height/width offset = 0 if title and title.get_text(): offset = title.get_window_extent().height/fig.dpi fig.set_size_inches(w, (w*aspect)+offset) # Redraw and adjust title position if defined fig.canvas.draw() if title and title.get_text(): extra_artists = [a for a in extra_artists if a is not title] bbox = get_tight_bbox(fig, extra_artists) top = bbox.intervaly[1] if title and title.get_text(): title.set_y((top/(w*aspect)))
['def', 'fix_aspect', '(', 'fig', ',', 'nrows', ',', 'ncols', ',', 'title', '=', 'None', ',', 'extra_artists', '=', '[', ']', ',', 'vspace', '=', '0.2', ',', 'hspace', '=', '0.2', ')', ':', 'fig', '.', 'canvas', '.', 'draw', '(', ')', 'w', ',', 'h', '=', 'fig', '.', 'get_size_inches', '(', ')', '# Compute maximum height and width of each row and columns', 'rows', '=', 'resolve_rows', '(', '[', '[', 'ax', ']', 'for', 'ax', 'in', 'fig', '.', 'axes', ']', ')', 'rs', ',', 'cs', '=', 'len', '(', 'rows', ')', ',', 'max', '(', '[', 'len', '(', 'r', ')', 'for', 'r', 'in', 'rows', ']', ')', 'heights', '=', '[', '[', ']', 'for', 'i', 'in', 'range', '(', 'cs', ')', ']', 'widths', '=', '[', '[', ']', 'for', 'i', 'in', 'range', '(', 'rs', ')', ']', 'for', 'r', ',', 'row', 'in', 'enumerate', '(', 'rows', ')', ':', 'for', 'c', ',', 'ax', 'in', 'enumerate', '(', 'row', ')', ':', 'bbox', '=', 'ax', '.', 'get_tightbbox', '(', 'fig', '.', 'canvas', '.', 'get_renderer', '(', ')', ')', 'heights', '[', 'c', ']', '.', 'append', '(', 'bbox', '.', 'height', ')', 'widths', '[', 'r', ']', '.', 'append', '(', 'bbox', '.', 'width', ')', 'height', '=', '(', 'max', '(', '[', 'sum', '(', 'c', ')', 'for', 'c', 'in', 'heights', ']', ')', ')', '+', 'nrows', '*', 'vspace', '*', 'fig', '.', 'dpi', 'width', '=', '(', 'max', '(', '[', 'sum', '(', 'r', ')', 'for', 'r', 'in', 'widths', ']', ')', ')', '+', 'ncols', '*', 'hspace', '*', 'fig', '.', 'dpi', '# Compute aspect and set new size (in inches)', 'aspect', '=', 'height', '/', 'width', 'offset', '=', '0', 'if', 'title', 'and', 'title', '.', 'get_text', '(', ')', ':', 'offset', '=', 'title', '.', 'get_window_extent', '(', ')', '.', 'height', '/', 'fig', '.', 'dpi', 'fig', '.', 'set_size_inches', '(', 'w', ',', '(', 'w', '*', 'aspect', ')', '+', 'offset', ')', '# Redraw and adjust title position if defined', 'fig', '.', 'canvas', '.', 'draw', '(', ')', 'if', 'title', 'and', 'title', '.', 'get_text', '(', ')', ':', 'extra_artists', '=', '[', 'a', 'for', 'a', 'in', 'extra_artists', 'if', 'a', 'is', 'not', 'title', ']', 'bbox', '=', 'get_tight_bbox', '(', 'fig', ',', 'extra_artists', ')', 'top', '=', 'bbox', '.', 'intervaly', '[', '1', ']', 'if', 'title', 'and', 'title', '.', 'get_text', '(', ')', ':', 'title', '.', 'set_y', '(', '(', 'top', '/', '(', 'w', '*', 'aspect', ')', ')', ')']
Calculate heights and widths of axes and adjust the size of the figure to match the aspect.
['Calculate', 'heights', 'and', 'widths', 'of', 'axes', 'and', 'adjust', 'the', 'size', 'of', 'the', 'figure', 'to', 'match', 'the', 'aspect', '.']
train
https://github.com/pyviz/holoviews/blob/ae0dd2f3de448b0ca5e9065aabd6ef8d84c7e655/holoviews/plotting/mpl/util.py#L221-L258
5,730
datastax/python-driver
cassandra/cqltypes.py
lookup_casstype
def lookup_casstype(casstype): """ Given a Cassandra type as a string (possibly including parameters), hand back the CassandraType class responsible for it. If a name is not recognized, a custom _UnrecognizedType subclass will be created for it. Example: >>> lookup_casstype('org.apache.cassandra.db.marshal.MapType(org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.Int32Type)') <class 'cassandra.cqltypes.MapType(UTF8Type, Int32Type)'> """ if isinstance(casstype, (CassandraType, CassandraTypeType)): return casstype try: return parse_casstype_args(casstype) except (ValueError, AssertionError, IndexError) as e: raise ValueError("Don't know how to parse type string %r: %s" % (casstype, e))
python
def lookup_casstype(casstype): """ Given a Cassandra type as a string (possibly including parameters), hand back the CassandraType class responsible for it. If a name is not recognized, a custom _UnrecognizedType subclass will be created for it. Example: >>> lookup_casstype('org.apache.cassandra.db.marshal.MapType(org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.Int32Type)') <class 'cassandra.cqltypes.MapType(UTF8Type, Int32Type)'> """ if isinstance(casstype, (CassandraType, CassandraTypeType)): return casstype try: return parse_casstype_args(casstype) except (ValueError, AssertionError, IndexError) as e: raise ValueError("Don't know how to parse type string %r: %s" % (casstype, e))
['def', 'lookup_casstype', '(', 'casstype', ')', ':', 'if', 'isinstance', '(', 'casstype', ',', '(', 'CassandraType', ',', 'CassandraTypeType', ')', ')', ':', 'return', 'casstype', 'try', ':', 'return', 'parse_casstype_args', '(', 'casstype', ')', 'except', '(', 'ValueError', ',', 'AssertionError', ',', 'IndexError', ')', 'as', 'e', ':', 'raise', 'ValueError', '(', '"Don\'t know how to parse type string %r: %s"', '%', '(', 'casstype', ',', 'e', ')', ')']
Given a Cassandra type as a string (possibly including parameters), hand back the CassandraType class responsible for it. If a name is not recognized, a custom _UnrecognizedType subclass will be created for it. Example: >>> lookup_casstype('org.apache.cassandra.db.marshal.MapType(org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.Int32Type)') <class 'cassandra.cqltypes.MapType(UTF8Type, Int32Type)'>
['Given', 'a', 'Cassandra', 'type', 'as', 'a', 'string', '(', 'possibly', 'including', 'parameters', ')', 'hand', 'back', 'the', 'CassandraType', 'class', 'responsible', 'for', 'it', '.', 'If', 'a', 'name', 'is', 'not', 'recognized', 'a', 'custom', '_UnrecognizedType', 'subclass', 'will', 'be', 'created', 'for', 'it', '.']
train
https://github.com/datastax/python-driver/blob/30a80d0b798b1f45f8cb77163b1fa791f3e3ca29/cassandra/cqltypes.py#L172-L189
5,731
smarie/python-parsyfiles
parsyfiles/converting_core.py
get_validated_type
def get_validated_type(object_type: Type[Any], name: str, enforce_not_joker: bool = True) -> Type[Any]: """ Utility to validate a type: * None is not allowed, * 'object', 'AnyObject' and 'Any' lead to the same 'AnyObject' type * JOKER is either rejected (if enforce_not_joker is True, default) or accepted 'as is' :param object_type: the type to validate :param name: a name used in exceptions if any :param enforce_not_joker: a boolean, set to False to tolerate JOKER types :return: the fixed type """ if object_type is object or object_type is Any or object_type is AnyObject: return AnyObject else: # -- !! Do not check TypeVar or Union : this is already handled at higher levels -- if object_type is JOKER: # optionally check if JOKER is allowed if enforce_not_joker: raise ValueError('JOKER is not allowed for object_type') else: # note: we don't check var earlier, since 'typing.Any' is not a subclass of type anymore check_var(object_type, var_types=type, var_name=name) return object_type
python
def get_validated_type(object_type: Type[Any], name: str, enforce_not_joker: bool = True) -> Type[Any]: """ Utility to validate a type: * None is not allowed, * 'object', 'AnyObject' and 'Any' lead to the same 'AnyObject' type * JOKER is either rejected (if enforce_not_joker is True, default) or accepted 'as is' :param object_type: the type to validate :param name: a name used in exceptions if any :param enforce_not_joker: a boolean, set to False to tolerate JOKER types :return: the fixed type """ if object_type is object or object_type is Any or object_type is AnyObject: return AnyObject else: # -- !! Do not check TypeVar or Union : this is already handled at higher levels -- if object_type is JOKER: # optionally check if JOKER is allowed if enforce_not_joker: raise ValueError('JOKER is not allowed for object_type') else: # note: we don't check var earlier, since 'typing.Any' is not a subclass of type anymore check_var(object_type, var_types=type, var_name=name) return object_type
['def', 'get_validated_type', '(', 'object_type', ':', 'Type', '[', 'Any', ']', ',', 'name', ':', 'str', ',', 'enforce_not_joker', ':', 'bool', '=', 'True', ')', '->', 'Type', '[', 'Any', ']', ':', 'if', 'object_type', 'is', 'object', 'or', 'object_type', 'is', 'Any', 'or', 'object_type', 'is', 'AnyObject', ':', 'return', 'AnyObject', 'else', ':', '# -- !! Do not check TypeVar or Union : this is already handled at higher levels --', 'if', 'object_type', 'is', 'JOKER', ':', '# optionally check if JOKER is allowed', 'if', 'enforce_not_joker', ':', 'raise', 'ValueError', '(', "'JOKER is not allowed for object_type'", ')', 'else', ':', "# note: we don't check var earlier, since 'typing.Any' is not a subclass of type anymore", 'check_var', '(', 'object_type', ',', 'var_types', '=', 'type', ',', 'var_name', '=', 'name', ')', 'return', 'object_type']
Utility to validate a type: * None is not allowed, * 'object', 'AnyObject' and 'Any' lead to the same 'AnyObject' type * JOKER is either rejected (if enforce_not_joker is True, default) or accepted 'as is' :param object_type: the type to validate :param name: a name used in exceptions if any :param enforce_not_joker: a boolean, set to False to tolerate JOKER types :return: the fixed type
['Utility', 'to', 'validate', 'a', 'type', ':', '*', 'None', 'is', 'not', 'allowed', '*', 'object', 'AnyObject', 'and', 'Any', 'lead', 'to', 'the', 'same', 'AnyObject', 'type', '*', 'JOKER', 'is', 'either', 'rejected', '(', 'if', 'enforce_not_joker', 'is', 'True', 'default', ')', 'or', 'accepted', 'as', 'is']
train
https://github.com/smarie/python-parsyfiles/blob/344b37e1151e8d4e7c2ee49ae09d6568715ae64e/parsyfiles/converting_core.py#L65-L88
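A behavior sketch for get_validated_type, assuming AnyObject, JOKER, and the function itself are all importable from parsyfiles.converting_core, as the file path suggests:

    from typing import Any
    from parsyfiles.converting_core import AnyObject, JOKER, get_validated_type

    assert get_validated_type(int, "object_type") is int
    assert get_validated_type(Any, "object_type") is AnyObject     # Any is normalized
    assert get_validated_type(object, "object_type") is AnyObject  # so is object

    # JOKER is rejected by default, tolerated only on request.
    assert get_validated_type(JOKER, "object_type", enforce_not_joker=False) is JOKER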
5,732
oscarlazoarjona/fast
build/lib/fast/atomic_structure.py
collision_rate
def collision_rate(Temperature, element, isotope): r"""This function receives the temperature of an atomic vapour (in Kelvin), the element, and the isotope of the atoms, and returns the angular frequency rate of collisions (in rad/s) in a vapour assuming a Maxwell-Boltzmann velocity distribution, and taking the cross section of the collision to be sigma=pi*(2*r)**2 where r is the atomic radius. The collision rate returned is gamma_col=2*pi* ( sigma * v * n ) where v is the average velocity of the distribution, and n is the number density of the vapour. A few examples (in Hz): >>> print collision_rate(25 + 273.15, "Cs", 133)/2/pi 9.0607260277 For cesium collisions become important for temperatures above 120 Celsius. >>> print collision_rate(120 + 273.15, "Cs", 133)/2/pi 10519.235289 """ atom=Atom(element,isotope) sigma=pi*(2*atom.radius)**2 v=speed_average(Temperature,element,isotope) n=vapour_number_density(Temperature,element) return 2*pi*sigma*v*n
python
def collision_rate(Temperature, element, isotope): r"""This function receives the temperature of an atomic vapour (in Kelvin), the element, and the isotope of the atoms, and returns the angular frequency rate of collisions (in rad/s) in a vapour assuming a Maxwell-Boltzmann velocity distribution, and taking the cross section of the collision to be sigma=pi*(2*r)**2 where r is the atomic radius. The collision rate returned is gamma_col=2*pi* ( sigma * v * n ) where v is the average velocity of the distribution, and n is the number density of the vapour. A few examples (in Hz): >>> print collision_rate(25 + 273.15, "Cs", 133)/2/pi 9.0607260277 For cesium collisions become important for temperatures above 120 Celsius. >>> print collision_rate(120 + 273.15, "Cs", 133)/2/pi 10519.235289 """ atom=Atom(element,isotope) sigma=pi*(2*atom.radius)**2 v=speed_average(Temperature,element,isotope) n=vapour_number_density(Temperature,element) return 2*pi*sigma*v*n
['def', 'collision_rate', '(', 'Temperature', ',', 'element', ',', 'isotope', ')', ':', 'atom', '=', 'Atom', '(', 'element', ',', 'isotope', ')', 'sigma', '=', 'pi', '*', '(', '2', '*', 'atom', '.', 'radius', ')', '**', '2', 'v', '=', 'speed_average', '(', 'Temperature', ',', 'element', ',', 'isotope', ')', 'n', '=', 'vapour_number_density', '(', 'Temperature', ',', 'element', ')', 'return', '2', '*', 'pi', '*', 'sigma', '*', 'v', '*', 'n']
r"""This function recieves the temperature of an atomic vapour (in Kelvin), the element, and the isotope of the atoms, and returns the angular frequency rate of collisions (in rad/s) in a vapour assuming a Maxwell-Boltzmann velocity distribution, and taking the cross section of the collision to be sigma=pi*(2*r)**2 where r is the atomic radius. colission rate returned is gamma_col=2*pi* ( sigma * v * n ) where v is the average velocity of the distribution, and n is the number density of the vapour. A few examples (in Hz): >>> print collision_rate(25 + 273.15, "Cs", 133)/2/pi 9.0607260277 For cesium collisions become important for temperatures above 120 Celsius. >>> print collision_rate(120 + 273.15, "Cs", 133)/2/pi 10519.235289
['r', 'This', 'function', 'receives', 'the', 'temperature', 'of', 'an', 'atomic', 'vapour', '(', 'in', 'Kelvin', ')', 'the', 'element', 'and', 'the', 'isotope', 'of', 'the', 'atoms', 'and', 'returns', 'the', 'angular', 'frequency', 'rate', 'of', 'collisions', '(', 'in', 'rad', '/', 's', ')', 'in', 'a', 'vapour', 'assuming', 'a', 'Maxwell', '-', 'Boltzmann', 'velocity', 'distribution', 'and', 'taking', 'the', 'cross', 'section', 'of', 'the', 'collision', 'to', 'be']
train
https://github.com/oscarlazoarjona/fast/blob/3e5400672af2a7b7cc616e7f4aa10d7672720222/build/lib/fast/atomic_structure.py#L2063-L2093
5,733
niemasd/TreeSwift
treeswift/Tree.py
Tree.extract_tree
def extract_tree(self, labels, without, suppress_unifurcations=True): '''Helper function for ``extract_tree_*`` functions''' if not isinstance(suppress_unifurcations, bool): raise TypeError("suppress_unifurcations must be a bool") if labels is not None and not isinstance(labels, set): try: labels = set(labels) except: raise TypeError("labels must be iterable") label_to_leaf = dict(); keep = set() for node in self.traverse_leaves(): label_to_leaf[str(node)] = node if labels is None or (without and str(node) not in labels) or (not without and str(node) in labels): keep.add(node) for node in list(keep): for a in node.traverse_ancestors(include_self=False): keep.add(a) out = Tree(); out.root.label = self.root.label; out.root.edge_length = self.root.edge_length q_old = deque(); q_old.append(self.root) q_new = deque(); q_new.append(out.root) while len(q_old) != 0: n_old = q_old.popleft(); n_new = q_new.popleft() for c_old in n_old.children: if c_old in keep: c_new = Node(label=str(c_old), edge_length=c_old.edge_length); n_new.add_child(c_new) q_old.append(c_old); q_new.append(c_new) if suppress_unifurcations: out.suppress_unifurcations() return out
python
def extract_tree(self, labels, without, suppress_unifurcations=True): '''Helper function for ``extract_tree_*`` functions''' if not isinstance(suppress_unifurcations, bool): raise TypeError("suppress_unifurcations must be a bool") if labels is not None and not isinstance(labels, set): try: labels = set(labels) except: raise TypeError("labels must be iterable") label_to_leaf = dict(); keep = set() for node in self.traverse_leaves(): label_to_leaf[str(node)] = node if labels is None or (without and str(node) not in labels) or (not without and str(node) in labels): keep.add(node) for node in list(keep): for a in node.traverse_ancestors(include_self=False): keep.add(a) out = Tree(); out.root.label = self.root.label; out.root.edge_length = self.root.edge_length q_old = deque(); q_old.append(self.root) q_new = deque(); q_new.append(out.root) while len(q_old) != 0: n_old = q_old.popleft(); n_new = q_new.popleft() for c_old in n_old.children: if c_old in keep: c_new = Node(label=str(c_old), edge_length=c_old.edge_length); n_new.add_child(c_new) q_old.append(c_old); q_new.append(c_new) if suppress_unifurcations: out.suppress_unifurcations() return out
['def', 'extract_tree', '(', 'self', ',', 'labels', ',', 'without', ',', 'suppress_unifurcations', '=', 'True', ')', ':', 'if', 'not', 'isinstance', '(', 'suppress_unifurcations', ',', 'bool', ')', ':', 'raise', 'TypeError', '(', '"suppress_unifurcations must be a bool"', ')', 'if', 'labels', 'is', 'not', 'None', 'and', 'not', 'isinstance', '(', 'labels', ',', 'set', ')', ':', 'try', ':', 'labels', '=', 'set', '(', 'labels', ')', 'except', ':', 'raise', 'TypeError', '(', '"labels must be iterable"', ')', 'label_to_leaf', '=', 'dict', '(', ')', 'keep', '=', 'set', '(', ')', 'for', 'node', 'in', 'self', '.', 'traverse_leaves', '(', ')', ':', 'label_to_leaf', '[', 'str', '(', 'node', ')', ']', '=', 'node', 'if', 'labels', 'is', 'None', 'or', '(', 'without', 'and', 'str', '(', 'node', ')', 'not', 'in', 'labels', ')', 'or', '(', 'not', 'without', 'and', 'str', '(', 'node', ')', 'in', 'labels', ')', ':', 'keep', '.', 'add', '(', 'node', ')', 'for', 'node', 'in', 'list', '(', 'keep', ')', ':', 'for', 'a', 'in', 'node', '.', 'traverse_ancestors', '(', 'include_self', '=', 'False', ')', ':', 'keep', '.', 'add', '(', 'a', ')', 'out', '=', 'Tree', '(', ')', 'out', '.', 'root', '.', 'label', '=', 'self', '.', 'root', '.', 'label', 'out', '.', 'root', '.', 'edge_length', '=', 'self', '.', 'root', '.', 'edge_length', 'q_old', '=', 'deque', '(', ')', 'q_old', '.', 'append', '(', 'self', '.', 'root', ')', 'q_new', '=', 'deque', '(', ')', 'q_new', '.', 'append', '(', 'out', '.', 'root', ')', 'while', 'len', '(', 'q_old', ')', '!=', '0', ':', 'n_old', '=', 'q_old', '.', 'popleft', '(', ')', 'n_new', '=', 'q_new', '.', 'popleft', '(', ')', 'for', 'c_old', 'in', 'n_old', '.', 'children', ':', 'if', 'c_old', 'in', 'keep', ':', 'c_new', '=', 'Node', '(', 'label', '=', 'str', '(', 'c_old', ')', ',', 'edge_length', '=', 'c_old', '.', 'edge_length', ')', 'n_new', '.', 'add_child', '(', 'c_new', ')', 'q_old', '.', 'append', '(', 'c_old', ')', 'q_new', '.', 'append', '(', 'c_new', ')', 'if', 'suppress_unifurcations', ':', 'out', '.', 'suppress_unifurcations', '(', ')', 'return', 'out']
Helper function for ``extract_tree_*`` functions
['Helper', 'function', 'for', 'extract_tree_', '*', 'functions']
train
https://github.com/niemasd/TreeSwift/blob/7e0cbc770fcc2ee1194ef7c2a0ab9fb82f089917/treeswift/Tree.py#L421-L449
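Callers normally reach this helper through the public extract_tree_* wrappers the docstring mentions; a sketch assuming the wrapper names extract_tree_with / extract_tree_without, which current TreeSwift releases expose:

    from treeswift import read_tree_newick

    tree = read_tree_newick("((A:1,B:1)X:1,(C:1,D:1)Y:1)R;")

    with_ab = tree.extract_tree_with({"A", "B"})        # keep only A and B
    without_ab = tree.extract_tree_without({"A", "B"})  # keep everything else
    print(with_ab.newick())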
5,734
watson-developer-cloud/python-sdk
ibm_watson/natural_language_understanding_v1.py
SemanticRolesResult._to_dict
def _to_dict(self): """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'sentence') and self.sentence is not None: _dict['sentence'] = self.sentence if hasattr(self, 'subject') and self.subject is not None: _dict['subject'] = self.subject._to_dict() if hasattr(self, 'action') and self.action is not None: _dict['action'] = self.action._to_dict() if hasattr(self, 'object') and self.object is not None: _dict['object'] = self.object._to_dict() return _dict
python
def _to_dict(self): """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'sentence') and self.sentence is not None: _dict['sentence'] = self.sentence if hasattr(self, 'subject') and self.subject is not None: _dict['subject'] = self.subject._to_dict() if hasattr(self, 'action') and self.action is not None: _dict['action'] = self.action._to_dict() if hasattr(self, 'object') and self.object is not None: _dict['object'] = self.object._to_dict() return _dict
['def', '_to_dict', '(', 'self', ')', ':', '_dict', '=', '{', '}', 'if', 'hasattr', '(', 'self', ',', "'sentence'", ')', 'and', 'self', '.', 'sentence', 'is', 'not', 'None', ':', '_dict', '[', "'sentence'", ']', '=', 'self', '.', 'sentence', 'if', 'hasattr', '(', 'self', ',', "'subject'", ')', 'and', 'self', '.', 'subject', 'is', 'not', 'None', ':', '_dict', '[', "'subject'", ']', '=', 'self', '.', 'subject', '.', '_to_dict', '(', ')', 'if', 'hasattr', '(', 'self', ',', "'action'", ')', 'and', 'self', '.', 'action', 'is', 'not', 'None', ':', '_dict', '[', "'action'", ']', '=', 'self', '.', 'action', '.', '_to_dict', '(', ')', 'if', 'hasattr', '(', 'self', ',', "'object'", ')', 'and', 'self', '.', 'object', 'is', 'not', 'None', ':', '_dict', '[', "'object'", ']', '=', 'self', '.', 'object', '.', '_to_dict', '(', ')', 'return', '_dict']
Return a json dictionary representing this model.
['Return', 'a', 'json', 'dictionary', 'representing', 'this', 'model', '.']
train
https://github.com/watson-developer-cloud/python-sdk/blob/4c2c9df4466fcde88975da9ecd834e6ba95eb353/ibm_watson/natural_language_understanding_v1.py#L2644-L2655
5,735
pulumi/pulumi
sdk/python/lib/pulumi/config.py
Config.require_bool
def require_bool(self, key: str) -> bool: """ Returns a configuration value, as a bool, by its given key. If it doesn't exist, or the configuration value is not a legal bool, an error is thrown. :param str key: The requested configuration key. :return: The configuration key's value. :rtype: bool :raises ConfigMissingError: The configuration value did not exist. :raises ConfigTypeError: The configuration value existed but couldn't be coerced to bool. """ v = self.get_bool(key) if v is None: raise ConfigMissingError(self.full_key(key)) return v
python
def require_bool(self, key: str) -> bool: """ Returns a configuration value, as a bool, by its given key. If it doesn't exist, or the configuration value is not a legal bool, an error is thrown. :param str key: The requested configuration key. :return: The configuration key's value. :rtype: bool :raises ConfigMissingError: The configuration value did not exist. :raises ConfigTypeError: The configuration value existed but couldn't be coerced to bool. """ v = self.get_bool(key) if v is None: raise ConfigMissingError(self.full_key(key)) return v
['def', 'require_bool', '(', 'self', ',', 'key', ':', 'str', ')', '->', 'bool', ':', 'v', '=', 'self', '.', 'get_bool', '(', 'key', ')', 'if', 'v', 'is', 'None', ':', 'raise', 'ConfigMissingError', '(', 'self', '.', 'full_key', '(', 'key', ')', ')', 'return', 'v']
Returns a configuration value, as a bool, by its given key. If it doesn't exist, or the configuration value is not a legal bool, an error is thrown. :param str key: The requested configuration key. :return: The configuration key's value. :rtype: bool :raises ConfigMissingError: The configuration value did not exist. :raises ConfigTypeError: The configuration value existed but couldn't be coerced to bool.
['Returns', 'a', 'configuration', 'value', 'as', 'a', 'bool', 'by', 'its', 'given', 'key', '.', 'If', 'it', 'doesn', 't', 'exist', 'or', 'the', 'configuration', 'value', 'is', 'not', 'a', 'legal', 'bool', 'an', 'error', 'is', 'thrown', '.']
train
https://github.com/pulumi/pulumi/blob/95d51efe6ab9a533838b6d83aa240b5f912e72aa/sdk/python/lib/pulumi/config.py#L129-L143
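A usage sketch; this only works inside a running Pulumi program, where pulumi.Config() reads the current stack's configuration:

    import pulumi

    config = pulumi.Config()
    # Raises ConfigMissingError unless `pulumi config set enableMonitoring true`
    # (or false) was run for this stack; ConfigTypeError if it is not a legal bool.
    enable_monitoring = config.require_bool("enableMonitoring")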
5,736
kobejohn/PQHelper
pqhelper/versus.py
Advisor._summarize_action
def _summarize_action(self, root_action): """Return a dictionary with various information about this root_action. Note: Scoring assumes that each actor makes the "best" choices in their turn based on the simulation available. """ def is_target_node(node): return isinstance(node, base.EOT) or (node is root_action) # store per-turn results from the bottom up. realistic_ends_by_node = dict() for node in root_action.post_order_nodes(): # bottom up to this action # only work with EOT or the root action if not is_target_node(node): continue # by the time this node is reached, the results of all children # have been stored under it in realistic_ends (except leaves) # so choose the best of the available results and send it # up to the next valid node try: # get the results stored previously in the deeper turn realistic_ends = realistic_ends_by_node[node] except KeyError: # leaves are own realistic end realistic_ends = [node] # identify the "best" end for this node if node is root_action: active = node.parent.active passive = node.parent.passive else: active = node.parent.passive passive = node.parent.active ends_by_score = dict() for realistic_end in realistic_ends: # determine the relative score. i.e. if delta is positive # then the end result is better for active than passive relative_score = self._relative_score(node, realistic_end, active, passive) ends_by_score[relative_score] = realistic_end best_end = ends_by_score[max(ends_by_score.keys())] # done after determining realistic result for root action if node is root_action: return self._summarize_result(root_action, best_end) # not done: place best end on the parent EOT's list of possibilities parent = node.parent while parent: if is_target_node(parent): break parent = parent.parent # keep moving up until target found # at this point the parent is either root_action or another EOT realistic_ends_by_node.setdefault(parent, list()).append(best_end) pass
python
def _summarize_action(self, root_action): """Return a dictionary with various information about this root_action. Note: Scoring assumes that each actor makes the "best" choices in their turn based on the simulation available. """ def is_target_node(node): return isinstance(node, base.EOT) or (node is root_action) # store per-turn results from the bottom up. realistic_ends_by_node = dict() for node in root_action.post_order_nodes(): # bottom up to this action # only work with EOT or the root action if not is_target_node(node): continue # by the time this node is reached, the results of all children # have been stored under it in realistic_ends (except leaves) # so choose the best of the available results and send it # up to the next valid node try: # get the results stored previously in the deeper turn realistic_ends = realistic_ends_by_node[node] except KeyError: # leaves are own realistic end realistic_ends = [node] # identify the "best" end for this node if node is root_action: active = node.parent.active passive = node.parent.passive else: active = node.parent.passive passive = node.parent.active ends_by_score = dict() for realistic_end in realistic_ends: # determine the relative score. i.e. if delta is positive # then the end result is better for active than passive relative_score = self._relative_score(node, realistic_end, active, passive) ends_by_score[relative_score] = realistic_end best_end = ends_by_score[max(ends_by_score.keys())] # done after determining realistic result for root action if node is root_action: return self._summarize_result(root_action, best_end) # not done: place best end on the parent EOT's list of possibilities parent = node.parent while parent: if is_target_node(parent): break parent = parent.parent # keep moving up until target found # at this point the parent is either root_action or another EOT realistic_ends_by_node.setdefault(parent, list()).append(best_end) pass
['def', '_summarize_action', '(', 'self', ',', 'root_action', ')', ':', 'def', 'is_target_node', '(', 'node', ')', ':', 'return', 'isinstance', '(', 'node', ',', 'base', '.', 'EOT', ')', 'or', '(', 'node', 'is', 'root_action', ')', '# store per-turn results from the bottom up.', 'realistic_ends_by_node', '=', 'dict', '(', ')', 'for', 'node', 'in', 'root_action', '.', 'post_order_nodes', '(', ')', ':', '# bottom up to this action', '# only work with EOT or the root action', 'if', 'not', 'is_target_node', '(', 'node', ')', ':', 'continue', '# by the time this node is reached, the results of all children', '# have been stored under it in realistic_ends (except leaves)', '# so choose the best of the available results and send it', '# up to the next valid node', 'try', ':', '# get the results stored previously in the deeper turn', 'realistic_ends', '=', 'realistic_ends_by_node', '[', 'node', ']', 'except', 'KeyError', ':', '# leaves are own realistic end', 'realistic_ends', '=', '[', 'node', ']', '# identify the "best" end for this node', 'if', 'node', 'is', 'root_action', ':', 'active', '=', 'node', '.', 'parent', '.', 'active', 'passive', '=', 'node', '.', 'parent', '.', 'passive', 'else', ':', 'active', '=', 'node', '.', 'parent', '.', 'passive', 'passive', '=', 'node', '.', 'parent', '.', 'active', 'ends_by_score', '=', 'dict', '(', ')', 'for', 'realistic_end', 'in', 'realistic_ends', ':', '# determine the relative score. i.e. if delta is positive', '# then the end result is better for active than passive', 'relative_score', '=', 'self', '.', '_relative_score', '(', 'node', ',', 'realistic_end', ',', 'active', ',', 'passive', ')', 'ends_by_score', '[', 'relative_score', ']', '=', 'realistic_end', 'best_end', '=', 'ends_by_score', '[', 'max', '(', 'ends_by_score', '.', 'keys', '(', ')', ')', ']', '# done after determining realistic result for root action', 'if', 'node', 'is', 'root_action', ':', 'return', 'self', '.', '_summarize_result', '(', 'root_action', ',', 'best_end', ')', "# not done: place best end on the parent EOT's list of possibilities", 'parent', '=', 'node', '.', 'parent', 'while', 'parent', ':', 'if', 'is_target_node', '(', 'parent', ')', ':', 'break', 'parent', '=', 'parent', '.', 'parent', '# keep moving up until target found', '# at this point the parent is either root_action or another EOT', 'realistic_ends_by_node', '.', 'setdefault', '(', 'parent', ',', 'list', '(', ')', ')', '.', 'append', '(', 'best_end', ')', 'pass']
Return a dictionary with various information about this root_action. Note: Scoring assumes that each actor makes the "best" choices in their turn based on the simulation available.
['Return', 'a', 'dictionary', 'with', 'various', 'information', 'about', 'this', 'root_action', '.']
train
https://github.com/kobejohn/PQHelper/blob/d2b78a22dcb631794295e6a159b06f39c3f10db6/pqhelper/versus.py#L39-L88
5,737
vmlaker/mpipe
src/Stage.py
Stage.link
def link(self, next_stage): """Link to the given downstream stage *next_stage* by adding its input tube to the list of this stage's output tubes. Return this stage.""" if next_stage is self: raise ValueError('cannot link stage to itself') self._output_tubes.append(next_stage._input_tube) self._next_stages.append(next_stage) return self
python
def link(self, next_stage): """Link to the given downstream stage *next_stage* by adding its input tube to the list of this stage's output tubes. Return this stage.""" if next_stage is self: raise ValueError('cannot link stage to itself') self._output_tubes.append(next_stage._input_tube) self._next_stages.append(next_stage) return self
['def', 'link', '(', 'self', ',', 'next_stage', ')', ':', 'if', 'next_stage', 'is', 'self', ':', 'raise', 'ValueError', '(', "'cannot link stage to itself'", ')', 'self', '.', '_output_tubes', '.', 'append', '(', 'next_stage', '.', '_input_tube', ')', 'self', '.', '_next_stages', '.', 'append', '(', 'next_stage', ')', 'return', 'self']
Link to the given downstream stage *next_stage* by adding its input tube to the list of this stage's output tubes. Return this stage.
['Link', 'to', 'the', 'given', 'downstream', 'stage', '*', 'next_stage', '*', 'by', 'adding', 'its', 'input', 'tube', 'to', 'the', 'list', 'of', 'this', 'stage', 's', 'output', 'tubes', '.', 'Return', 'this', 'stage', '.']
train
https://github.com/vmlaker/mpipe/blob/5a1804cf64271931f0cd3e4fff3e2b38291212dd/src/Stage.py#L64-L71
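A small pipeline sketch using mpipe's public API (OrderedStage wraps a worker function; Pipeline drives the linked stages); link() is what chains the two stages together:

    from mpipe import OrderedStage, Pipeline

    def increment(value):
        return value + 1

    def double(value):
        return value * 2

    stage1 = OrderedStage(increment)
    stage2 = OrderedStage(double)
    stage1.link(stage2)           # stage1's output tube now feeds stage2

    pipe = Pipeline(stage1)
    for number in range(5):
        pipe.put(number)
    pipe.put(None)                # signal end of input
    for result in pipe.results():
        print(result)             # (n + 1) * 2 for n in 0..4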
5,738
globocom/GloboNetworkAPI-client-python
networkapiclient/Interface.py
Interface.inserir
def inserir( self, nome, protegida, descricao, id_ligacao_front, id_ligacao_back, id_equipamento, tipo=None, vlan=None): """Insert new interface for an equipment. :param nome: Interface name. :param protegida: Indication of protected ('0' or '1'). :param descricao: Interface description. :param id_ligacao_front: Front end link interface identifier. :param id_ligacao_back: Back end link interface identifier. :param id_equipamento: Equipment identifier. :return: Dictionary with the following: {'interface': {'id': < id >}} :raise EquipamentoNaoExisteError: Equipment does not exist. :raise InvalidParameterError: The parameters nome, protegida and/or equipment id are none or invalid. :raise NomeInterfaceDuplicadoParaEquipamentoError: There is already an interface with this name for this equipment. :raise InterfaceNaoExisteError: Front link interface and/or back link interface doesn't exist. :raise DataBaseError: Networkapi failed to access the database. :raise XMLError: Networkapi failed to generate the XML response. """ interface_map = dict() interface_map['nome'] = nome interface_map['protegida'] = protegida interface_map['descricao'] = descricao interface_map['id_ligacao_front'] = id_ligacao_front interface_map['id_ligacao_back'] = id_ligacao_back interface_map['id_equipamento'] = id_equipamento interface_map['tipo'] = tipo interface_map['vlan'] = vlan code, xml = self.submit( {'interface': interface_map}, 'POST', 'interface/') return self.response(code, xml)
python
def inserir( self, nome, protegida, descricao, id_ligacao_front, id_ligacao_back, id_equipamento, tipo=None, vlan=None): """Insert new interface for an equipment. :param nome: Interface name. :param protegida: Indication of protected ('0' or '1'). :param descricao: Interface description. :param id_ligacao_front: Front end link interface identifier. :param id_ligacao_back: Back end link interface identifier. :param id_equipamento: Equipment identifier. :return: Dictionary with the following: {'interface': {'id': < id >}} :raise EquipamentoNaoExisteError: Equipment does not exist. :raise InvalidParameterError: The parameters nome, protegida and/or equipment id are none or invalid. :raise NomeInterfaceDuplicadoParaEquipamentoError: There is already an interface with this name for this equipment. :raise InterfaceNaoExisteError: Front link interface and/or back link interface doesn't exist. :raise DataBaseError: Networkapi failed to access the database. :raise XMLError: Networkapi failed to generate the XML response. """ interface_map = dict() interface_map['nome'] = nome interface_map['protegida'] = protegida interface_map['descricao'] = descricao interface_map['id_ligacao_front'] = id_ligacao_front interface_map['id_ligacao_back'] = id_ligacao_back interface_map['id_equipamento'] = id_equipamento interface_map['tipo'] = tipo interface_map['vlan'] = vlan code, xml = self.submit( {'interface': interface_map}, 'POST', 'interface/') return self.response(code, xml)
['def', 'inserir', '(', 'self', ',', 'nome', ',', 'protegida', ',', 'descricao', ',', 'id_ligacao_front', ',', 'id_ligacao_back', ',', 'id_equipamento', ',', 'tipo', '=', 'None', ',', 'vlan', '=', 'None', ')', ':', 'interface_map', '=', 'dict', '(', ')', 'interface_map', '[', "'nome'", ']', '=', 'nome', 'interface_map', '[', "'protegida'", ']', '=', 'protegida', 'interface_map', '[', "'descricao'", ']', '=', 'descricao', 'interface_map', '[', "'id_ligacao_front'", ']', '=', 'id_ligacao_front', 'interface_map', '[', "'id_ligacao_back'", ']', '=', 'id_ligacao_back', 'interface_map', '[', "'id_equipamento'", ']', '=', 'id_equipamento', 'interface_map', '[', "'tipo'", ']', '=', 'tipo', 'interface_map', '[', "'vlan'", ']', '=', 'vlan', 'code', ',', 'xml', '=', 'self', '.', 'submit', '(', '{', "'interface'", ':', 'interface_map', '}', ',', "'POST'", ',', "'interface/'", ')', 'return', 'self', '.', 'response', '(', 'code', ',', 'xml', ')']
Insert new interface for an equipment. :param nome: Interface name. :param protegida: Indication of protected ('0' or '1'). :param descricao: Interface description. :param id_ligacao_front: Front end link interface identifier. :param id_ligacao_back: Back end link interface identifier. :param id_equipamento: Equipment identifier. :return: Dictionary with the following: {'interface': {'id': < id >}} :raise EquipamentoNaoExisteError: Equipment does not exist. :raise InvalidParameterError: The parameters nome, protegida and/or equipment id are none or invalid. :raise NomeInterfaceDuplicadoParaEquipamentoError: There is already an interface with this name for this equipment. :raise InterfaceNaoExisteError: Front link interface and/or back link interface doesn't exist. :raise DataBaseError: Networkapi failed to access the database. :raise XMLError: Networkapi failed to generate the XML response.
['Insert', 'new', 'interface', 'for', 'an', 'equipment', '.']
train
https://github.com/globocom/GloboNetworkAPI-client-python/blob/cf34f913da48d9abbf750114f5d2ac4b2dde137d/networkapiclient/Interface.py#L149-L189
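A hedged call sketch. The connection details are hypothetical, and the assumption that the client is constructed with (networkapi_url, user, password) follows the usual GloboNetworkAPI client pattern rather than anything shown in this record:

    from networkapiclient.Interface import Interface

    client = Interface("http://networkapi.example.com/", "user", "password")
    response = client.inserir(
        nome="eth0",
        protegida="0",
        descricao="uplink",
        id_ligacao_front=None,
        id_ligacao_back=None,
        id_equipamento=123,
    )
    print(response["interface"]["id"])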
5,739
SoftwareDefinedBuildings/XBOS
apps/Data_quality_analysis/Clean_Data.py
Clean_Data._utc_to_local
def _utc_to_local(self, data, local_zone="America/Los_Angeles"): """ Adjust index of dataframe according to timezone that is requested by user. Parameters ---------- data : pd.DataFrame() Pandas dataframe of json timeseries response from server. local_zone : str pytz.timezone string of specified local timezone to change index to. Returns ------- pd.DataFrame() Pandas dataframe with timestamp index adjusted for local timezone. """ # Accounts for localtime shift data.index = data.index.tz_localize(pytz.utc).tz_convert(local_zone) # Gets rid of extra offset information so can compare with csv data data.index = data.index.tz_localize(None) return data
python
def _utc_to_local(self, data, local_zone="America/Los_Angeles"): """ Adjust index of dataframe according to timezone that is requested by user. Parameters ---------- data : pd.DataFrame() Pandas dataframe of json timeseries response from server. local_zone : str pytz.timezone string of specified local timezone to change index to. Returns ------- pd.DataFrame() Pandas dataframe with timestamp index adjusted for local timezone. """ # Accounts for localtime shift data.index = data.index.tz_localize(pytz.utc).tz_convert(local_zone) # Gets rid of extra offset information so can compare with csv data data.index = data.index.tz_localize(None) return data
['def', '_utc_to_local', '(', 'self', ',', 'data', ',', 'local_zone', '=', '"America/Los_Angeles"', ')', ':', '# Accounts for localtime shift', 'data', '.', 'index', '=', 'data', '.', 'index', '.', 'tz_localize', '(', 'pytz', '.', 'utc', ')', '.', 'tz_convert', '(', 'local_zone', ')', '# Gets rid of extra offset information so can compare with csv data', 'data', '.', 'index', '=', 'data', '.', 'index', '.', 'tz_localize', '(', 'None', ')', 'return', 'data']
Adjust index of dataframe according to timezone that is requested by user. Parameters ---------- data : pd.DataFrame() Pandas dataframe of json timeseries response from server. local_zone : str pytz.timezone string of specified local timezone to change index to. Returns ------- pd.DataFrame() Pandas dataframe with timestamp index adjusted for local timezone.
['Adjust', 'index', 'of', 'dataframe', 'according', 'to', 'timezone', 'that', 'is', 'requested', 'by', 'user', '.']
train
https://github.com/SoftwareDefinedBuildings/XBOS/blob/c12d4fb14518ea3ae98c471c28e0710fdf74dd25/apps/Data_quality_analysis/Clean_Data.py#L308-L331
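The method is three pandas index operations; the same steps outside the class, runnable as-is:

    import pandas as pd
    import pytz

    index = pd.date_range("2019-01-01 08:00", periods=3, freq="60min")
    data = pd.DataFrame({"power_kw": [1.0, 2.0, 3.0]}, index=index)

    # Declare the naive index as UTC, convert to the local zone,
    # then strip the offset so it compares cleanly with csv data.
    data.index = data.index.tz_localize(pytz.utc).tz_convert("America/Los_Angeles")
    data.index = data.index.tz_localize(None)
    print(data.index[0])  # 2019-01-01 00:00:00 (UTC-8 in January)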
5,740
lxyu/pinyin
pinyin/cedict.py
translate_word
def translate_word(word, dictionary=['simplified']): ''' Return the set of translations for a single character or word, if available. ''' if not dictionaries: init() for d in dictionary: if word in dictionaries[d]: return dictionaries[d][word] return None
python
def translate_word(word, dictionary=['simplified']): ''' Return the set of translations for a single character or word, if available. ''' if not dictionaries: init() for d in dictionary: if word in dictionaries[d]: return dictionaries[d][word] return None
['def', 'translate_word', '(', 'word', ',', 'dictionary', '=', '[', "'simplified'", ']', ')', ':', 'if', 'not', 'dictionaries', ':', 'init', '(', ')', 'for', 'd', 'in', 'dictionary', ':', 'if', 'word', 'in', 'dictionaries', '[', 'd', ']', ':', 'return', 'dictionaries', '[', 'd', ']', '[', 'word', ']', 'return', 'None']
Return the set of translations for a single character or word, if available.
['Return', 'the', 'set', 'of', 'translations', 'for', 'a', 'single', 'character', 'or', 'word', 'if', 'available', '.']
train
https://github.com/lxyu/pinyin/blob/f9cac5902b0cfaf91d93af633dfc75a51d2bf0cd/pinyin/cedict.py#L91-L101
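The lookup pattern in translate_word is easy to reproduce without the CC-CEDICT data; this sketch swaps in a toy dictionary table (entries invented) and a tuple default to sidestep the mutable-default pitfall:

dictionaries = {'simplified': {'你好': ['hello', 'hi']}}

def translate_word(word, dictionary=('simplified',)):
    # Return the first set of translations found across the requested tables.
    for d in dictionary:
        if word in dictionaries[d]:
            return dictionaries[d][word]
    return None

print(translate_word('你好'))  # ['hello', 'hi']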
5,741
honzamach/pydgets
pydgets/widgets.py
TreeWidget.list_settings
def list_settings(self): """ Get list of all appropriate settings and their default values. """ result = super().list_settings() result.append((self.SETTING_TREE_FORMATING, {})) result.append((self.SETTING_TREE_STYLE, 'utf8.a')) return result
python
def list_settings(self): """ Get list of all appropriate settings and their default values. """ result = super().list_settings() result.append((self.SETTING_TREE_FORMATING, {})) result.append((self.SETTING_TREE_STYLE, 'utf8.a')) return result
['def', 'list_settings', '(', 'self', ')', ':', 'result', '=', 'super', '(', ')', '.', 'list_settings', '(', ')', 'result', '.', 'append', '(', '(', 'self', '.', 'SETTING_TREE_FORMATING', ',', '{', '}', ')', ')', 'result', '.', 'append', '(', '(', 'self', '.', 'SETTING_TREE_STYLE', ',', "'utf8.a'", ')', ')', 'return', 'result']
Get list of all appropriate settings and their default values.
['Get', 'list', 'of', 'all', 'appropriate', 'settings', 'and', 'their', 'default', 'values', '.']
train
https://github.com/honzamach/pydgets/blob/5ca4ce19fc2d9b5f41441fb9163810f8ca502e79/pydgets/widgets.py#L894-L901
5,742
OSSOS/MOP
src/jjk/preproc/MOPplot.py
plot.eps
def eps(self): """Print the canvas to a postscript file""" import tkFileDialog,tkMessageBox filename=tkFileDialog.asksaveasfilename(message="save postscript to file",filetypes=['eps','ps']) if not filename: return self.postscript(file=filename)
python
def eps(self): """Print the canvas to a postscript file""" import tkFileDialog,tkMessageBox filename=tkFileDialog.asksaveasfilename(message="save postscript to file",filetypes=['eps','ps']) if not filename: return self.postscript(file=filename)
['def', 'eps', '(', 'self', ')', ':', 'import', 'tkFileDialog', ',', 'tkMessageBox', 'filename', '=', 'tkFileDialog', '.', 'asksaveasfilename', '(', 'message', '=', '"save postscript to file"', ',', 'filetypes', '=', '[', "'eps'", ',', "'ps'", ']', ')', 'if', 'not', 'filename', ':', 'return', 'self', '.', 'postscript', '(', 'file', '=', 'filename', ')']
Print the canvas to a postscript file
['Print', 'the', 'canvas', 'to', 'a', 'postscript', 'file']
train
https://github.com/OSSOS/MOP/blob/94f91d32ad5ec081d5a1ebd67604a838003465af/src/jjk/preproc/MOPplot.py#L208-L216
5,743
MIT-LCP/wfdb-python
wfdb/io/record.py
BaseRecord._adjust_datetime
def _adjust_datetime(self, sampfrom): """ Adjust date and time fields to reflect user input if possible. Helper function for the `_arrange_fields` of both Record and MultiRecord objects. """ if sampfrom: dt_seconds = sampfrom / self.fs if self.base_date and self.base_time: self.base_datetime = datetime.datetime.combine(self.base_date, self.base_time) self.base_datetime += datetime.timedelta(seconds=dt_seconds) self.base_date = self.base_datetime.date() self.base_time = self.base_datetime.time() # We can calculate the time even if there is no date elif self.base_time: tmp_datetime = datetime.datetime.combine( datetime.datetime.today().date(), self.base_time) self.base_time = (tmp_datetime + datetime.timedelta(seconds=dt_seconds)).time()
python
def _adjust_datetime(self, sampfrom): """ Adjust date and time fields to reflect user input if possible. Helper function for the `_arrange_fields` of both Record and MultiRecord objects. """ if sampfrom: dt_seconds = sampfrom / self.fs if self.base_date and self.base_time: self.base_datetime = datetime.datetime.combine(self.base_date, self.base_time) self.base_datetime += datetime.timedelta(seconds=dt_seconds) self.base_date = self.base_datetime.date() self.base_time = self.base_datetime.time() # We can calculate the time even if there is no date elif self.base_time: tmp_datetime = datetime.datetime.combine( datetime.datetime.today().date(), self.base_time) self.base_time = (tmp_datetime + datetime.timedelta(seconds=dt_seconds)).time()
['def', '_adjust_datetime', '(', 'self', ',', 'sampfrom', ')', ':', 'if', 'sampfrom', ':', 'dt_seconds', '=', 'sampfrom', '/', 'self', '.', 'fs', 'if', 'self', '.', 'base_date', 'and', 'self', '.', 'base_time', ':', 'self', '.', 'base_datetime', '=', 'datetime', '.', 'datetime', '.', 'combine', '(', 'self', '.', 'base_date', ',', 'self', '.', 'base_time', ')', 'self', '.', 'base_datetime', '+=', 'datetime', '.', 'timedelta', '(', 'seconds', '=', 'dt_seconds', ')', 'self', '.', 'base_date', '=', 'self', '.', 'base_datetime', '.', 'date', '(', ')', 'self', '.', 'base_time', '=', 'self', '.', 'base_datetime', '.', 'time', '(', ')', '# We can calculate the time even if there is no date', 'elif', 'self', '.', 'base_time', ':', 'tmp_datetime', '=', 'datetime', '.', 'datetime', '.', 'combine', '(', 'datetime', '.', 'datetime', '.', 'today', '(', ')', '.', 'date', '(', ')', ',', 'self', '.', 'base_time', ')', 'self', '.', 'base_time', '=', '(', 'tmp_datetime', '+', 'datetime', '.', 'timedelta', '(', 'seconds', '=', 'dt_seconds', ')', ')', '.', 'time', '(', ')']
Adjust date and time fields to reflect user input if possible. Helper function for the `_arrange_fields` of both Record and MultiRecord objects.
['Adjust', 'date', 'and', 'time', 'fields', 'to', 'reflect', 'user', 'input', 'if', 'possible', '.']
train
https://github.com/MIT-LCP/wfdb-python/blob/cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c/wfdb/io/record.py#L235-L255
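The shift performed by _adjust_datetime is plain datetime arithmetic; a standalone sketch with made-up sampling values:

import datetime

fs = 250            # sampling frequency in Hz (illustrative)
sampfrom = 125000   # read starts 500 seconds into the record
base = datetime.datetime(2020, 1, 1, 8, 0, 0)

shifted = base + datetime.timedelta(seconds=sampfrom / fs)
print(shifted.date(), shifted.time())  # 2020-01-01 08:08:20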
5,744
google/identity-toolkit-python-client
identitytoolkit/rpchelper.py
RpcHelper._GenerateAssertion
def _GenerateAssertion(self): """Generates the signed assertion that will be used in the request. Returns: string, signed Json Web Token (JWT) assertion. """ now = int(time.time()) payload = { 'aud': RpcHelper.TOKEN_ENDPOINT, 'scope': 'https://www.googleapis.com/auth/identitytoolkit', 'iat': now, 'exp': now + RpcHelper.MAX_TOKEN_LIFETIME_SECS, 'iss': self.service_account_email } return crypt.make_signed_jwt( crypt.Signer.from_string(self.service_account_key), payload)
python
def _GenerateAssertion(self): """Generates the signed assertion that will be used in the request. Returns: string, signed Json Web Token (JWT) assertion. """ now = int(time.time()) payload = { 'aud': RpcHelper.TOKEN_ENDPOINT, 'scope': 'https://www.googleapis.com/auth/identitytoolkit', 'iat': now, 'exp': now + RpcHelper.MAX_TOKEN_LIFETIME_SECS, 'iss': self.service_account_email } return crypt.make_signed_jwt( crypt.Signer.from_string(self.service_account_key), payload)
['def', '_GenerateAssertion', '(', 'self', ')', ':', 'now', '=', 'int', '(', 'time', '.', 'time', '(', ')', ')', 'payload', '=', '{', "'aud'", ':', 'RpcHelper', '.', 'TOKEN_ENDPOINT', ',', "'scope'", ':', "'https://www.googleapis.com/auth/identitytoolkit'", ',', "'iat'", ':', 'now', ',', "'exp'", ':', 'now', '+', 'RpcHelper', '.', 'MAX_TOKEN_LIFETIME_SECS', ',', "'iss'", ':', 'self', '.', 'service_account_email', '}', 'return', 'crypt', '.', 'make_signed_jwt', '(', 'crypt', '.', 'Signer', '.', 'from_string', '(', 'self', '.', 'service_account_key', ')', ',', 'payload', ')']
Generates the signed assertion that will be used in the request. Returns: string, signed Json Web Token (JWT) assertion.
['Generates', 'the', 'signed', 'assertion', 'that', 'will', 'be', 'used', 'in', 'the', 'request', '.']
train
https://github.com/google/identity-toolkit-python-client/blob/4cfe3013569c21576daa5d22ad21f9f4f8b30c4d/identitytoolkit/rpchelper.py#L244-L260
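Only the payload construction of _GenerateAssertion is sketched here, since the actual signing needs oauth2client's crypt helpers and a real service-account key; the audience URL and account email below are assumptions:

import time

MAX_TOKEN_LIFETIME_SECS = 3600  # one hour, a typical OAuth assertion lifetime
now = int(time.time())
payload = {
    'aud': 'https://accounts.google.com/o/oauth2/token',  # assumed endpoint
    'scope': 'https://www.googleapis.com/auth/identitytoolkit',
    'iat': now,
    'exp': now + MAX_TOKEN_LIFETIME_SECS,
    'iss': 'svc-account@example.iam.gserviceaccount.com',  # hypothetical
}
print(payload['exp'] - payload['iat'])  # 3600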
5,745
ianmiell/shutit
shutit_class.py
ShutIt.get_url
def get_url(self, filename, locations, command='curl', shutit_pexpect_child=None, timeout=shutit_global.shutit_global_object.default_timeout, fail_on_empty_before=True, record_command=True, exit_values=None, retry=3, note=None, loglevel=logging.DEBUG): """Handles the getting of a url for you. Example: get_url('somejar.jar', ['ftp://loc.org','http://anotherloc.com/jars']) @param filename: name of the file to download @param locations: list of URLs whence the file can be downloaded @param command: program to use to download the file (Default: curl) @param shutit_pexpect_child: See send() @param timeout: See send() @param fail_on_empty_before: See send() @param record_command: See send() @param exit_values: See send() @param retry: How many times to retry the download in case of failure. Default: 3 @param note: See send() @type filename: string @type locations: list of strings @type retry: integer @return: True if the download was completed successfully, False otherwise. @rtype: boolean """ shutit_global.shutit_global_object.yield_to_draw() shutit_pexpect_child = shutit_pexpect_child or self.get_current_shutit_pexpect_session().pexpect_child shutit_pexpect_session = self.get_shutit_pexpect_session_from_child(shutit_pexpect_child) return shutit_pexpect_session.get_url(filename, locations, send=command, timeout=timeout, fail_on_empty_before=fail_on_empty_before, record_command=record_command, exit_values=exit_values, retry=retry, note=note, loglevel=loglevel)
python
def get_url(self, filename, locations, command='curl', shutit_pexpect_child=None, timeout=shutit_global.shutit_global_object.default_timeout, fail_on_empty_before=True, record_command=True, exit_values=None, retry=3, note=None, loglevel=logging.DEBUG): """Handles the getting of a url for you. Example: get_url('somejar.jar', ['ftp://loc.org','http://anotherloc.com/jars']) @param filename: name of the file to download @param locations: list of URLs whence the file can be downloaded @param command: program to use to download the file (Default: curl) @param shutit_pexpect_child: See send() @param timeout: See send() @param fail_on_empty_before: See send() @param record_command: See send() @param exit_values: See send() @param retry: How many times to retry the download in case of failure. Default: 3 @param note: See send() @type filename: string @type locations: list of strings @type retry: integer @return: True if the download was completed successfully, False otherwise. @rtype: boolean """ shutit_global.shutit_global_object.yield_to_draw() shutit_pexpect_child = shutit_pexpect_child or self.get_current_shutit_pexpect_session().pexpect_child shutit_pexpect_session = self.get_shutit_pexpect_session_from_child(shutit_pexpect_child) return shutit_pexpect_session.get_url(filename, locations, send=command, timeout=timeout, fail_on_empty_before=fail_on_empty_before, record_command=record_command, exit_values=exit_values, retry=retry, note=note, loglevel=loglevel)
['def', 'get_url', '(', 'self', ',', 'filename', ',', 'locations', ',', 'command', '=', "'curl'", ',', 'shutit_pexpect_child', '=', 'None', ',', 'timeout', '=', 'shutit_global', '.', 'shutit_global_object', '.', 'default_timeout', ',', 'fail_on_empty_before', '=', 'True', ',', 'record_command', '=', 'True', ',', 'exit_values', '=', 'None', ',', 'retry', '=', '3', ',', 'note', '=', 'None', ',', 'loglevel', '=', 'logging', '.', 'DEBUG', ')', ':', 'shutit_global', '.', 'shutit_global_object', '.', 'yield_to_draw', '(', ')', 'shutit_pexpect_child', '=', 'shutit_pexpect_child', 'or', 'self', '.', 'get_current_shutit_pexpect_session', '(', ')', '.', 'pexpect_child', 'shutit_pexpect_session', '=', 'self', '.', 'get_shutit_pexpect_session_from_child', '(', 'shutit_pexpect_child', ')', 'return', 'shutit_pexpect_session', '.', 'get_url', '(', 'filename', ',', 'locations', ',', 'send', '=', 'command', ',', 'timeout', '=', 'timeout', ',', 'fail_on_empty_before', '=', 'fail_on_empty_before', ',', 'record_command', '=', 'record_command', ',', 'exit_values', '=', 'exit_values', ',', 'retry', '=', 'retry', ',', 'note', '=', 'note', ',', 'loglevel', '=', 'loglevel', ')']
Handles the getting of a url for you. Example: get_url('somejar.jar', ['ftp://loc.org','http://anotherloc.com/jars']) @param filename: name of the file to download @param locations: list of URLs whence the file can be downloaded @param command: program to use to download the file (Default: curl) @param shutit_pexpect_child: See send() @param timeout: See send() @param fail_on_empty_before: See send() @param record_command: See send() @param exit_values: See send() @param retry: How many times to retry the download in case of failure. Default: 3 @param note: See send() @type filename: string @type locations: list of strings @type retry: integer @return: True if the download was completed successfully, False otherwise. @rtype: boolean
['Handles', 'the', 'getting', 'of', 'a', 'url', 'for', 'you', '.']
train
https://github.com/ianmiell/shutit/blob/19cd64cdfb23515b106b40213dccff4101617076/shutit_class.py#L1540-L1588
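The retry-over-locations behaviour of get_url can be approximated without ShutIt using only the standard library; this is a rough sketch, and the URL in the comment is a placeholder:

import urllib.request

def fetch_first(filename, locations, retries=3):
    # Try each location up to `retries` times; stop at the first success.
    for url in locations:
        for _ in range(retries):
            try:
                urllib.request.urlretrieve(url + '/' + filename, filename)
                return True
            except OSError:
                continue
    return False

# fetch_first('somejar.jar', ['http://anotherloc.com/jars'])  # placeholder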
5,746
blockstack/blockstack-core
blockstack/lib/subdomains.py
Subdomain.serialize_to_txt
def serialize_to_txt(self): """ Serialize this subdomain record to a TXT record. The trailing newline will be omitted """ txtrec = { 'name': self.fqn if self.independent else self.subdomain, 'txt': self.pack_subdomain()[1:] } return blockstack_zones.record_processors.process_txt([txtrec], '{txt}').strip()
python
def serialize_to_txt(self): """ Serialize this subdomain record to a TXT record. The trailing newline will be omitted """ txtrec = { 'name': self.fqn if self.independent else self.subdomain, 'txt': self.pack_subdomain()[1:] } return blockstack_zones.record_processors.process_txt([txtrec], '{txt}').strip()
['def', 'serialize_to_txt', '(', 'self', ')', ':', 'txtrec', '=', '{', "'name'", ':', 'self', '.', 'fqn', 'if', 'self', '.', 'independent', 'else', 'self', '.', 'subdomain', ',', "'txt'", ':', 'self', '.', 'pack_subdomain', '(', ')', '[', '1', ':', ']', '}', 'return', 'blockstack_zones', '.', 'record_processors', '.', 'process_txt', '(', '[', 'txtrec', ']', ',', "'{txt}'", ')', '.', 'strip', '(', ')']
Serialize this subdomain record to a TXT record. The trailing newline will be omitted
['Serialize', 'this', 'subdomain', 'record', 'to', 'a', 'TXT', 'record', '.', 'The', 'trailing', 'newline', 'will', 'be', 'omitted']
train
https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/subdomains.py#L224-L232
5,747
MartijnBraam/pyElectronics
electronics/devices/segmentdisplay.py
SegmentDisplayGPIO.write
def write(self, char): """ Display a single character on the display :type char: str or int :param char: Character to display """ char = str(char).lower() self.segments.write(self.font[char])
python
def write(self, char): """ Display a single character on the display :type char: str or int :param char: Character to display """ char = str(char).lower() self.segments.write(self.font[char])
['def', 'write', '(', 'self', ',', 'char', ')', ':', 'char', '=', 'str', '(', 'char', ')', '.', 'lower', '(', ')', 'self', '.', 'segments', '.', 'write', '(', 'self', '.', 'font', '[', 'char', ']', ')']
Display a single character on the display :type char: str or int :param char: Character to display
['Display', 'a', 'single', 'character', 'on', 'the', 'display']
train
https://github.com/MartijnBraam/pyElectronics/blob/a20878c9fa190135f1e478e9ea0b54ca43ff308e/electronics/devices/segmentdisplay.py#L205-L212
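The font lookup in SegmentDisplayGPIO.write reduces to a dict of segment bitmasks; the digit patterns below follow the common 7-segment encoding, and the GPIO output sink is faked with print:

FONT = {
    '0': 0x3f, '1': 0x06, '2': 0x5b, '3': 0x4f, '4': 0x66,
    '5': 0x6d, '6': 0x7d, '7': 0x07, '8': 0x7f, '9': 0x6f,
}

def write(char):
    # Normalise to a lowercase string key, then emit the segment mask.
    char = str(char).lower()
    print('segments <- {:#09b}'.format(FONT[char]))

write(3)  # segments <- 0b1001111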
5,748
matousc89/padasip
padasip/misc/error_evaluation.py
MSE
def MSE(x1, x2=-1): """ Mean squared error - this function accepts two series of data or directly one series with error. **Args:** * `x1` - first data series or error (1d array) **Kwargs:** * `x2` - second series (1d array) if first series was not error directly,\\ then this should be the second series **Returns:** * `e` - MSE of error (float) obtained directly from `x1`, \\ or as a difference of `x1` and `x2` """ e = get_valid_error(x1, x2) return np.dot(e, e) / float(len(e))
python
def MSE(x1, x2=-1): """ Mean squared error - this function accepts two series of data or directly one series with error. **Args:** * `x1` - first data series or error (1d array) **Kwargs:** * `x2` - second series (1d array) if first series was not error directly,\\ then this should be the second series **Returns:** * `e` - MSE of error (float) obtained directly from `x1`, \\ or as a difference of `x1` and `x2` """ e = get_valid_error(x1, x2) return np.dot(e, e) / float(len(e))
['def', 'MSE', '(', 'x1', ',', 'x2', '=', '-', '1', ')', ':', 'e', '=', 'get_valid_error', '(', 'x1', ',', 'x2', ')', 'return', 'np', '.', 'dot', '(', 'e', ',', 'e', ')', '/', 'float', '(', 'len', '(', 'e', ')', ')']
Mean squared error - this function accepts two series of data or directly one series with error. **Args:** * `x1` - first data series or error (1d array) **Kwargs:** * `x2` - second series (1d array) if first series was not error directly,\\ then this should be the second series **Returns:** * `e` - MSE of error (float) obtained directly from `x1`, \\ or as a difference of `x1` and `x2`
['Mean', 'squared', 'error', '-', 'this', 'function', 'accepts', 'two', 'series', 'of', 'data', 'or', 'directly', 'one', 'series', 'with', 'error', '.']
train
https://github.com/matousc89/padasip/blob/c969eadd7fa181a84da0554d737fc13c6450d16f/padasip/misc/error_evaluation.py#L175-L196
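A quick numeric check of the MSE formula, assuming numpy; the series are toy data:

import numpy as np

def mse(e):
    # np.dot(e, e) is the sum of squared errors; dividing by n gives the mean.
    return np.dot(e, e) / float(len(e))

target = np.array([1.0, 2.0, 3.0])
output = np.array([1.5, 2.0, 2.0])
print(mse(target - output))  # (0.25 + 0 + 1.0) / 3 ≈ 0.4167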
5,749
pysal/mapclassify
mapclassify/classifiers.py
bin1d
def bin1d(x, bins): """ Place values of a 1-d array into bins and determine counts of values in each bin Parameters ---------- x : array (n, 1), values to bin bins : array (k,1), upper bounds of each bin (monotonic) Returns ------- binIds : array 1-d array of integer bin Ids counts : int number of elements of x falling in each bin Examples -------- >>> import numpy as np >>> import mapclassify as mc >>> x = np.arange(100, dtype = 'float') >>> bins = [25, 74, 100] >>> binIds, counts = mc.classifiers.bin1d(x, bins) >>> binIds array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]) >>> counts array([26, 49, 25]) """ left = [-float("inf")] left.extend(bins[0:-1]) right = bins cuts = list(zip(left, right)) k = len(bins) binIds = np.zeros(x.shape, dtype='int') while cuts: k -= 1 l, r = cuts.pop(-1) binIds += (x > l) * (x <= r) * k counts = np.bincount(binIds, minlength=len(bins)) return (binIds, counts)
python
def bin1d(x, bins): """ Place values of a 1-d array into bins and determine counts of values in each bin Parameters ---------- x : array (n, 1), values to bin bins : array (k,1), upper bounds of each bin (monotonic) Returns ------- binIds : array 1-d array of integer bin Ids counts : int number of elements of x falling in each bin Examples -------- >>> import numpy as np >>> import mapclassify as mc >>> x = np.arange(100, dtype = 'float') >>> bins = [25, 74, 100] >>> binIds, counts = mc.classifiers.bin1d(x, bins) >>> binIds array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]) >>> counts array([26, 49, 25]) """ left = [-float("inf")] left.extend(bins[0:-1]) right = bins cuts = list(zip(left, right)) k = len(bins) binIds = np.zeros(x.shape, dtype='int') while cuts: k -= 1 l, r = cuts.pop(-1) binIds += (x > l) * (x <= r) * k counts = np.bincount(binIds, minlength=len(bins)) return (binIds, counts)
['def', 'bin1d', '(', 'x', ',', 'bins', ')', ':', 'left', '=', '[', '-', 'float', '(', '"inf"', ')', ']', 'left', '.', 'extend', '(', 'bins', '[', '0', ':', '-', '1', ']', ')', 'right', '=', 'bins', 'cuts', '=', 'list', '(', 'zip', '(', 'left', ',', 'right', ')', ')', 'k', '=', 'len', '(', 'bins', ')', 'binIds', '=', 'np', '.', 'zeros', '(', 'x', '.', 'shape', ',', 'dtype', '=', "'int'", ')', 'while', 'cuts', ':', 'k', '-=', '1', 'l', ',', 'r', '=', 'cuts', '.', 'pop', '(', '-', '1', ')', 'binIds', '+=', '(', 'x', '>', 'l', ')', '*', '(', 'x', '<=', 'r', ')', '*', 'k', 'counts', '=', 'np', '.', 'bincount', '(', 'binIds', ',', 'minlength', '=', 'len', '(', 'bins', ')', ')', 'return', '(', 'binIds', ',', 'counts', ')']
Place values of a 1-d array into bins and determine counts of values in each bin Parameters ---------- x : array (n, 1), values to bin bins : array (k,1), upper bounds of each bin (monotonic) Returns ------- binIds : array 1-d array of integer bin Ids counts : int number of elements of x falling in each bin Examples -------- >>> import numpy as np >>> import mapclassify as mc >>> x = np.arange(100, dtype = 'float') >>> bins = [25, 74, 100] >>> binIds, counts = mc.classifiers.bin1d(x, bins) >>> binIds array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]) >>> counts array([26, 49, 25])
['Place', 'values', 'of', 'a', '1', '-', 'd', 'array', 'into', 'bins', 'and', 'determine', 'counts', 'of', 'values', 'in', 'each', 'bin']
train
https://github.com/pysal/mapclassify/blob/5b22ec33f5802becf40557614d90cd38efa1676e/mapclassify/classifiers.py#L231-L278
5,750
subdownloader/subdownloader
subdownloader/languages/language.py
Language.from_xxx
def from_xxx(cls, xxx): """ Create a new Language instance from a LanguageID string :param xxx: LanguageID as string :return: Language instance with instance.xxx() == xxx if xxx is valid else instance of UnknownLanguage """ xxx = str(xxx).lower() if xxx == 'unknown': return UnknownLanguage(xxx) try: return cls._from_xyz('LanguageID', xxx) except NotALanguageException: log.warning('Unknown LanguageId: {}'.format(xxx)) return UnknownLanguage(xxx)
python
def from_xxx(cls, xxx): """ Create a new Language instance from a LanguageID string :param xxx: LanguageID as string :return: Language instance with instance.xxx() == xxx if xxx is valid else instance of UnknownLanguage """ xxx = str(xxx).lower() if xxx == 'unknown': return UnknownLanguage(xxx) try: return cls._from_xyz('LanguageID', xxx) except NotALanguageException: log.warning('Unknown LanguageId: {}'.format(xxx)) return UnknownLanguage(xxx)
['def', 'from_xxx', '(', 'cls', ',', 'xxx', ')', ':', 'xxx', '=', 'str', '(', 'xxx', ')', '.', 'lower', '(', ')', 'if', 'xxx', '==', "'unknown'", ':', 'return', 'UnknownLanguage', '(', 'xxx', ')', 'try', ':', 'return', 'cls', '.', '_from_xyz', '(', "'LanguageID'", ',', 'xxx', ')', 'except', 'NotALanguageException', ':', 'log', '.', 'warning', '(', "'Unknown LanguageId: {}'", '.', 'format', '(', 'xxx', ')', ')', 'return', 'UnknownLanguage', '(', 'xxx', ')']
Create a new Language instance from a LanguageID string :param xxx: LanguageID as string :return: Language instance with instance.xxx() == xxx if xxx is valid else instance of UnknownLanguage
['Create', 'a', 'new', 'Language', 'instance', 'from', 'a', 'LanguageID', 'string', ':', 'param', 'xxx', ':', 'LanguageID', 'as', 'string', ':', 'return', ':', 'Language', 'instance', 'with', 'instance', '.', 'xxx', '()', '==', 'xxx', 'if', 'xxx', 'is', 'valid', 'else', 'instance', 'of', 'UnknownLanguage']
train
https://github.com/subdownloader/subdownloader/blob/bbccedd11b18d925ad4c062b5eb65981e24d0433/subdownloader/languages/language.py#L176-L189
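The guard fixed above originally used `is`, which tests object identity rather than equality; whether two equal strings share one object is an interning accident, as this snippet shows (comparing with `is` against a literal also triggers a SyntaxWarning on modern CPython):

xxx = ''.join(['unk', 'nown'])  # builds 'unknown' at runtime
print(xxx == 'unknown')  # True: value equality
print(xxx is 'unknown')  # typically False: distinct objects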
5,751
robotpy/pyfrc
lib/pyfrc/sim/field/user_renderer.py
UserRenderer.draw_pathfinder_trajectory
def draw_pathfinder_trajectory( self, trajectory, color="#ff0000", offset=None, scale=(1, 1), show_dt=False, dt_offset=0.0, **kwargs ): """ Special helper function for drawing trajectories generated by robotpy-pathfinder :param trajectory: A list of pathfinder segment objects :param offset: If specified, should be x/y tuple to add to the path relative to the robot coordinates :param scale: Multiply all points by this (x,y) tuple :param show_dt: draw text every N seconds along path, or False :param dt_offset: add this to each dt shown :param kwargs: Keyword options to pass to tkinter.create_line """ # pathfinder x/y coordinates are switched pts = [(pt.x, -pt.y) for pt in trajectory] robot_coordinates = offset if offset else True self.draw_line( pts, color=color, robot_coordinates=robot_coordinates, relative_to_first=True, arrow=True, scale=scale, ) if show_dt: dt = trajectory[0].dt def _defer_text(): # defer this execution to save effort when drawing px_per_ft = UserRenderer._global_ui.field.px_per_ft line = self._elements[-1] for i in range(0, len(pts), int(show_dt / dt)): text = "t=%.2f" % (dt_offset + i * dt,) el = TextElement( text, line.pts[i], 0, "#000000", int(px_per_ft * 0.5) ) UserRenderer._global_ui.field.add_moving_element(el) self._elements.append(el) self._run(_defer_text)
python
def draw_pathfinder_trajectory( self, trajectory, color="#ff0000", offset=None, scale=(1, 1), show_dt=False, dt_offset=0.0, **kwargs ): """ Special helper function for drawing trajectories generated by robotpy-pathfinder :param trajectory: A list of pathfinder segment objects :param offset: If specified, should be x/y tuple to add to the path relative to the robot coordinates :param scale: Multiply all points by this (x,y) tuple :param show_dt: draw text every N seconds along path, or False :param dt_offset: add this to each dt shown :param kwargs: Keyword options to pass to tkinter.create_line """ # pathfinder x/y coordinates are switched pts = [(pt.x, -pt.y) for pt in trajectory] robot_coordinates = offset if offset else True self.draw_line( pts, color=color, robot_coordinates=robot_coordinates, relative_to_first=True, arrow=True, scale=scale, ) if show_dt: dt = trajectory[0].dt def _defer_text(): # defer this execution to save effort when drawing px_per_ft = UserRenderer._global_ui.field.px_per_ft line = self._elements[-1] for i in range(0, len(pts), int(show_dt / dt)): text = "t=%.2f" % (dt_offset + i * dt,) el = TextElement( text, line.pts[i], 0, "#000000", int(px_per_ft * 0.5) ) UserRenderer._global_ui.field.add_moving_element(el) self._elements.append(el) self._run(_defer_text)
['def', 'draw_pathfinder_trajectory', '(', 'self', ',', 'trajectory', ',', 'color', '=', '"#ff0000"', ',', 'offset', '=', 'None', ',', 'scale', '=', '(', '1', ',', '1', ')', ',', 'show_dt', '=', 'False', ',', 'dt_offset', '=', '0.0', ',', '*', '*', 'kwargs', ')', ':', '# pathfinder x/y coordinates are switched', 'pts', '=', '[', '(', 'pt', '.', 'x', ',', '-', 'pt', '.', 'y', ')', 'for', 'pt', 'in', 'trajectory', ']', 'robot_coordinates', '=', 'offset', 'if', 'offset', 'else', 'True', 'self', '.', 'draw_line', '(', 'pts', ',', 'color', '=', 'color', ',', 'robot_coordinates', '=', 'robot_coordinates', ',', 'relative_to_first', '=', 'True', ',', 'arrow', '=', 'True', ',', 'scale', '=', 'scale', ',', ')', 'if', 'show_dt', ':', 'dt', '=', 'trajectory', '[', '0', ']', '.', 'dt', 'def', '_defer_text', '(', ')', ':', '# defer this execution to save effort when drawing', 'px_per_ft', '=', 'UserRenderer', '.', '_global_ui', '.', 'field', '.', 'px_per_ft', 'line', '=', 'self', '.', '_elements', '[', '-', '1', ']', 'for', 'i', 'in', 'range', '(', '0', ',', 'len', '(', 'pts', ')', ',', 'int', '(', 'show_dt', '/', 'dt', ')', ')', ':', 'text', '=', '"t=%.2f"', '%', '(', 'dt_offset', '+', 'i', '*', 'dt', ',', ')', 'el', '=', 'TextElement', '(', 'text', ',', 'line', '.', 'pts', '[', 'i', ']', ',', '0', ',', '"#000000"', ',', 'int', '(', 'px_per_ft', '*', '0.5', ')', ')', 'UserRenderer', '.', '_global_ui', '.', 'field', '.', 'add_moving_element', '(', 'el', ')', 'self', '.', '_elements', '.', 'append', '(', 'el', ')', 'self', '.', '_run', '(', '_defer_text', ')']
Special helper function for drawing trajectories generated by robotpy-pathfinder :param trajectory: A list of pathfinder segment objects :param offset: If specified, should be x/y tuple to add to the path relative to the robot coordinates :param scale: Multiply all points by this (x,y) tuple :param show_dt: draw text every N seconds along path, or False :param dt_offset: add this to each dt shown :param kwargs: Keyword options to pass to tkinter.create_line
['Special', 'helper', 'function', 'for', 'drawing', 'trajectories', 'generated', 'by', 'robotpy', '-', 'pathfinder', ':', 'param', 'trajectory', ':', 'A', 'list', 'of', 'pathfinder', 'segment', 'objects', ':', 'param', 'offset', ':', 'If', 'specified', 'should', 'be', 'x', '/', 'y', 'tuple', 'to', 'add', 'to', 'the', 'path', 'relative', 'to', 'the', 'robot', 'coordinates', ':', 'param', 'scale', ':', 'Multiply', 'all', 'points', 'by', 'this', '(', 'x', 'y', ')', 'tuple', ':', 'param', 'show_dt', ':', 'draw', 'text', 'every', 'N', 'seconds', 'along', 'path', 'or', 'False', ':', 'param', 'dt_offset', ':', 'add', 'this', 'to', 'each', 'dt', 'shown', ':', 'param', 'kwargs', ':', 'Keyword', 'options', 'to', 'pass', 'to', 'tkinter', '.', 'create_line']
train
https://github.com/robotpy/pyfrc/blob/7672ea3f17c8d4b702a9f18a7372d95feee7e37d/lib/pyfrc/sim/field/user_renderer.py#L41-L91
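Only the coordinate flip in draw_pathfinder_trajectory is worth isolating; pathfinder segment objects are stood in for by a namedtuple with invented values:

from collections import namedtuple

Segment = namedtuple('Segment', 'x y dt')
trajectory = [Segment(0.0, 0.0, 0.02), Segment(1.0, 0.5, 0.02)]

# pathfinder's x/y axes are switched relative to the field, so negate y.
pts = [(pt.x, -pt.y) for pt in trajectory]
print(pts)  # [(0.0, -0.0), (1.0, -0.5)]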
5,752
wilson-eft/wilson
wilson/translate/wet.py
Fierz_to_Bern_chrom
def Fierz_to_Bern_chrom(C, dd, parameters): """From Fierz to chromomagnetic Bern basis for Class V. dd should be of the form 'sb', 'ds' etc.""" e = sqrt(4 * pi * parameters['alpha_e']) gs = sqrt(4 * pi * parameters['alpha_s']) if dd == 'sb' or dd == 'db': mq = parameters['m_b'] elif dd == 'ds': mq = parameters['m_s'] else: raise KeyError("Not sure what to do with quark mass for flavour {}".format(dd)) return { '7gamma' + dd : gs**2 / e / mq * C['F7gamma' + dd ], '8g' + dd : gs / mq * C['F8g' + dd ], '7pgamma' + dd : gs**2 / e /mq * C['F7pgamma' + dd], '8pg' + dd : gs / mq * C['F8pg' + dd] }
python
def Fierz_to_Bern_chrom(C, dd, parameters): """From Fierz to chromomagnetic Bern basis for Class V. dd should be of the form 'sb', 'ds' etc.""" e = sqrt(4 * pi * parameters['alpha_e']) gs = sqrt(4 * pi * parameters['alpha_s']) if dd == 'sb' or dd == 'db': mq = parameters['m_b'] elif dd == 'ds': mq = parameters['m_s'] else: raise KeyError("Not sure what to do with quark mass for flavour {}".format(dd)) return { '7gamma' + dd : gs**2 / e / mq * C['F7gamma' + dd ], '8g' + dd : gs / mq * C['F8g' + dd ], '7pgamma' + dd : gs**2 / e /mq * C['F7pgamma' + dd], '8pg' + dd : gs / mq * C['F8pg' + dd] }
['def', 'Fierz_to_Bern_chrom', '(', 'C', ',', 'dd', ',', 'parameters', ')', ':', 'e', '=', 'sqrt', '(', '4', '*', 'pi', '*', 'parameters', '[', "'alpha_e'", ']', ')', 'gs', '=', 'sqrt', '(', '4', '*', 'pi', '*', 'parameters', '[', "'alpha_s'", ']', ')', 'if', 'dd', '==', "'sb'", 'or', 'dd', '==', "'db'", ':', 'mq', '=', 'parameters', '[', "'m_b'", ']', 'elif', 'dd', '==', "'ds'", ':', 'mq', '=', 'parameters', '[', "'m_s'", ']', 'else', ':', 'raise', 'KeyError', '(', '"Not sure what to do with quark mass for flavour {}"', '.', 'format', '(', 'dd', ')', ')', 'return', '{', "'7gamma'", '+', 'dd', ':', 'gs', '**', '2', '/', 'e', '/', 'mq', '*', 'C', '[', "'F7gamma'", '+', 'dd', ']', ',', "'8g'", '+', 'dd', ':', 'gs', '/', 'mq', '*', 'C', '[', "'F8g'", '+', 'dd', ']', ',', "'7pgamma'", '+', 'dd', ':', 'gs', '**', '2', '/', 'e', '/', 'mq', '*', 'C', '[', "'F7pgamma'", '+', 'dd', ']', ',', "'8pg'", '+', 'dd', ':', 'gs', '/', 'mq', '*', 'C', '[', "'F8pg'", '+', 'dd', ']', '}']
From Fierz to chromomagnetic Bern basis for Class V. dd should be of the form 'sb', 'ds' etc.
['From', 'Fierz', 'to', 'chromomagnetic', 'Bern', 'basis', 'for', 'Class', 'V', '.', 'dd', 'should', 'be', 'of', 'the', 'form', 'sb', 'ds', 'etc', '.']
train
https://github.com/wilson-eft/wilson/blob/4164f55ff663d4f668c6e2b4575fd41562662cc9/wilson/translate/wet.py#L1352-L1368
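The prefactors in Fierz_to_Bern_chrom are gauge couplings recovered from the fine-structure constants; a numeric sketch with illustrative parameter values:

from math import pi, sqrt

parameters = {'alpha_e': 1 / 127.9, 'alpha_s': 0.1184, 'm_b': 4.18}  # illustrative
e = sqrt(4 * pi * parameters['alpha_e'])
gs = sqrt(4 * pi * parameters['alpha_s'])

# Prefactor applied to F7gamma in the translation above:
print(gs**2 / e / parameters['m_b'])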
5,753
saltstack/salt
salt/modules/pecl.py
_pecl
def _pecl(command, defaults=False): ''' Execute the command passed with pecl ''' cmdline = 'pecl {0}'.format(command) if salt.utils.data.is_true(defaults): cmdline = "yes '' | " + cmdline ret = __salt__['cmd.run_all'](cmdline, python_shell=True) if ret['retcode'] == 0: return ret['stdout'] else: log.error('Problem running pecl. Is php-pear installed?') return ''
python
def _pecl(command, defaults=False): ''' Execute the command passed with pecl ''' cmdline = 'pecl {0}'.format(command) if salt.utils.data.is_true(defaults): cmdline = "yes '' | " + cmdline ret = __salt__['cmd.run_all'](cmdline, python_shell=True) if ret['retcode'] == 0: return ret['stdout'] else: log.error('Problem running pecl. Is php-pear installed?') return ''
['def', '_pecl', '(', 'command', ',', 'defaults', '=', 'False', ')', ':', 'cmdline', '=', "'pecl {0}'", '.', 'format', '(', 'command', ')', 'if', 'salt', '.', 'utils', '.', 'data', '.', 'is_true', '(', 'defaults', ')', ':', 'cmdline', '=', '"yes \'\' | "', '+', 'cmdline', 'ret', '=', '__salt__', '[', "'cmd.run_all'", ']', '(', 'cmdline', ',', 'python_shell', '=', 'True', ')', 'if', 'ret', '[', "'retcode'", ']', '==', '0', ':', 'return', 'ret', '[', "'stdout'", ']', 'else', ':', 'log', '.', 'error', '(', "'Problem running pecl. Is php-pear installed?'", ')', 'return', "''"]
Execute the command passed with pecl
['Execute', 'the', 'command', 'passed', 'with', 'pecl']
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/pecl.py#L40-L54
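The prompt-defaulting prefix in _pecl works by piping endless empty lines from `yes` into pecl; the command string can be inspected without running anything (the subcommand is hypothetical):

command = 'install imagick'  # hypothetical pecl subcommand
cmdline = 'pecl {0}'.format(command)
defaults = True
if defaults:
    # `yes ''` prints empty lines forever, auto-accepting pecl's prompts.
    cmdline = "yes '' | " + cmdline
print(cmdline)  # yes '' | pecl install imagick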
5,754
saltant-org/saltant-py
saltant/models/container_task_type.py
ContainerTaskType.put
def put(self): """Updates this task type on the saltant server. Returns: :class:`saltant.models.container_task_type.ContainerTaskType`: A task type model instance representing the task type just updated. """ return self.manager.put( id=self.id, name=self.name, description=self.description, command_to_run=self.command_to_run, environment_variables=self.environment_variables, required_arguments=self.required_arguments, required_arguments_default_values=( self.required_arguments_default_values ), logs_path=self.logs_path, results_path=self.results_path, container_image=self.container_image, container_type=self.container_type, )
python
def put(self): """Updates this task type on the saltant server. Returns: :class:`saltant.models.container_task_type.ContainerTaskType`: A task type model instance representing the task type just updated. """ return self.manager.put( id=self.id, name=self.name, description=self.description, command_to_run=self.command_to_run, environment_variables=self.environment_variables, required_arguments=self.required_arguments, required_arguments_default_values=( self.required_arguments_default_values ), logs_path=self.logs_path, results_path=self.results_path, container_image=self.container_image, container_type=self.container_type, )
['def', 'put', '(', 'self', ')', ':', 'return', 'self', '.', 'manager', '.', 'put', '(', 'id', '=', 'self', '.', 'id', ',', 'name', '=', 'self', '.', 'name', ',', 'description', '=', 'self', '.', 'description', ',', 'command_to_run', '=', 'self', '.', 'command_to_run', ',', 'environment_variables', '=', 'self', '.', 'environment_variables', ',', 'required_arguments', '=', 'self', '.', 'required_arguments', ',', 'required_arguments_default_values', '=', '(', 'self', '.', 'required_arguments_default_values', ')', ',', 'logs_path', '=', 'self', '.', 'logs_path', ',', 'results_path', '=', 'self', '.', 'results_path', ',', 'container_image', '=', 'self', '.', 'container_image', ',', 'container_type', '=', 'self', '.', 'container_type', ',', ')']
Updates this task type on the saltant server. Returns: :class:`saltant.models.container_task_type.ContainerTaskType`: A task type model instance representing the task type just updated.
['Updates', 'this', 'task', 'type', 'on', 'the', 'saltant', 'server', '.']
train
https://github.com/saltant-org/saltant-py/blob/bf3bdbc4ec9c772c7f621f8bd6a76c5932af68be/saltant/models/container_task_type.py#L101-L123
5,755
signalfx/signalfx-python
signalfx/rest.py
SignalFxRestClient.get_dashboard
def get_dashboard(self, id, **kwargs): """Retrieve a (v2) dashboard by id. """ resp = self._get_object_by_name(self._DASHBOARD_ENDPOINT_SUFFIX, id, **kwargs) return resp
python
def get_dashboard(self, id, **kwargs): """Retrieve a (v2) dashboard by id. """ resp = self._get_object_by_name(self._DASHBOARD_ENDPOINT_SUFFIX, id, **kwargs) return resp
['def', 'get_dashboard', '(', 'self', ',', 'id', ',', '*', '*', 'kwargs', ')', ':', 'resp', '=', 'self', '.', '_get_object_by_name', '(', 'self', '.', '_DASHBOARD_ENDPOINT_SUFFIX', ',', 'id', ',', '*', '*', 'kwargs', ')', 'return', 'resp']
Retrieve a (v2) dashboard by id.
['Retrieve', 'a', '(', 'v2', ')', 'dashboard', 'by', 'id', '.']
train
https://github.com/signalfx/signalfx-python/blob/650eb9a2b301bcc795e4e3a8c031574ade69849d/signalfx/rest.py#L359-L364
5,756
saltstack/salt
salt/modules/rest_service.py
status
def status(name, sig=None): ''' Return the status for a service via rest_sample. If the name contains globbing, a dict mapping service name to True/False values is returned. .. versionadded:: 2015.8.0 .. versionchanged:: 2018.3.0 The service name can now be a glob (e.g. ``salt*``) Args: name (str): The name of the service to check sig (str): Not implemented Returns: bool: True if running, False otherwise dict: Maps service name to True if running, False otherwise CLI Example: .. code-block:: bash salt '*' service.status <service name> ''' proxy_fn = 'rest_sample.service_status' contains_globbing = bool(re.search(r'\*|\?|\[.+\]', name)) if contains_globbing: services = fnmatch.filter(get_all(), name) else: services = [name] results = {} for service in services: resp = __proxy__[proxy_fn](service) if resp['comment'] == 'running': results[service] = True else: results[service] = False if contains_globbing: return results return results[name]
python
def status(name, sig=None): ''' Return the status for a service via rest_sample. If the name contains globbing, a dict mapping service name to True/False values is returned. .. versionadded:: 2015.8.0 .. versionchanged:: 2018.3.0 The service name can now be a glob (e.g. ``salt*``) Args: name (str): The name of the service to check sig (str): Not implemented Returns: bool: True if running, False otherwise dict: Maps service name to True if running, False otherwise CLI Example: .. code-block:: bash salt '*' service.status <service name> ''' proxy_fn = 'rest_sample.service_status' contains_globbing = bool(re.search(r'\*|\?|\[.+\]', name)) if contains_globbing: services = fnmatch.filter(get_all(), name) else: services = [name] results = {} for service in services: resp = __proxy__[proxy_fn](service) if resp['comment'] == 'running': results[service] = True else: results[service] = False if contains_globbing: return results return results[name]
['def', 'status', '(', 'name', ',', 'sig', '=', 'None', ')', ':', 'proxy_fn', '=', "'rest_sample.service_status'", 'contains_globbing', '=', 'bool', '(', 're', '.', 'search', '(', "r'\\*|\\?|\\[.+\\]'", ',', 'name', ')', ')', 'if', 'contains_globbing', ':', 'services', '=', 'fnmatch', '.', 'filter', '(', 'get_all', '(', ')', ',', 'name', ')', 'else', ':', 'services', '=', '[', 'name', ']', 'results', '=', '{', '}', 'for', 'service', 'in', 'services', ':', 'resp', '=', '__proxy__', '[', 'proxy_fn', ']', '(', 'service', ')', 'if', 'resp', '[', "'comment'", ']', '==', "'running'", ':', 'results', '[', 'service', ']', '=', 'True', 'else', ':', 'results', '[', 'service', ']', '=', 'False', 'if', 'contains_globbing', ':', 'return', 'results', 'return', 'results', '[', 'name', ']']
Return the status for a service via rest_sample. If the name contains globbing, a dict mapping service name to True/False values is returned. .. versionadded:: 2015.8.0 .. versionchanged:: 2018.3.0 The service name can now be a glob (e.g. ``salt*``) Args: name (str): The name of the service to check sig (str): Not implemented Returns: bool: True if running, False otherwise dict: Maps service name to True if running, False otherwise CLI Example: .. code-block:: bash salt '*' service.status <service name>
['Return', 'the', 'status', 'for', 'a', 'service', 'via', 'rest_sample', '.', 'If', 'the', 'name', 'contains', 'globbing', 'a', 'dict', 'mapping', 'service', 'name', 'to', 'True', '/', 'False', 'values', 'is', 'returned', '.']
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/rest_service.py#L128-L169
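The glob handling in rest_service.status generalises to any name-to-state lookup; a standalone sketch with a fabricated service table in place of the proxy call:

import fnmatch
import re

STATES = {'salt-minion': 'running', 'salt-master': 'stopped', 'sshd': 'running'}

def status(name):
    # A glob returns a dict of matches; a plain name returns a single bool.
    if re.search(r'\*|\?|\[.+\]', name):
        return {s: STATES[s] == 'running' for s in fnmatch.filter(STATES, name)}
    return STATES[name] == 'running'

print(status('salt*'))  # {'salt-minion': True, 'salt-master': False}
print(status('sshd'))   # True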
5,757
spookey/photon
photon/util/structures.py
dict_merge
def dict_merge(o, v): ''' Recursively climbs through dictionaries and merges them together. :param o: The first dictionary :param v: The second dictionary :returns: A dictionary (who would have guessed?) .. note:: Make sure `o` & `v` are indeed dictionaries, bad things will happen otherwise! ''' if not isinstance(v, dict): return v res = _deepcopy(o) for key in v.keys(): if res.get(key) and isinstance(res[key], dict): res[key] = dict_merge(res[key], v[key]) else: res[key] = _deepcopy(v[key]) return res
python
def dict_merge(o, v): ''' Recursively climbs through dictionaries and merges them together. :param o: The first dictionary :param v: The second dictionary :returns: A dictionary (who would have guessed?) .. note:: Make sure `o` & `v` are indeed dictionaries, bad things will happen otherwise! ''' if not isinstance(v, dict): return v res = _deepcopy(o) for key in v.keys(): if res.get(key) and isinstance(res[key], dict): res[key] = dict_merge(res[key], v[key]) else: res[key] = _deepcopy(v[key]) return res
['def', 'dict_merge', '(', 'o', ',', 'v', ')', ':', 'if', 'not', 'isinstance', '(', 'v', ',', 'dict', ')', ':', 'return', 'v', 'res', '=', '_deepcopy', '(', 'o', ')', 'for', 'key', 'in', 'v', '.', 'keys', '(', ')', ':', 'if', 'res', '.', 'get', '(', 'key', ')', 'and', 'isinstance', '(', 'res', '[', 'key', ']', ',', 'dict', ')', ':', 'res', '[', 'key', ']', '=', 'dict_merge', '(', 'res', '[', 'key', ']', ',', 'v', '[', 'key', ']', ')', 'else', ':', 'res', '[', 'key', ']', '=', '_deepcopy', '(', 'v', '[', 'key', ']', ')', 'return', 'res']
Recursively climbs through dictionaries and merges them together. :param o: The first dictionary :param v: The second dictionary :returns: A dictionary (who would have guessed?) .. note:: Make sure `o` & `v` are indeed dictionaries, bad things will happen otherwise!
['Recursively', 'climbs', 'through', 'dictionaries', 'and', 'merges', 'them', 'together', '.']
train
https://github.com/spookey/photon/blob/57212a26ce713ab7723910ee49e3d0ba1697799f/photon/util/structures.py#L66-L90
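A worked merge showing dict_merge's recursion; `copy.deepcopy` stands in for the module's private `_deepcopy` alias:

from copy import deepcopy as _deepcopy

def dict_merge(o, v):
    if not isinstance(v, dict):
        return v
    res = _deepcopy(o)
    for key in v:
        if res.get(key) and isinstance(res[key], dict):
            res[key] = dict_merge(res[key], v[key])
        else:
            res[key] = _deepcopy(v[key])
    return res

a = {'db': {'host': 'localhost', 'port': 5432}}
b = {'db': {'port': 6432}, 'debug': True}
print(dict_merge(a, b))
# {'db': {'host': 'localhost', 'port': 6432}, 'debug': True}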
5,758
savvastj/nbashots
nbashots/api.py
get_team_id
def get_team_id(team_name): """ Returns the team ID associated with the team name that is passed in. Parameters ---------- team_name : str The team name whose ID we want. NOTE: Only pass in the team name (e.g. "Lakers"), not the city, or city and team name, or the team abbreviation. Returns ------- team_id : int The team ID associated with the team name. """ df = get_all_team_ids() df = df[df.TEAM_NAME == team_name] if len(df) == 0: er = "Invalid team name or there is no team with that name." raise ValueError(er) team_id = df.TEAM_ID.iloc[0] return team_id
python
def get_team_id(team_name): """ Returns the team ID associated with the team name that is passed in. Parameters ---------- team_name : str The team name whose ID we want. NOTE: Only pass in the team name (e.g. "Lakers"), not the city, or city and team name, or the team abbreviation. Returns ------- team_id : int The team ID associated with the team name. """ df = get_all_team_ids() df = df[df.TEAM_NAME == team_name] if len(df) == 0: er = "Invalid team name or there is no team with that name." raise ValueError(er) team_id = df.TEAM_ID.iloc[0] return team_id
['def', 'get_team_id', '(', 'team_name', ')', ':', 'df', '=', 'get_all_team_ids', '(', ')', 'df', '=', 'df', '[', 'df', '.', 'TEAM_NAME', '==', 'team_name', ']', 'if', 'len', '(', 'df', ')', '==', '0', ':', 'er', '=', '"Invalid team name or there is no team with that name."', 'raise', 'ValueError', '(', 'er', ')', 'team_id', '=', 'df', '.', 'TEAM_ID', '.', 'iloc', '[', '0', ']', 'return', 'team_id']
Returns the team ID associated with the team name that is passed in. Parameters ---------- team_name : str The team name whose ID we want. NOTE: Only pass in the team name (e.g. "Lakers"), not the city, or city and team name, or the team abbreviation. Returns ------- team_id : int The team ID associated with the team name.
['Returns', 'the', 'team', 'ID', 'associated', 'with', 'the', 'team', 'name', 'that', 'is', 'passed', 'in', '.']
train
https://github.com/savvastj/nbashots/blob/76ece28d717f10b25eb0fc681b317df6ef6b5157/nbashots/api.py#L361-L383
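The lookup in get_team_id is a plain pandas filter; a sketch against a two-row stand-in for get_all_team_ids() (IDs illustrative):

import pandas as pd

df = pd.DataFrame({'TEAM_NAME': ['Lakers', 'Celtics'],
                   'TEAM_ID': [1610612747, 1610612738]})

match = df[df.TEAM_NAME == 'Lakers']
if len(match) == 0:
    raise ValueError('Invalid team name or there is no team with that name.')
print(match.TEAM_ID.iloc[0])  # 1610612747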
5,759
log2timeline/dfvfs
dfvfs/resolver_helpers/manager.py
ResolverHelperManager.RegisterHelper
def RegisterHelper(cls, resolver_helper): """Registers a path specification resolver helper. Args: resolver_helper (ResolverHelper): resolver helper. Raises: KeyError: if resolver helper object is already set for the corresponding type indicator. """ if resolver_helper.type_indicator in cls._resolver_helpers: raise KeyError(( 'Resolver helper object already set for type indicator: ' '{0!s}.').format(resolver_helper.type_indicator)) cls._resolver_helpers[resolver_helper.type_indicator] = resolver_helper
python
def RegisterHelper(cls, resolver_helper): """Registers a path specification resolver helper. Args: resolver_helper (ResolverHelper): resolver helper. Raises: KeyError: if resolver helper object is already set for the corresponding type indicator. """ if resolver_helper.type_indicator in cls._resolver_helpers: raise KeyError(( 'Resolver helper object already set for type indicator: ' '{0!s}.').format(resolver_helper.type_indicator)) cls._resolver_helpers[resolver_helper.type_indicator] = resolver_helper
['def', 'RegisterHelper', '(', 'cls', ',', 'resolver_helper', ')', ':', 'if', 'resolver_helper', '.', 'type_indicator', 'in', 'cls', '.', '_resolver_helpers', ':', 'raise', 'KeyError', '(', '(', "'Resolver helper object already set for type indicator: '", "'{0!s}.'", ')', '.', 'format', '(', 'resolver_helper', '.', 'type_indicator', ')', ')', 'cls', '.', '_resolver_helpers', '[', 'resolver_helper', '.', 'type_indicator', ']', '=', 'resolver_helper']
Registers a path specification resolver helper. Args: resolver_helper (ResolverHelper): resolver helper. Raises: KeyError: if resolver helper object is already set for the corresponding type indicator.
['Registers', 'a', 'path', 'specification', 'resolver', 'helper', '.']
train
https://github.com/log2timeline/dfvfs/blob/2b3ccd115f9901d89f383397d4a1376a873c83c4/dfvfs/resolver_helpers/manager.py#L52-L67
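The duplicate-registration guard in RegisterHelper is a standard class-registry idiom; a pared-down, self-contained sketch:

class Manager:
    _helpers = {}

    @classmethod
    def register(cls, helper):
        # Refuse to silently overwrite an existing registration.
        if helper.type_indicator in cls._helpers:
            raise KeyError(
                'Helper already set for type indicator: {0!s}.'.format(
                    helper.type_indicator))
        cls._helpers[helper.type_indicator] = helper

class TSKHelper:
    type_indicator = 'TSK'  # illustrative type indicator

Manager.register(TSKHelper())
print(Manager._helpers.keys())  # dict_keys(['TSK'])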
5,760
saltstack/salt
salt/modules/redismod.py
sentinel_get_master_ip
def sentinel_get_master_ip(master, host=None, port=None, password=None): ''' Get ip for sentinel master .. versionadded: 2016.3.0 CLI Example: .. code-block:: bash salt '*' redis.sentinel_get_master_ip 'mymaster' ''' server = _sconnect(host, port, password) ret = server.sentinel_get_master_addr_by_name(master) return dict(list(zip(('master_host', 'master_port'), ret)))
python
def sentinel_get_master_ip(master, host=None, port=None, password=None): ''' Get ip for sentinel master .. versionadded: 2016.3.0 CLI Example: .. code-block:: bash salt '*' redis.sentinel_get_master_ip 'mymaster' ''' server = _sconnect(host, port, password) ret = server.sentinel_get_master_addr_by_name(master) return dict(list(zip(('master_host', 'master_port'), ret)))
['def', 'sentinel_get_master_ip', '(', 'master', ',', 'host', '=', 'None', ',', 'port', '=', 'None', ',', 'password', '=', 'None', ')', ':', 'server', '=', '_sconnect', '(', 'host', ',', 'port', ',', 'password', ')', 'ret', '=', 'server', '.', 'sentinel_get_master_addr_by_name', '(', 'master', ')', 'return', 'dict', '(', 'list', '(', 'zip', '(', '(', "'master_host'", ',', "'master_port'", ')', ',', 'ret', ')', ')', ')']
Get ip for sentinel master .. versionadded: 2016.3.0 CLI Example: .. code-block:: bash salt '*' redis.sentinel_get_master_ip 'mymaster'
['Get', 'ip', 'for', 'sentinel', 'master']
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/redismod.py#L705-L719
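The response packing in sentinel_get_master_ip is just zipping fixed keys over the sentinel reply; with a fabricated address tuple:

ret = ('10.0.0.5', 26379)  # fabricated sentinel reply (host, port)
print(dict(zip(('master_host', 'master_port'), ret)))
# {'master_host': '10.0.0.5', 'master_port': 26379}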
5,761
Kozea/cairocffi
cairocffi/fonts.py
FontOptions.set_antialias
def set_antialias(self, antialias): """Changes the :ref:`ANTIALIAS` for the font options object. This specifies the type of antialiasing to do when rendering text. """ cairo.cairo_font_options_set_antialias(self._pointer, antialias) self._check_status()
python
def set_antialias(self, antialias): """Changes the :ref:`ANTIALIAS` for the font options object. This specifies the type of antialiasing to do when rendering text. """ cairo.cairo_font_options_set_antialias(self._pointer, antialias) self._check_status()
['def', 'set_antialias', '(', 'self', ',', 'antialias', ')', ':', 'cairo', '.', 'cairo_font_options_set_antialias', '(', 'self', '.', '_pointer', ',', 'antialias', ')', 'self', '.', '_check_status', '(', ')']
Changes the :ref:`ANTIALIAS` for the font options object. This specifies the type of antialiasing to do when rendering text.
['Changes', 'the', ':', 'ref', ':', 'ANTIALIAS', 'for', 'the', 'font', 'options', 'object', '.', 'This', 'specifies', 'the', 'type', 'of', 'antialiasing', 'to', 'do', 'when', 'rendering', 'text', '.']
train
https://github.com/Kozea/cairocffi/blob/450853add7e32eea20985b6aa5f54d9cb3cd04fe/cairocffi/fonts.py#L434-L440
5,762
splunk/splunk-sdk-python
examples/analytics/bottle.py
_reloader_observer
def _reloader_observer(server, app, interval): ''' Start a child process with identical commandline arguments and restart it as long as it exits with status code 3. Also create a lockfile and touch it (update mtime) every interval seconds. ''' fd, lockfile = tempfile.mkstemp(prefix='bottle-reloader.', suffix='.lock') os.close(fd) # We only need this file to exist. We never write to it try: while os.path.exists(lockfile): args = [sys.executable] + sys.argv environ = os.environ.copy() environ['BOTTLE_CHILD'] = 'true' environ['BOTTLE_LOCKFILE'] = lockfile p = subprocess.Popen(args, env=environ) while p.poll() is None: # Busy wait... os.utime(lockfile, None) # I am alive! time.sleep(interval) if p.poll() != 3: if os.path.exists(lockfile): os.unlink(lockfile) sys.exit(p.poll()) elif not server.quiet: print("Reloading server...") except KeyboardInterrupt: pass if os.path.exists(lockfile): os.unlink(lockfile)
python
def _reloader_observer(server, app, interval): ''' Start a child process with identical commandline arguments and restart it as long as it exits with status code 3. Also create a lockfile and touch it (update mtime) every interval seconds. ''' fd, lockfile = tempfile.mkstemp(prefix='bottle-reloader.', suffix='.lock') os.close(fd) # We only need this file to exist. We never write to it try: while os.path.exists(lockfile): args = [sys.executable] + sys.argv environ = os.environ.copy() environ['BOTTLE_CHILD'] = 'true' environ['BOTTLE_LOCKFILE'] = lockfile p = subprocess.Popen(args, env=environ) while p.poll() is None: # Busy wait... os.utime(lockfile, None) # I am alive! time.sleep(interval) if p.poll() != 3: if os.path.exists(lockfile): os.unlink(lockfile) sys.exit(p.poll()) elif not server.quiet: print("Reloading server...") except KeyboardInterrupt: pass if os.path.exists(lockfile): os.unlink(lockfile)
['def', '_reloader_observer', '(', 'server', ',', 'app', ',', 'interval', ')', ':', 'fd', ',', 'lockfile', '=', 'tempfile', '.', 'mkstemp', '(', 'prefix', '=', "'bottle-reloader.'", ',', 'suffix', '=', "'.lock'", ')', 'os', '.', 'close', '(', 'fd', ')', '# We only need this file to exist. We never write to it', 'try', ':', 'while', 'os', '.', 'path', '.', 'exists', '(', 'lockfile', ')', ':', 'args', '=', '[', 'sys', '.', 'executable', ']', '+', 'sys', '.', 'argv', 'environ', '=', 'os', '.', 'environ', '.', 'copy', '(', ')', 'environ', '[', "'BOTTLE_CHILD'", ']', '=', "'true'", 'environ', '[', "'BOTTLE_LOCKFILE'", ']', '=', 'lockfile', 'p', '=', 'subprocess', '.', 'Popen', '(', 'args', ',', 'env', '=', 'environ', ')', 'while', 'p', '.', 'poll', '(', ')', 'is', 'None', ':', '# Busy wait...', 'os', '.', 'utime', '(', 'lockfile', ',', 'None', ')', '# I am alive!', 'time', '.', 'sleep', '(', 'interval', ')', 'if', 'p', '.', 'poll', '(', ')', '!=', '3', ':', 'if', 'os', '.', 'path', '.', 'exists', '(', 'lockfile', ')', ':', 'os', '.', 'unlink', '(', 'lockfile', ')', 'sys', '.', 'exit', '(', 'p', '.', 'poll', '(', ')', ')', 'elif', 'not', 'server', '.', 'quiet', ':', 'print', '(', '"Reloading server..."', ')', 'except', 'KeyboardInterrupt', ':', 'pass', 'if', 'os', '.', 'path', '.', 'exists', '(', 'lockfile', ')', ':', 'os', '.', 'unlink', '(', 'lockfile', ')']
Start a child process with identical commandline arguments and restart it as long as it exits with status code 3. Also create a lockfile and touch it (update mtime) every interval seconds.
['Start', 'a', 'child', 'process', 'with', 'identical', 'commandline', 'arguments', 'and', 'restart', 'it', 'as', 'long', 'as', 'it', 'exits', 'with', 'status', 'code', '3', '.', 'Also', 'create', 'a', 'lockfile', 'and', 'touch', 'it', '(', 'update', 'mtime', ')', 'every', 'interval', 'seconds', '.']
train
https://github.com/splunk/splunk-sdk-python/blob/a245a4eeb93b3621730418008e31715912bcdcd8/examples/analytics/bottle.py#L2050-L2074
5,763
aiogram/aiogram
aiogram/bot/api.py
check_token
def check_token(token: str) -> bool: """ Validate BOT token :param token: :return: """ if any(x.isspace() for x in token): raise exceptions.ValidationError('Token is invalid!') left, sep, right = token.partition(':') if (not sep) or (not left.isdigit()) or (len(left) < 3): raise exceptions.ValidationError('Token is invalid!') return True
python
def check_token(token: str) -> bool: """ Validate BOT token :param token: :return: """ if any(x.isspace() for x in token): raise exceptions.ValidationError('Token is invalid!') left, sep, right = token.partition(':') if (not sep) or (not left.isdigit()) or (len(left) < 3): raise exceptions.ValidationError('Token is invalid!') return True
['def', 'check_token', '(', 'token', ':', 'str', ')', '->', 'bool', ':', 'if', 'any', '(', 'x', '.', 'isspace', '(', ')', 'for', 'x', 'in', 'token', ')', ':', 'raise', 'exceptions', '.', 'ValidationError', '(', "'Token is invalid!'", ')', 'left', ',', 'sep', ',', 'right', '=', 'token', '.', 'partition', '(', "':'", ')', 'if', '(', 'not', 'sep', ')', 'or', '(', 'not', 'left', '.', 'isdigit', '(', ')', ')', 'or', '(', 'len', '(', 'left', ')', '<', '3', ')', ':', 'raise', 'exceptions', '.', 'ValidationError', '(', "'Token is invalid!'", ')', 'return', 'True']
Validate BOT token :param token: :return:
['Validate', 'BOT', 'token']
train
https://github.com/aiogram/aiogram/blob/2af930149ce2482547721e2c8755c10307295e48/aiogram/bot/api.py#L20-L34
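The check_token validation runs standalone; this sketch returns False instead of raising the library's ValidationError, and the sample tokens are fabricated:

def check_token(token):
    # Reject whitespace, then require '<digits>:<rest>' with >= 3 digits.
    if any(x.isspace() for x in token):
        return False
    left, sep, _ = token.partition(':')
    return bool(sep) and left.isdigit() and len(left) >= 3

print(check_token('123456789:AAExampleBotToken'))  # True
print(check_token('12:too-short-id'))              # False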
5,764
tcalmant/ipopo
pelix/rsa/__init__.py
get_string_plus_property
def get_string_plus_property(name, props, default=None): # type: (str, Dict[str, Any], Optional[Any]) -> Any """ Returns the value of the given property or the default value :param name: A property name :param props: A dictionary of properties :param default: Value to return if the property doesn't exist :return: The property value or the default one """ val = get_string_plus_property_value(get_prop_value(name, props, default)) return default if val is None else val
python
def get_string_plus_property(name, props, default=None): # type: (str, Dict[str, Any], Optional[Any]) -> Any """ Returns the value of the given property or the default value :param name: A property name :param props: A dictionary of properties :param default: Value to return if the property doesn't exist :return: The property value or the default one """ val = get_string_plus_property_value(get_prop_value(name, props, default)) return default if val is None else val
['def', 'get_string_plus_property', '(', 'name', ',', 'props', ',', 'default', '=', 'None', ')', ':', '# type: (str, Dict[str, Any], Optional[Any]) -> Any', 'val', '=', 'get_string_plus_property_value', '(', 'get_prop_value', '(', 'name', ',', 'props', ',', 'default', ')', ')', 'return', 'default', 'if', 'val', 'is', 'None', 'else', 'val']
Returns the value of the given property or the default value :param name: A property name :param props: A dictionary of properties :param default: Value to return if the property doesn't exist :return: The property value or the default one
['Returns', 'the', 'value', 'of', 'the', 'given', 'property', 'or', 'the', 'default', 'value']
train
https://github.com/tcalmant/ipopo/blob/2f9ae0c44cd9c34ef1a9d50837b3254e75678eb1/pelix/rsa/__init__.py#L1211-L1222
5,765
tensorflow/tensorboard
tensorboard/plugins/beholder/visualizer.py
Visualizer._sections_to_variance_sections
def _sections_to_variance_sections(self, sections_over_time): '''Computes the variance of corresponding sections over time. Returns: a list of np arrays. ''' variance_sections = [] for i in range(len(sections_over_time[0])): time_sections = [sections[i] for sections in sections_over_time] variance = np.var(time_sections, axis=0) variance_sections.append(variance) return variance_sections
python
def _sections_to_variance_sections(self, sections_over_time): '''Computes the variance of corresponding sections over time. Returns: a list of np arrays. ''' variance_sections = [] for i in range(len(sections_over_time[0])): time_sections = [sections[i] for sections in sections_over_time] variance = np.var(time_sections, axis=0) variance_sections.append(variance) return variance_sections
['def', '_sections_to_variance_sections', '(', 'self', ',', 'sections_over_time', ')', ':', 'variance_sections', '=', '[', ']', 'for', 'i', 'in', 'range', '(', 'len', '(', 'sections_over_time', '[', '0', ']', ')', ')', ':', 'time_sections', '=', '[', 'sections', '[', 'i', ']', 'for', 'sections', 'in', 'sections_over_time', ']', 'variance', '=', 'np', '.', 'var', '(', 'time_sections', ',', 'axis', '=', '0', ')', 'variance_sections', '.', 'append', '(', 'variance', ')', 'return', 'variance_sections']
Computes the variance of corresponding sections over time. Returns: a list of np arrays.
['Computes', 'the', 'variance', 'of', 'corresponding', 'sections', 'over', 'time', '.']
train
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/beholder/visualizer.py#L225-L238
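A quick toy check of the variance computation (requires numpy; two time steps, each with two sections of two values):

import numpy as np

sections_over_time = [
    [np.array([1.0, 2.0]), np.array([3.0, 4.0])],   # time step 0
    [np.array([3.0, 2.0]), np.array([3.0, 8.0])],   # time step 1
]
variance_sections = [
    np.var([frame[i] for frame in sections_over_time], axis=0)
    for i in range(len(sections_over_time[0]))
]
print(variance_sections)   # [array([1., 0.]), array([0., 4.])]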
5,766
dmlc/gluon-nlp
scripts/bert/embedding.py
BertEmbedding.data_loader
def data_loader(self, sentences, shuffle=False): """Load, tokenize and prepare the input sentences.""" dataset = BertEmbeddingDataset(sentences, self.transform) return DataLoader(dataset=dataset, batch_size=self.batch_size, shuffle=shuffle)
python
def data_loader(self, sentences, shuffle=False): """Load, tokenize and prepare the input sentences.""" dataset = BertEmbeddingDataset(sentences, self.transform) return DataLoader(dataset=dataset, batch_size=self.batch_size, shuffle=shuffle)
['def', 'data_loader', '(', 'self', ',', 'sentences', ',', 'shuffle', '=', 'False', ')', ':', 'dataset', '=', 'BertEmbeddingDataset', '(', 'sentences', ',', 'self', '.', 'transform', ')', 'return', 'DataLoader', '(', 'dataset', '=', 'dataset', ',', 'batch_size', '=', 'self', '.', 'batch_size', ',', 'shuffle', '=', 'shuffle', ')']
Load, tokenize and prepare the input sentences.
['Load', 'tokenize', 'and', 'prepare', 'the', 'input', 'sentences', '.']
train
https://github.com/dmlc/gluon-nlp/blob/4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba/scripts/bert/embedding.py#L141-L144
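A minimal illustration of wrapping a dataset in a Gluon DataLoader, as the method above does (requires mxnet; SimpleDataset stands in for BertEmbeddingDataset):

from mxnet.gluon.data import DataLoader, SimpleDataset

dataset = SimpleDataset([1, 2, 3, 4, 5])
loader = DataLoader(dataset=dataset, batch_size=2, shuffle=False)
for batch in loader:
    print(batch)   # NDArray batches of size 2, then a final batch of 1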
5,767
farshidce/touchworks-python
touchworks/api/http.py
TouchWorks.get_providers
def get_providers(self, security_filter, name_filter='%', only_providers_flag='Y', internal_external='I', ordering_authority='', real_provider='N'): """ invokes TouchWorksMagicConstants.ACTION_GET_PROVIDERS action :param security_filter - This is the EntryCode of the Security_Code_DE dictionary for the providers being sought. A list of valid security codes can be obtained from GetDictionary on the Security_Code_DE dictionary. :param name_filter :param only_providers_flag :param internal_external :param ordering_authority :param real_provider :return: JSON response """ magic = self._magic_json( action=TouchWorksMagicConstants.ACTION_GET_PROVIDERS, parameter1=security_filter, parameter2=name_filter, parameter3=only_providers_flag, parameter4=internal_external, parameter5=ordering_authority, parameter6=real_provider) response = self._http_request(TouchWorksEndPoints.MAGIC_JSON, data=magic) result = self._get_results_or_raise_if_magic_invalid( magic, response, TouchWorksMagicConstants.RESULT_GET_PROVIDERS) return result
python
def get_providers(self, security_filter, name_filter='%', only_providers_flag='Y', internal_external='I', ordering_authority='', real_provider='N'): """ invokes TouchWorksMagicConstants.ACTION_GET_PROVIDERS action :param security_filter - This is the EntryCode of the Security_Code_DE dictionary for the providers being sought. A list of valid security codes can be obtained from GetDictionary on the Security_Code_DE dictionary. :param name_filter :param only_providers_flag :param internal_external :param ordering_authority :param real_provider :return: JSON response """ magic = self._magic_json( action=TouchWorksMagicConstants.ACTION_GET_PROVIDERS, parameter1=security_filter, parameter2=name_filter, parameter3=only_providers_flag, parameter4=internal_external, parameter5=ordering_authority, parameter6=real_provider) response = self._http_request(TouchWorksEndPoints.MAGIC_JSON, data=magic) result = self._get_results_or_raise_if_magic_invalid( magic, response, TouchWorksMagicConstants.RESULT_GET_PROVIDERS) return result
['def', 'get_providers', '(', 'self', ',', 'security_filter', ',', 'name_filter', '=', "'%'", ',', 'only_providers_flag', '=', "'Y'", ',', 'internal_external', '=', "'I'", ',', 'ordering_authority', '=', "''", ',', 'real_provider', '=', "'N'", ')', ':', 'magic', '=', 'self', '.', '_magic_json', '(', 'action', '=', 'TouchWorksMagicConstants', '.', 'ACTION_GET_PROVIDERS', ',', 'parameter1', '=', 'security_filter', ',', 'parameter2', '=', 'name_filter', ',', 'parameter3', '=', 'only_providers_flag', ',', 'parameter4', '=', 'internal_external', ',', 'parameter5', '=', 'ordering_authority', ',', 'parameter6', '=', 'real_provider', ')', 'response', '=', 'self', '.', '_http_request', '(', 'TouchWorksEndPoints', '.', 'MAGIC_JSON', ',', 'data', '=', 'magic', ')', 'result', '=', 'self', '.', '_get_results_or_raise_if_magic_invalid', '(', 'magic', ',', 'response', ',', 'TouchWorksMagicConstants', '.', 'RESULT_GET_PROVIDERS', ')', 'return', 'result']
invokes TouchWorksMagicConstants.ACTION_GET_PROVIDERS action :param security_filter - This is the EntryCode of the Security_Code_DE dictionary for the providers being sought. A list of valid security codes can be obtained from GetDictionary on the Security_Code_DE dictionary. :param name_filter :param only_providers_flag :param internal_external :param ordering_authority :param real_provider :return: JSON response
['invokes', 'TouchWorksMagicConstants', '.', 'ACTION_GET_PROVIDERS', 'action', ':', 'param', 'security_filter', '-', 'This', 'is', 'the', 'EntryCode', 'of', 'the', 'Security_Code_DE', 'dictionary', 'for', 'the', 'providers', 'being', 'sought', '.', 'A', 'list', 'of', 'valid', 'security', 'codes', 'can', 'be', 'obtained', 'from', 'GetDictionary', 'on', 'the', 'Security_Code_DE', 'dictionary', '.', ':', 'param', 'name_filter', ':', 'param', 'only_providers_flag', ':', 'param', 'internal_external', ':', 'param', 'ordering_authority', ':', 'param', 'real_provider', ':', 'return', ':', 'JSON', 'response']
train
https://github.com/farshidce/touchworks-python/blob/ea8f93a0f4273de1317a318e945a571f5038ba62/touchworks/api/http.py#L593-L625
5,768
FNNDSC/pfurl
pfurl/pfurl.py
Pfurl.pullPath_copy
def pullPath_copy(self, d_msg, **kwargs): """ Handle the "copy" pull operation """ # Parse "header" information d_meta = d_msg['meta'] d_local = d_meta['local'] str_localPath = d_local['path'] d_remote = d_meta['remote'] d_transport = d_meta['transport'] d_copy = d_transport['copy'] # Pull the actual data into a dictionary holder d_curl = {} d_curl['remoteServer'] = self.pullPath_core() d_curl['copy'] = {} d_curl['copy']['status'] = d_curl['remoteServer']['status'] if not d_curl['copy']['status']: d_curl['copy']['msg'] = "Copy on remote server failed!" else: d_curl['copy']['msg'] = "Copy on remote server success!" return d_curl
python
def pullPath_copy(self, d_msg, **kwargs): """ Handle the "copy" pull operation """ # Parse "header" information d_meta = d_msg['meta'] d_local = d_meta['local'] str_localPath = d_local['path'] d_remote = d_meta['remote'] d_transport = d_meta['transport'] d_copy = d_transport['copy'] # Pull the actual data into a dictionary holder d_curl = {} d_curl['remoteServer'] = self.pullPath_core() d_curl['copy'] = {} d_curl['copy']['status'] = d_curl['remoteServer']['status'] if not d_curl['copy']['status']: d_curl['copy']['msg'] = "Copy on remote server failed!" else: d_curl['copy']['msg'] = "Copy on remote server success!" return d_curl
['def', 'pullPath_copy', '(', 'self', ',', 'd_msg', ',', '*', '*', 'kwargs', ')', ':', '# Parse "header" information', 'd_meta', '=', 'd_msg', '[', "'meta'", ']', 'd_local', '=', 'd_meta', '[', "'local'", ']', 'str_localPath', '=', 'd_local', '[', "'path'", ']', 'd_remote', '=', 'd_meta', '[', "'remote'", ']', 'd_transport', '=', 'd_meta', '[', "'transport'", ']', 'd_copy', '=', 'd_transport', '[', "'copy'", ']', '# Pull the actual data into a dictionary holder', 'd_curl', '=', '{', '}', 'd_curl', '[', "'remoteServer'", ']', '=', 'self', '.', 'pullPath_core', '(', ')', 'd_curl', '[', "'copy'", ']', '=', '{', '}', 'd_curl', '[', "'copy'", ']', '[', "'status'", ']', '=', 'd_curl', '[', "'remoteServer'", ']', '[', "'status'", ']', 'if', 'not', 'd_curl', '[', "'copy'", ']', '[', "'status'", ']', ':', 'd_curl', '[', "'copy'", ']', '[', "'msg'", ']', '=', '"Copy on remote server failed!"', 'else', ':', 'd_curl', '[', "'copy'", ']', '[', "'msg'", ']', '=', '"Copy on remote server success!"', 'return', 'd_curl']
Handle the "copy" pull operation
['Handle', 'the', 'copy', 'pull', 'operation']
train
https://github.com/FNNDSC/pfurl/blob/572f634ab582b7b7b7a3fbfd5bf12aadc1ba7958/pfurl/pfurl.py#L776-L799
5,769
aloetesting/aloe_django
aloe_django/steps/mail.py
mail_sent_count
def mail_sent_count(self, count): """ Test that `count` mails have been sent. Syntax: I have sent `count` emails Example: .. code-block:: gherkin Then I have sent 2 emails """ expected = int(count) actual = len(mail.outbox) assert expected == actual, \ "Expected to send {0} email(s), got {1}.".format(expected, actual)
python
def mail_sent_count(self, count): """ Test that `count` mails have been sent. Syntax: I have sent `count` emails Example: .. code-block:: gherkin Then I have sent 2 emails """ expected = int(count) actual = len(mail.outbox) assert expected == actual, \ "Expected to send {0} email(s), got {1}.".format(expected, actual)
['def', 'mail_sent_count', '(', 'self', ',', 'count', ')', ':', 'expected', '=', 'int', '(', 'count', ')', 'actual', '=', 'len', '(', 'mail', '.', 'outbox', ')', 'assert', 'expected', '==', 'actual', ',', '"Expected to send {0} email(s), got {1}."', '.', 'format', '(', 'expected', ',', 'actual', ')']
Test that `count` mails have been sent. Syntax: I have sent `count` emails Example: .. code-block:: gherkin Then I have sent 2 emails
['Test', 'that', 'count', 'mails', 'have', 'been', 'sent', '.']
train
https://github.com/aloetesting/aloe_django/blob/672eac97c97644bfe334e70696a6dc5ddf4ced02/aloe_django/steps/mail.py#L25-L42
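In context, the assertion above pairs with Django's in-memory test mailbox; a sketch assuming Django is configured with the locmem email backend, as in its test runner:

from django.core import mail
from django.core.mail import send_mail

send_mail('subject', 'body', 'from@example.com', ['to@example.com'])
send_mail('subject 2', 'body', 'from@example.com', ['to@example.com'])
assert len(mail.outbox) == 2   # what "I have sent 2 emails" checks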
5,770
softlayer/softlayer-python
SoftLayer/CLI/order/item_list.py
sort_items
def sort_items(items): """sorts the items into a dictionary of categories, with a list of items""" sorted_items = {} for item in items: category = lookup(item, 'itemCategory', 'categoryCode') if sorted_items.get(category) is None: sorted_items[category] = [] sorted_items[category].append(item) return sorted_items
python
def sort_items(items): """sorts the items into a dictionary of categories, with a list of items""" sorted_items = {} for item in items: category = lookup(item, 'itemCategory', 'categoryCode') if sorted_items.get(category) is None: sorted_items[category] = [] sorted_items[category].append(item) return sorted_items
['def', 'sort_items', '(', 'items', ')', ':', 'sorted_items', '=', '{', '}', 'for', 'item', 'in', 'items', ':', 'category', '=', 'lookup', '(', 'item', ',', "'itemCategory'", ',', "'categoryCode'", ')', 'if', 'sorted_items', '.', 'get', '(', 'category', ')', 'is', 'None', ':', 'sorted_items', '[', 'category', ']', '=', '[', ']', 'sorted_items', '[', 'category', ']', '.', 'append', '(', 'item', ')', 'return', 'sorted_items']
sorts the items into a dictionary of categories, with a list of items
['sorts', 'the', 'items', 'into', 'a', 'dictionary', 'of', 'categories', 'with', 'a', 'list', 'of', 'items']
train
https://github.com/softlayer/softlayer-python/blob/9f181be08cc3668353b05a6de0cb324f52cff6fa/SoftLayer/CLI/order/item_list.py#L56-L66
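The same grouping, standalone, with dict.setdefault and a toy lookup helper (the helper is a stand-in for SoftLayer's own lookup):

def lookup(item, *keys):
    for key in keys:                 # walk nested dict keys
        item = item.get(key, {})
    return item or None

items = [
    {'itemCategory': {'categoryCode': 'ram'}, 'id': 1},
    {'itemCategory': {'categoryCode': 'ram'}, 'id': 2},
    {'itemCategory': {'categoryCode': 'os'}, 'id': 3},
]
sorted_items = {}
for item in items:
    category = lookup(item, 'itemCategory', 'categoryCode')
    sorted_items.setdefault(category, []).append(item)
print({k: len(v) for k, v in sorted_items.items()})   # {'ram': 2, 'os': 1}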
5,771
delph-in/pydelphin
delphin/tdl.py
iterparse
def iterparse(source, encoding='utf-8'): """ Parse the TDL file *source* and iteratively yield parse events. If *source* is a filename, the file is opened and closed when the generator has finished, otherwise *source* is an open file object and will not be closed when the generator has finished. Parse events are `(event, object, lineno)` tuples, where `event` is a string (`"TypeDefinition"`, `"TypeAddendum"`, `"LexicalRuleDefinition"`, `"LetterSet"`, `"WildCard"`, `"LineComment"`, or `"BlockComment"`), `object` is the interpreted TDL object, and `lineno` is the line number where the entity began in *source*. Args: source (str, file): a filename or open file object encoding (str): the encoding of the file (default: `"utf-8"`; ignored if *source* is an open file) Yields: `(event, object, lineno)` tuples Example: >>> lex = {} >>> for event, obj, lineno in tdl.iterparse('erg/lexicon.tdl'): ... if event == 'TypeDefinition': ... lex[obj.identifier] = obj ... >>> lex['eucalyptus_n1']['SYNSEM.LKEYS.KEYREL.PRED'] <String object (_eucalyptus_n_1_rel) at 140625748595960> """ if hasattr(source, 'read'): for event in _parse2(source): yield event else: with io.open(source, encoding=encoding) as fh: for event in _parse2(fh): yield event
python
def iterparse(source, encoding='utf-8'): """ Parse the TDL file *source* and iteratively yield parse events. If *source* is a filename, the file is opened and closed when the generator has finished, otherwise *source* is an open file object and will not be closed when the generator has finished. Parse events are `(event, object, lineno)` tuples, where `event` is a string (`"TypeDefinition"`, `"TypeAddendum"`, `"LexicalRuleDefinition"`, `"LetterSet"`, `"WildCard"`, `"LineComment"`, or `"BlockComment"`), `object` is the interpreted TDL object, and `lineno` is the line number where the entity began in *source*. Args: source (str, file): a filename or open file object encoding (str): the encoding of the file (default: `"utf-8"`; ignored if *source* is an open file) Yields: `(event, object, lineno)` tuples Example: >>> lex = {} >>> for event, obj, lineno in tdl.iterparse('erg/lexicon.tdl'): ... if event == 'TypeDefinition': ... lex[obj.identifier] = obj ... >>> lex['eucalyptus_n1']['SYNSEM.LKEYS.KEYREL.PRED'] <String object (_eucalyptus_n_1_rel) at 140625748595960> """ if hasattr(source, 'read'): for event in _parse2(source): yield event else: with io.open(source, encoding=encoding) as fh: for event in _parse2(fh): yield event
['def', 'iterparse', '(', 'source', ',', 'encoding', '=', "'utf-8'", ')', ':', 'if', 'hasattr', '(', 'source', ',', "'read'", ')', ':', 'for', 'event', 'in', '_parse2', '(', 'source', ')', ':', 'yield', 'event', 'else', ':', 'with', 'io', '.', 'open', '(', 'source', ',', 'encoding', '=', 'encoding', ')', 'as', 'fh', ':', 'for', 'event', 'in', '_parse2', '(', 'fh', ')', ':', 'yield', 'event']
Parse the TDL file *source* and iteratively yield parse events. If *source* is a filename, the file is opened and closed when the generator has finished, otherwise *source* is an open file object and will not be closed when the generator has finished. Parse events are `(event, object, lineno)` tuples, where `event` is a string (`"TypeDefinition"`, `"TypeAddendum"`, `"LexicalRuleDefinition"`, `"LetterSet"`, `"WildCard"`, `"LineComment"`, or `"BlockComment"`), `object` is the interpreted TDL object, and `lineno` is the line number where the entity began in *source*. Args: source (str, file): a filename or open file object encoding (str): the encoding of the file (default: `"utf-8"`; ignored if *source* is an open file) Yields: `(event, object, lineno)` tuples Example: >>> lex = {} >>> for event, obj, lineno in tdl.iterparse('erg/lexicon.tdl'): ... if event == 'TypeDefinition': ... lex[obj.identifier] = obj ... >>> lex['eucalyptus_n1']['SYNSEM.LKEYS.KEYREL.PRED'] <String object (_eucalyptus_n_1_rel) at 140625748595960>
['Parse', 'the', 'TDL', 'file', '*', 'source', '*', 'and', 'iteratively', 'yield', 'parse', 'events', '.']
train
https://github.com/delph-in/pydelphin/blob/7bd2cd63ab7cf74803e1d6547b9ebc014b382abd/delphin/tdl.py#L1180-L1216
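The filename-or-open-file dispatch used above is a reusable idiom; a generic sketch:

import io

def iter_lines(source, encoding='utf-8'):
    if hasattr(source, 'read'):      # already an open file object: caller closes it
        yield from source
    else:                            # a path: open and close it here
        with io.open(source, encoding=encoding) as fh:
            yield from fh

# for line in iter_lines('grammar.tdl'): ...   # the same call accepts open files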
5,772
pantsbuild/pants
src/python/pants/backend/jvm/tasks/jvm_compile/jvm_compile.py
JvmCompile._compile_vts
def _compile_vts(self, vts, ctx, upstream_analysis, dependency_classpath, progress_message, settings, compiler_option_sets, zinc_file_manager, counter): """Compiles sources for the given vts into the given output dir. :param vts: VersionedTargetSet with one entry for the target. :param ctx: - A CompileContext instance for the target. :param dependency_classpath: A list of classpath entries of type ClasspathEntry for dependencies May be invoked concurrently on independent target sets. Postcondition: The individual targets in vts are up-to-date, as if each were compiled individually. """ if not ctx.sources: self.context.log.warn('Skipping {} compile for targets with no sources:\n {}' .format(self.name(), vts.targets)) else: counter_val = str(counter()).rjust(counter.format_length(), ' ') counter_str = '[{}/{}] '.format(counter_val, counter.size) # Do some reporting. self.context.log.info( counter_str, 'Compiling ', items_to_report_element(ctx.sources, '{} source'.format(self.name())), ' in ', items_to_report_element([t.address.reference() for t in vts.targets], 'target'), ' (', progress_message, ').') with self.context.new_workunit('compile', labels=[WorkUnitLabel.COMPILER]) as compile_workunit: try: directory_digest = self.compile( ctx, self._args, dependency_classpath, upstream_analysis, settings, compiler_option_sets, zinc_file_manager, self._get_plugin_map('javac', Java.global_instance(), ctx.target), self._get_plugin_map('scalac', ScalaPlatform.global_instance(), ctx.target), ) self._capture_logs(compile_workunit, ctx.log_dir) return directory_digest except TaskError: if self.get_options().suggest_missing_deps: logs = [path for _, name, _, path in self._find_logs(compile_workunit) if name == self.name()] if logs: self._find_missing_deps(logs, ctx.target) raise
python
def _compile_vts(self, vts, ctx, upstream_analysis, dependency_classpath, progress_message, settings, compiler_option_sets, zinc_file_manager, counter): """Compiles sources for the given vts into the given output dir. :param vts: VersionedTargetSet with one entry for the target. :param ctx: - A CompileContext instance for the target. :param dependency_classpath: A list of classpath entries of type ClasspathEntry for dependencies May be invoked concurrently on independent target sets. Postcondition: The individual targets in vts are up-to-date, as if each were compiled individually. """ if not ctx.sources: self.context.log.warn('Skipping {} compile for targets with no sources:\n {}' .format(self.name(), vts.targets)) else: counter_val = str(counter()).rjust(counter.format_length(), ' ') counter_str = '[{}/{}] '.format(counter_val, counter.size) # Do some reporting. self.context.log.info( counter_str, 'Compiling ', items_to_report_element(ctx.sources, '{} source'.format(self.name())), ' in ', items_to_report_element([t.address.reference() for t in vts.targets], 'target'), ' (', progress_message, ').') with self.context.new_workunit('compile', labels=[WorkUnitLabel.COMPILER]) as compile_workunit: try: directory_digest = self.compile( ctx, self._args, dependency_classpath, upstream_analysis, settings, compiler_option_sets, zinc_file_manager, self._get_plugin_map('javac', Java.global_instance(), ctx.target), self._get_plugin_map('scalac', ScalaPlatform.global_instance(), ctx.target), ) self._capture_logs(compile_workunit, ctx.log_dir) return directory_digest except TaskError: if self.get_options().suggest_missing_deps: logs = [path for _, name, _, path in self._find_logs(compile_workunit) if name == self.name()] if logs: self._find_missing_deps(logs, ctx.target) raise
['def', '_compile_vts', '(', 'self', ',', 'vts', ',', 'ctx', ',', 'upstream_analysis', ',', 'dependency_classpath', ',', 'progress_message', ',', 'settings', ',', 'compiler_option_sets', ',', 'zinc_file_manager', ',', 'counter', ')', ':', 'if', 'not', 'ctx', '.', 'sources', ':', 'self', '.', 'context', '.', 'log', '.', 'warn', '(', "'Skipping {} compile for targets with no sources:\\n {}'", '.', 'format', '(', 'self', '.', 'name', '(', ')', ',', 'vts', '.', 'targets', ')', ')', 'else', ':', 'counter_val', '=', 'str', '(', 'counter', '(', ')', ')', '.', 'rjust', '(', 'counter', '.', 'format_length', '(', ')', ',', "' '", ')', 'counter_str', '=', "'[{}/{}] '", '.', 'format', '(', 'counter_val', ',', 'counter', '.', 'size', ')', '# Do some reporting.', 'self', '.', 'context', '.', 'log', '.', 'info', '(', 'counter_str', ',', "'Compiling '", ',', 'items_to_report_element', '(', 'ctx', '.', 'sources', ',', "'{} source'", '.', 'format', '(', 'self', '.', 'name', '(', ')', ')', ')', ',', "' in '", ',', 'items_to_report_element', '(', '[', 't', '.', 'address', '.', 'reference', '(', ')', 'for', 't', 'in', 'vts', '.', 'targets', ']', ',', "'target'", ')', ',', "' ('", ',', 'progress_message', ',', "').'", ')', 'with', 'self', '.', 'context', '.', 'new_workunit', '(', "'compile'", ',', 'labels', '=', '[', 'WorkUnitLabel', '.', 'COMPILER', ']', ')', 'as', 'compile_workunit', ':', 'try', ':', 'directory_digest', '=', 'self', '.', 'compile', '(', 'ctx', ',', 'self', '.', '_args', ',', 'dependency_classpath', ',', 'upstream_analysis', ',', 'settings', ',', 'compiler_option_sets', ',', 'zinc_file_manager', ',', 'self', '.', '_get_plugin_map', '(', "'javac'", ',', 'Java', '.', 'global_instance', '(', ')', ',', 'ctx', '.', 'target', ')', ',', 'self', '.', '_get_plugin_map', '(', "'scalac'", ',', 'ScalaPlatform', '.', 'global_instance', '(', ')', ',', 'ctx', '.', 'target', ')', ',', ')', 'self', '.', '_capture_logs', '(', 'compile_workunit', ',', 'ctx', '.', 'log_dir', ')', 'return', 'directory_digest', 'except', 'TaskError', ':', 'if', 'self', '.', 'get_options', '(', ')', '.', 'suggest_missing_deps', ':', 'logs', '=', '[', 'path', 'for', '_', ',', 'name', ',', '_', ',', 'path', 'in', 'self', '.', '_find_logs', '(', 'compile_workunit', ')', 'if', 'name', '==', 'self', '.', 'name', '(', ')', ']', 'if', 'logs', ':', 'self', '.', '_find_missing_deps', '(', 'logs', ',', 'ctx', '.', 'target', ')', 'raise']
Compiles sources for the given vts into the given output dir. :param vts: VersionedTargetSet with one entry for the target. :param ctx: - A CompileContext instance for the target. :param dependency_classpath: A list of classpath entries of type ClasspathEntry for dependencies May be invoked concurrently on independent target sets. Postcondition: The individual targets in vts are up-to-date, as if each were compiled individually.
['Compiles', 'sources', 'for', 'the', 'given', 'vts', 'into', 'the', 'given', 'output', 'dir', '.']
train
https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/backend/jvm/tasks/jvm_compile/jvm_compile.py#L472-L523
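The '[  n/total] ' progress prefix built above, in isolation (assuming counter.format_length() is the digit width of the total):

total = 120
counter_val = str(3).rjust(len(str(total)), ' ')
print('[{}/{}] '.format(counter_val, total))   # '[  3/120] '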
5,773
readbeyond/aeneas
aeneas/syncmap/smfgxml.py
SyncMapFormatGenericXML._get_lines_from_node_text
def _get_lines_from_node_text(cls, node): """ Given an ``lxml`` node, get lines from ``node.text``, where the line separator is ``<br xmlns=... />``. """ # TODO more robust parsing from lxml import etree parts = ([node.text] + list(chain(*([etree.tostring(c, with_tail=False), c.tail] for c in node.getchildren()))) + [node.tail]) parts = [gf.safe_unicode(p) for p in parts] parts = [p.strip() for p in parts if not p.startswith(u"<br ")] parts = [p for p in parts if len(p) > 0] uparts = [] for part in parts: uparts.append(gf.safe_unicode(part)) return uparts
python
def _get_lines_from_node_text(cls, node): """ Given an ``lxml`` node, get lines from ``node.text``, where the line separator is ``<br xmlns=... />``. """ # TODO more robust parsing from lxml import etree parts = ([node.text] + list(chain(*([etree.tostring(c, with_tail=False), c.tail] for c in node.getchildren()))) + [node.tail]) parts = [gf.safe_unicode(p) for p in parts] parts = [p.strip() for p in parts if not p.startswith(u"<br ")] parts = [p for p in parts if len(p) > 0] uparts = [] for part in parts: uparts.append(gf.safe_unicode(part)) return uparts
['def', '_get_lines_from_node_text', '(', 'cls', ',', 'node', ')', ':', '# TODO more robust parsing', 'from', 'lxml', 'import', 'etree', 'parts', '=', '(', '[', 'node', '.', 'text', ']', '+', 'list', '(', 'chain', '(', '*', '(', '[', 'etree', '.', 'tostring', '(', 'c', ',', 'with_tail', '=', 'False', ')', ',', 'c', '.', 'tail', ']', 'for', 'c', 'in', 'node', '.', 'getchildren', '(', ')', ')', ')', ')', '+', '[', 'node', '.', 'tail', ']', ')', 'parts', '=', '[', 'gf', '.', 'safe_unicode', '(', 'p', ')', 'for', 'p', 'in', 'parts', ']', 'parts', '=', '[', 'p', '.', 'strip', '(', ')', 'for', 'p', 'in', 'parts', 'if', 'not', 'p', '.', 'startswith', '(', 'u"<br "', ')', ']', 'parts', '=', '[', 'p', 'for', 'p', 'in', 'parts', 'if', 'len', '(', 'p', ')', '>', '0', ']', 'uparts', '=', '[', ']', 'for', 'part', 'in', 'parts', ':', 'uparts', '.', 'append', '(', 'gf', '.', 'safe_unicode', '(', 'part', ')', ')', 'return', 'uparts']
Given an ``lxml`` node, get lines from ``node.text``, where the line separator is ``<br xmlns=... />``.
['Given', 'an', 'lxml', 'node', 'get', 'lines', 'from', 'node', '.', 'text', 'where', 'the', 'line', 'separator', 'is', '<br', 'xmlns', '=', '...', '/', '>', '.']
train
https://github.com/readbeyond/aeneas/blob/9d95535ad63eef4a98530cfdff033b8c35315ee1/aeneas/syncmap/smfgxml.py#L46-L60
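A toy reproduction of the <br/>-splitting with lxml (requires lxml; under Python 3 the serialized <br/> parts come back as bytes, so keeping only non-empty str values is enough here):

from lxml import etree

node = etree.fromstring('<p>line one<br/>line two<br/>line three</p>')
parts = [node.text]
for child in node:                                        # each <br/> element
    parts.append(etree.tostring(child, with_tail=False))  # bytes, e.g. b'<br/>'
    parts.append(child.tail)                              # the text after it
parts.append(node.tail)
lines = [p.strip() for p in parts if isinstance(p, str) and p.strip()]
print(lines)   # ['line one', 'line two', 'line three']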
5,774
pahaz/sshtunnel
sshtunnel.py
SSHTunnelForwarder._make_ssh_forward_server
def _make_ssh_forward_server(self, remote_address, local_bind_address): """ Make SSH forward proxy Server class """ _Handler = self._make_ssh_forward_handler_class(remote_address) try: if isinstance(local_bind_address, string_types): forward_maker_class = self._make_unix_ssh_forward_server_class else: forward_maker_class = self._make_ssh_forward_server_class _Server = forward_maker_class(remote_address) ssh_forward_server = _Server( local_bind_address, _Handler, logger=self.logger, ) if ssh_forward_server: ssh_forward_server.daemon_threads = self.daemon_forward_servers self._server_list.append(ssh_forward_server) self.tunnel_is_up[ssh_forward_server.server_address] = False else: self._raise( BaseSSHTunnelForwarderError, 'Problem setting up ssh {0} <> {1} forwarder. You can ' 'suppress this exception by using the `mute_exceptions`' 'argument'.format(address_to_str(local_bind_address), address_to_str(remote_address)) ) except IOError: self._raise( BaseSSHTunnelForwarderError, "Couldn't open tunnel {0} <> {1} might be in use or " "destination not reachable".format( address_to_str(local_bind_address), address_to_str(remote_address) ) )
python
def _make_ssh_forward_server(self, remote_address, local_bind_address): """ Make SSH forward proxy Server class """ _Handler = self._make_ssh_forward_handler_class(remote_address) try: if isinstance(local_bind_address, string_types): forward_maker_class = self._make_unix_ssh_forward_server_class else: forward_maker_class = self._make_ssh_forward_server_class _Server = forward_maker_class(remote_address) ssh_forward_server = _Server( local_bind_address, _Handler, logger=self.logger, ) if ssh_forward_server: ssh_forward_server.daemon_threads = self.daemon_forward_servers self._server_list.append(ssh_forward_server) self.tunnel_is_up[ssh_forward_server.server_address] = False else: self._raise( BaseSSHTunnelForwarderError, 'Problem setting up ssh {0} <> {1} forwarder. You can ' 'suppress this exception by using the `mute_exceptions`' 'argument'.format(address_to_str(local_bind_address), address_to_str(remote_address)) ) except IOError: self._raise( BaseSSHTunnelForwarderError, "Couldn't open tunnel {0} <> {1} might be in use or " "destination not reachable".format( address_to_str(local_bind_address), address_to_str(remote_address) ) )
['def', '_make_ssh_forward_server', '(', 'self', ',', 'remote_address', ',', 'local_bind_address', ')', ':', '_Handler', '=', 'self', '.', '_make_ssh_forward_handler_class', '(', 'remote_address', ')', 'try', ':', 'if', 'isinstance', '(', 'local_bind_address', ',', 'string_types', ')', ':', 'forward_maker_class', '=', 'self', '.', '_make_unix_ssh_forward_server_class', 'else', ':', 'forward_maker_class', '=', 'self', '.', '_make_ssh_forward_server_class', '_Server', '=', 'forward_maker_class', '(', 'remote_address', ')', 'ssh_forward_server', '=', '_Server', '(', 'local_bind_address', ',', '_Handler', ',', 'logger', '=', 'self', '.', 'logger', ',', ')', 'if', 'ssh_forward_server', ':', 'ssh_forward_server', '.', 'daemon_threads', '=', 'self', '.', 'daemon_forward_servers', 'self', '.', '_server_list', '.', 'append', '(', 'ssh_forward_server', ')', 'self', '.', 'tunnel_is_up', '[', 'ssh_forward_server', '.', 'server_address', ']', '=', 'False', 'else', ':', 'self', '.', '_raise', '(', 'BaseSSHTunnelForwarderError', ',', "'Problem setting up ssh {0} <> {1} forwarder. You can '", "'suppress this exception by using the `mute_exceptions`'", "'argument'", '.', 'format', '(', 'address_to_str', '(', 'local_bind_address', ')', ',', 'address_to_str', '(', 'remote_address', ')', ')', ')', 'except', 'IOError', ':', 'self', '.', '_raise', '(', 'BaseSSHTunnelForwarderError', ',', '"Couldn\'t open tunnel {0} <> {1} might be in use or "', '"destination not reachable"', '.', 'format', '(', 'address_to_str', '(', 'local_bind_address', ')', ',', 'address_to_str', '(', 'remote_address', ')', ')', ')']
Make SSH forward proxy Server class
['Make', 'SSH', 'forward', 'proxy', 'Server', 'class']
train
https://github.com/pahaz/sshtunnel/blob/66a923e4c6c8e41b8348420523fbf5ddfd53176c/sshtunnel.py#L773-L810
5,775
rstoneback/pysat
pysat/_orbits.py
Orbits.load
def load(self, orbit=None): """Load a particular orbit into .data for loaded day. Parameters ---------- orbit : int orbit number, 1 indexed Note ---- A day of data must be loaded before this routine functions properly. If the last orbit of the day is requested, it will automatically be padded with data from the next day. The orbit counter will be reset to 1. """ if not self.sat.empty: # ensure data exists # set up orbit metadata self._calcOrbits() # ensure user supplied an orbit if orbit is not None: # pull out requested orbit if orbit < 0: # negative indexing consistent with numpy, -1 last, # -2 second to last, etc. orbit = self.num + 1 + orbit if orbit == 1: # change from orig copied from _core, didn't look correct. # self._getBasicOrbit(orbit=2) try: true_date = self.sat.date # .copy() self.sat.prev() # if and else added becuase of CINDI turn off # 6/5/2013, turn on 10/22/2014 # crashed when starting on 10/22/2014 # prev returned empty data if not self.sat.empty: self.load(orbit=-1) else: self.sat.next() self._getBasicOrbit(orbit=1) # check that this orbit should end on the current day delta = true_date - self.sat.data.index[0] # print 'checking if first orbit should land on requested day' # print self.sat.date, self.sat.data.index[0], delta, delta >= self.orbit_period if delta >= self.orbit_period: # the orbit loaded isn't close enough to date # to be the first orbit of the day, move forward self.next() except StopIteration: # print 'going for basic orbit' self._getBasicOrbit(orbit=1) # includes hack to appear to be zero indexed print('Loaded Orbit:%i' % (self._current - 1)) # check if the first orbit is also the last orbit elif orbit == self.num: # we get here if user asks for last orbit # make sure that orbit data goes across daybreak as needed # load previous orbit if self.num != 1: self._getBasicOrbit(self.num - 1) self.next() else: self._getBasicOrbit(orbit=-1) elif orbit < self.num: # load orbit data into data self._getBasicOrbit(orbit) # includes hack to appear to be zero indexed print('Loaded Orbit:%i' % (self._current - 1)) else: # gone too far self.sat.data = DataFrame() raise Exception('Requested an orbit past total orbits for day') else: raise Exception('Must set an orbit') else: print('No data loaded in instrument object to determine orbits.')
python
def load(self, orbit=None): """Load a particular orbit into .data for loaded day. Parameters ---------- orbit : int orbit number, 1 indexed Note ---- A day of data must be loaded before this routine functions properly. If the last orbit of the day is requested, it will automatically be padded with data from the next day. The orbit counter will be reset to 1. """ if not self.sat.empty: # ensure data exists # set up orbit metadata self._calcOrbits() # ensure user supplied an orbit if orbit is not None: # pull out requested orbit if orbit < 0: # negative indexing consistent with numpy, -1 last, # -2 second to last, etc. orbit = self.num + 1 + orbit if orbit == 1: # change from orig copied from _core, didn't look correct. # self._getBasicOrbit(orbit=2) try: true_date = self.sat.date # .copy() self.sat.prev() # if and else added becuase of CINDI turn off # 6/5/2013, turn on 10/22/2014 # crashed when starting on 10/22/2014 # prev returned empty data if not self.sat.empty: self.load(orbit=-1) else: self.sat.next() self._getBasicOrbit(orbit=1) # check that this orbit should end on the current day delta = true_date - self.sat.data.index[0] # print 'checking if first orbit should land on requested day' # print self.sat.date, self.sat.data.index[0], delta, delta >= self.orbit_period if delta >= self.orbit_period: # the orbit loaded isn't close enough to date # to be the first orbit of the day, move forward self.next() except StopIteration: # print 'going for basic orbit' self._getBasicOrbit(orbit=1) # includes hack to appear to be zero indexed print('Loaded Orbit:%i' % (self._current - 1)) # check if the first orbit is also the last orbit elif orbit == self.num: # we get here if user asks for last orbit # make sure that orbit data goes across daybreak as needed # load previous orbit if self.num != 1: self._getBasicOrbit(self.num - 1) self.next() else: self._getBasicOrbit(orbit=-1) elif orbit < self.num: # load orbit data into data self._getBasicOrbit(orbit) # includes hack to appear to be zero indexed print('Loaded Orbit:%i' % (self._current - 1)) else: # gone too far self.sat.data = DataFrame() raise Exception('Requested an orbit past total orbits for day') else: raise Exception('Must set an orbit') else: print('No data loaded in instrument object to determine orbits.')
['def', 'load', '(', 'self', ',', 'orbit', '=', 'None', ')', ':', 'if', 'not', 'self', '.', 'sat', '.', 'empty', ':', '# ensure data exists', '# set up orbit metadata', 'self', '.', '_calcOrbits', '(', ')', '# ensure user supplied an orbit', 'if', 'orbit', 'is', 'not', 'None', ':', '# pull out requested orbit', 'if', 'orbit', '<', '0', ':', '# negative indexing consistent with numpy, -1 last,', '# -2 second to last, etc.', 'orbit', '=', 'self', '.', 'num', '+', '1', '+', 'orbit', 'if', 'orbit', '==', '1', ':', "# change from orig copied from _core, didn't look correct.", '# self._getBasicOrbit(orbit=2)', 'try', ':', 'true_date', '=', 'self', '.', 'sat', '.', 'date', '# .copy()', 'self', '.', 'sat', '.', 'prev', '(', ')', '# if and else added becuase of CINDI turn off ', '# 6/5/2013, turn on 10/22/2014', '# crashed when starting on 10/22/2014', '# prev returned empty data', 'if', 'not', 'self', '.', 'sat', '.', 'empty', ':', 'self', '.', 'load', '(', 'orbit', '=', '-', '1', ')', 'else', ':', 'self', '.', 'sat', '.', 'next', '(', ')', 'self', '.', '_getBasicOrbit', '(', 'orbit', '=', '1', ')', '# check that this orbit should end on the current day', 'delta', '=', 'true_date', '-', 'self', '.', 'sat', '.', 'data', '.', 'index', '[', '0', ']', "# print 'checking if first orbit should land on requested day'", '# print self.sat.date, self.sat.data.index[0], delta, delta >= self.orbit_period', 'if', 'delta', '>=', 'self', '.', 'orbit_period', ':', "# the orbit loaded isn't close enough to date", '# to be the first orbit of the day, move forward', 'self', '.', 'next', '(', ')', 'except', 'StopIteration', ':', "# print 'going for basic orbit'", 'self', '.', '_getBasicOrbit', '(', 'orbit', '=', '1', ')', '# includes hack to appear to be zero indexed', 'print', '(', "'Loaded Orbit:%i'", '%', '(', 'self', '.', '_current', '-', '1', ')', ')', '# check if the first orbit is also the last orbit', 'elif', 'orbit', '==', 'self', '.', 'num', ':', '# we get here if user asks for last orbit', '# make sure that orbit data goes across daybreak as needed', '# load previous orbit', 'if', 'self', '.', 'num', '!=', '1', ':', 'self', '.', '_getBasicOrbit', '(', 'self', '.', 'num', '-', '1', ')', 'self', '.', 'next', '(', ')', 'else', ':', 'self', '.', '_getBasicOrbit', '(', 'orbit', '=', '-', '1', ')', 'elif', 'orbit', '<', 'self', '.', 'num', ':', '# load orbit data into data', 'self', '.', '_getBasicOrbit', '(', 'orbit', ')', '# includes hack to appear to be zero indexed', 'print', '(', "'Loaded Orbit:%i'", '%', '(', 'self', '.', '_current', '-', '1', ')', ')', 'else', ':', '# gone too far', 'self', '.', 'sat', '.', 'data', '=', 'DataFrame', '(', ')', 'raise', 'Exception', '(', "'Requested an orbit past total orbits for day'", ')', 'else', ':', 'raise', 'Exception', '(', "'Must set an orbit'", ')', 'else', ':', 'print', '(', "'No data loaded in instrument object to determine orbits.'", ')']
Load a particular orbit into .data for loaded day. Parameters ---------- orbit : int orbit number, 1 indexed Note ---- A day of data must be loaded before this routine functions properly. If the last orbit of the day is requested, it will automatically be padded with data from the next day. The orbit counter will be reset to 1.
['Load', 'a', 'particular', 'orbit', 'into', '.', 'data', 'for', 'loaded', 'day', '.']
train
https://github.com/rstoneback/pysat/blob/4ae1afd80e15e4449397d39dce8c3e969c32c422/pysat/_orbits.py#L436-L516
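The negative-index normalisation in the code above (orbit = self.num + 1 + orbit), in isolation:

num = 15                             # orbits in the loaded day, 1-indexed
for requested in (-1, -2):
    print(num + 1 + requested)       # 15 then 14: -1 is the last orbit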
5,776
ianmiell/shutit
shutit_class.py
LayerConfigParser.get_config_set
def get_config_set(self, section, option): """Returns a set with each value per config file in it. """ values = set() for cp, filename, fp in self.layers: filename = filename # pylint fp = fp # pylint if cp.has_option(section, option): values.add(cp.get(section, option)) return values
python
def get_config_set(self, section, option): """Returns a set with each value per config file in it. """ values = set() for cp, filename, fp in self.layers: filename = filename # pylint fp = fp # pylint if cp.has_option(section, option): values.add(cp.get(section, option)) return values
['def', 'get_config_set', '(', 'self', ',', 'section', ',', 'option', ')', ':', 'values', '=', 'set', '(', ')', 'for', 'cp', ',', 'filename', ',', 'fp', 'in', 'self', '.', 'layers', ':', 'filename', '=', 'filename', '# pylint', 'fp', '=', 'fp', '# pylint', 'if', 'cp', '.', 'has_option', '(', 'section', ',', 'option', ')', ':', 'values', '.', 'add', '(', 'cp', '.', 'get', '(', 'section', ',', 'option', ')', ')', 'return', 'values']
Returns a set with each value per config file in it.
['Returns', 'a', 'set', 'with', 'each', 'value', 'per', 'config', 'file', 'in', 'it', '.']
train
https://github.com/ianmiell/shutit/blob/19cd64cdfb23515b106b40213dccff4101617076/shutit_class.py#L117-L126
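A standalone sketch of collecting one option's values across layered parsers, using stdlib configparser with two in-memory "files":

import configparser

layers = []
for text in ('[a]\nx = 1\n', '[a]\nx = 2\n'):
    cp = configparser.ConfigParser()
    cp.read_string(text)             # one parser per config "file"
    layers.append(cp)

values = {cp.get('a', 'x') for cp in layers if cp.has_option('a', 'x')}
print(values)   # {'1', '2'} -- one entry per distinct per-file value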
5,777
dw/mitogen
ansible_mitogen/runner.py
Runner._setup_environ
def _setup_environ(self): """ Apply changes from /etc/environment files before creating a TemporaryEnvironment to snapshot environment state prior to module run. """ _pam_env_watcher.check() _etc_env_watcher.check() env = dict(self.extra_env or {}) if self.env: env.update(self.env) self._env = TemporaryEnvironment(env)
python
def _setup_environ(self): """ Apply changes from /etc/environment files before creating a TemporaryEnvironment to snapshot environment state prior to module run. """ _pam_env_watcher.check() _etc_env_watcher.check() env = dict(self.extra_env or {}) if self.env: env.update(self.env) self._env = TemporaryEnvironment(env)
['def', '_setup_environ', '(', 'self', ')', ':', '_pam_env_watcher', '.', 'check', '(', ')', '_etc_env_watcher', '.', 'check', '(', ')', 'env', '=', 'dict', '(', 'self', '.', 'extra_env', 'or', '{', '}', ')', 'if', 'self', '.', 'env', ':', 'env', '.', 'update', '(', 'self', '.', 'env', ')', 'self', '.', '_env', '=', 'TemporaryEnvironment', '(', 'env', ')']
Apply changes from /etc/environment files before creating a TemporaryEnvironment to snapshot environment state prior to module run.
['Apply', 'changes', 'from', '/', 'etc', '/', 'environment', 'files', 'before', 'creating', 'a', 'TemporaryEnvironment', 'to', 'snapshot', 'environment', 'state', 'prior', 'to', 'module', 'run', '.']
train
https://github.com/dw/mitogen/blob/a7fdb55e1300a7e0a5e404b09eb730cf9a525da7/ansible_mitogen/runner.py#L335-L345
5,778
inveniosoftware/invenio-collections
invenio_collections/cli.py
delete
def delete(name, dry_run, verbose): """Delete a collection.""" collection = Collection.query.filter_by(name=name).one() if verbose: tr = LeftAligned(traverse=AttributeTraversal()) click.secho(tr(collection), fg='red') db.session.delete(collection)
python
def delete(name, dry_run, verbose): """Delete a collection.""" collection = Collection.query.filter_by(name=name).one() if verbose: tr = LeftAligned(traverse=AttributeTraversal()) click.secho(tr(collection), fg='red') db.session.delete(collection)
['def', 'delete', '(', 'name', ',', 'dry_run', ',', 'verbose', ')', ':', 'collection', '=', 'Collection', '.', 'query', '.', 'filter_by', '(', 'name', '=', 'name', ')', '.', 'one', '(', ')', 'if', 'verbose', ':', 'tr', '=', 'LeftAligned', '(', 'traverse', '=', 'AttributeTraversal', '(', ')', ')', 'click', '.', 'secho', '(', 'tr', '(', 'collection', ')', ',', 'fg', '=', "'red'", ')', 'db', '.', 'session', '.', 'delete', '(', 'collection', ')']
Delete a collection.
['Delete', 'a', 'collection', '.']
train
https://github.com/inveniosoftware/invenio-collections/blob/f3adca45c6d00a4dbf1f48fd501e8a68fe347f2f/invenio_collections/cli.py#L158-L164
5,779
MisterY/gnucash-portfolio
gnucash_portfolio/bookaggregate.py
BookAggregate.transactions
def transactions(self) -> TransactionsAggregate: """ Transactions aggregate """ if not self.__transactions_aggregate: self.__transactions_aggregate = TransactionsAggregate(self.book) return self.__transactions_aggregate
python
def transactions(self) -> TransactionsAggregate: """ Transactions aggregate """ if not self.__transactions_aggregate: self.__transactions_aggregate = TransactionsAggregate(self.book) return self.__transactions_aggregate
['def', 'transactions', '(', 'self', ')', '->', 'TransactionsAggregate', ':', 'if', 'not', 'self', '.', '__transactions_aggregate', ':', 'self', '.', '__transactions_aggregate', '=', 'TransactionsAggregate', '(', 'self', '.', 'book', ')', 'return', 'self', '.', '__transactions_aggregate']
Transactions aggregate
['Transactions', 'aggregate']
train
https://github.com/MisterY/gnucash-portfolio/blob/bfaad8345a5479d1cd111acee1939e25c2a638c2/gnucash_portfolio/bookaggregate.py#L125-L129
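The lazy, cached-on-first-access property pattern above, in a standalone miniature (not gnucash_portfolio's actual class):

class BookAggregate:
    def __init__(self, book):
        self.book = book
        self._transactions = None

    @property
    def transactions(self):
        if self._transactions is None:            # build once, on first access
            self._transactions = list(self.book)  # stand-in for TransactionsAggregate
        return self._transactions

agg = BookAggregate(book=('t1', 't2'))
print(agg.transactions is agg.transactions)       # True: the same cached object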
5,780
gccxml/pygccxml
pygccxml/declarations/type_traits.py
is_std_string
def is_std_string(type_): """ Returns True, if type represents C++ `std::string`, False otherwise. """ if utils.is_str(type_): return type_ in string_equivalences type_ = remove_alias(type_) type_ = remove_reference(type_) type_ = remove_cv(type_) return type_.decl_string in string_equivalences
python
def is_std_string(type_): """ Returns True, if type represents C++ `std::string`, False otherwise. """ if utils.is_str(type_): return type_ in string_equivalences type_ = remove_alias(type_) type_ = remove_reference(type_) type_ = remove_cv(type_) return type_.decl_string in string_equivalences
['def', 'is_std_string', '(', 'type_', ')', ':', 'if', 'utils', '.', 'is_str', '(', 'type_', ')', ':', 'return', 'type_', 'in', 'string_equivalences', 'type_', '=', 'remove_alias', '(', 'type_', ')', 'type_', '=', 'remove_reference', '(', 'type_', ')', 'type_', '=', 'remove_cv', '(', 'type_', ')', 'return', 'type_', '.', 'decl_string', 'in', 'string_equivalences']
Returns True, if type represents C++ `std::string`, False otherwise.
['Returns', 'True', 'if', 'type', 'represents', 'C', '++', 'std', '::', 'string', 'False', 'otherwise', '.']
train
https://github.com/gccxml/pygccxml/blob/2b1efbb9e37ceb2ae925c7f3ce1570f476db9e1e/pygccxml/declarations/type_traits.py#L512-L524
5,781
diux-dev/ncluster
ncluster/aws_util.py
extract_attr_for_match
def extract_attr_for_match(items, **kwargs): """Helper method to get attribute value for an item matching some criterion. Specify target criteria value as dict, with target attribute having value -1 Example: to extract state of vpc matching given vpc id response = [{'State': 'available', 'VpcId': 'vpc-2bb1584c'}] extract_attr_for_match(response, State=-1, VpcId='vpc-2bb1584c') #=> 'available'""" # find the value of attribute to return query_arg = None for arg, value in kwargs.items(): if value == -1: assert query_arg is None, "Only single query arg (-1 valued) is allowed" query_arg = arg result = [] filterset = set(kwargs.keys()) for item in items: match = True assert filterset.issubset( item.keys()), "Filter set contained %s which was not in record %s" % ( filterset.difference(item.keys()), item) for arg in item: if arg == query_arg: continue if arg in kwargs: if item[arg] != kwargs[arg]: match = False break if match: result.append(item[query_arg]) assert len(result) <= 1, "%d values matched %s, only allow 1" % ( len(result), kwargs) if result: return result[0] return None
python
def extract_attr_for_match(items, **kwargs): """Helper method to get attribute value for an item matching some criterion. Specify target criteria value as dict, with target attribute having value -1 Example: to extract state of vpc matching given vpc id response = [{'State': 'available', 'VpcId': 'vpc-2bb1584c'}] extract_attr_for_match(response, State=-1, VpcId='vpc-2bb1584c') #=> 'available'""" # find the value of attribute to return query_arg = None for arg, value in kwargs.items(): if value == -1: assert query_arg is None, "Only single query arg (-1 valued) is allowed" query_arg = arg result = [] filterset = set(kwargs.keys()) for item in items: match = True assert filterset.issubset( item.keys()), "Filter set contained %s which was not in record %s" % ( filterset.difference(item.keys()), item) for arg in item: if arg == query_arg: continue if arg in kwargs: if item[arg] != kwargs[arg]: match = False break if match: result.append(item[query_arg]) assert len(result) <= 1, "%d values matched %s, only allow 1" % ( len(result), kwargs) if result: return result[0] return None
['def', 'extract_attr_for_match', '(', 'items', ',', '*', '*', 'kwargs', ')', ':', '# find the value of attribute to return', 'query_arg', '=', 'None', 'for', 'arg', ',', 'value', 'in', 'kwargs', '.', 'items', '(', ')', ':', 'if', 'value', '==', '-', '1', ':', 'assert', 'query_arg', 'is', 'None', ',', '"Only single query arg (-1 valued) is allowed"', 'query_arg', '=', 'arg', 'result', '=', '[', ']', 'filterset', '=', 'set', '(', 'kwargs', '.', 'keys', '(', ')', ')', 'for', 'item', 'in', 'items', ':', 'match', '=', 'True', 'assert', 'filterset', '.', 'issubset', '(', 'item', '.', 'keys', '(', ')', ')', ',', '"Filter set contained %s which was not in record %s"', '%', '(', 'filterset', '.', 'difference', '(', 'item', '.', 'keys', '(', ')', ')', ',', 'item', ')', 'for', 'arg', 'in', 'item', ':', 'if', 'arg', '==', 'query_arg', ':', 'continue', 'if', 'arg', 'in', 'kwargs', ':', 'if', 'item', '[', 'arg', ']', '!=', 'kwargs', '[', 'arg', ']', ':', 'match', '=', 'False', 'break', 'if', 'match', ':', 'result', '.', 'append', '(', 'item', '[', 'query_arg', ']', ')', 'assert', 'len', '(', 'result', ')', '<=', '1', ',', '"%d values matched %s, only allow 1"', '%', '(', 'len', '(', 'result', ')', ',', 'kwargs', ')', 'if', 'result', ':', 'return', 'result', '[', '0', ']', 'return', 'None']
Helper method to get attribute value for an item matching some criterion. Specify target criteria value as dict, with target attribute having value -1 Example: to extract state of vpc matching given vpc id response = [{'State': 'available', 'VpcId': 'vpc-2bb1584c'}] extract_attr_for_match(response, State=-1, VpcId='vpc-2bb1584c') #=> 'available'
['Helper', 'method', 'to', 'get', 'attribute', 'value', 'for', 'an', 'item', 'matching', 'some', 'criterion', '.', 'Specify', 'target', 'criteria', 'value', 'as', 'dict', 'with', 'target', 'attribute', 'having', 'value', '-', '1']
train
https://github.com/diux-dev/ncluster/blob/2fd359621896717197b479c7174d06d80df1529b/ncluster/aws_util.py#L510-L548
5,782
peshay/tpm
tpm.py
TpmApi.convert_user_to_ldap
def convert_user_to_ldap(self, ID, DN): """Convert a normal user to a LDAP user.""" # http://teampasswordmanager.com/docs/api-users/#convert_to_ldap data = {'login_dn': DN} log.info('Convert User %s to LDAP DN %s' % (ID, DN)) self.put('users/%s/convert_to_ldap.json' % ID, data)
python
def convert_user_to_ldap(self, ID, DN): """Convert a normal user to a LDAP user.""" # http://teampasswordmanager.com/docs/api-users/#convert_to_ldap data = {'login_dn': DN} log.info('Convert User %s to LDAP DN %s' % (ID, DN)) self.put('users/%s/convert_to_ldap.json' % ID, data)
['def', 'convert_user_to_ldap', '(', 'self', ',', 'ID', ',', 'DN', ')', ':', '# http://teampasswordmanager.com/docs/api-users/#convert_to_ldap', 'data', '=', '{', "'login_dn'", ':', 'DN', '}', 'log', '.', 'info', '(', "'Convert User %s to LDAP DN %s'", '%', '(', 'ID', ',', 'DN', ')', ')', 'self', '.', 'put', '(', "'users/%s/convert_to_ldap.json'", '%', 'ID', ',', 'data', ')']
Convert a normal user to a LDAP user.
['Convert', 'a', 'normal', 'user', 'to', 'a', 'LDAP', 'user', '.']
train
https://github.com/peshay/tpm/blob/8e64a4d8b89d54bdd2c92d965463a7508aa3d0bc/tpm.py#L528-L533
5,783
ArangoDB-Community/pyArango
pyArango/document.py
DocumentStore.getStore
def getStore(self) : """get the inner store as dictionary""" res = {} res.update(self.store) for k, v in self.subStores.items() : res[k] = v.getStore() return res
python
def getStore(self) : """get the inner store as dictionary""" res = {} res.update(self.store) for k, v in self.subStores.items() : res[k] = v.getStore() return res
['def', 'getStore', '(', 'self', ')', ':', 'res', '=', '{', '}', 'res', '.', 'update', '(', 'self', '.', 'store', ')', 'for', 'k', ',', 'v', 'in', 'self', '.', 'subStores', '.', 'items', '(', ')', ':', 'res', '[', 'k', ']', '=', 'v', '.', 'getStore', '(', ')', 'return', 'res']
get the inner store as dictionary
['get', 'the', 'inner', 'store', 'as', 'dictionary']
train
https://github.com/ArangoDB-Community/pyArango/blob/dd72e5f6c540e5e148943d615ddf7553bb78ce0b/pyArango/document.py#L50-L57
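The recursive store flattening above in miniature (the snake_case names here are illustrative, not pyArango's):

class Store:
    def __init__(self, data, subs=None):
        self.store = data
        self.sub_stores = subs or {}

    def get_store(self):
        res = dict(self.store)                    # copy this level's values
        for key, sub in self.sub_stores.items():
            res[key] = sub.get_store()            # recurse into each sub-store
        return res

s = Store({'a': 1}, {'child': Store({'b': 2})})
print(s.get_store())   # {'a': 1, 'child': {'b': 2}}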
5,784
swift-nav/libsbp
python/sbp/client/drivers/pyserial_driver.py
PySerialDriver.write
def write(self, s): """ Write wrapper. Parameters ---------- s : bytes Bytes to write """ try: return self.handle.write(s) except (OSError, serial.SerialException, serial.writeTimeoutError) as e: if e == serial.writeTimeoutError: print("sbp pyserial_driver: writeTimeoutError") return 0 else: print() print("Piksi disconnected") print() self.handle.close() raise IOError
python
def write(self, s): """ Write wrapper. Parameters ---------- s : bytes Bytes to write """ try: return self.handle.write(s) except (OSError, serial.SerialException, serial.writeTimeoutError) as e: if e == serial.writeTimeoutError: print("sbp pyserial_driver: writeTimeoutError") return 0 else: print() print("Piksi disconnected") print() self.handle.close() raise IOError
['def', 'write', '(', 'self', ',', 's', ')', ':', 'try', ':', 'return', 'self', '.', 'handle', '.', 'write', '(', 's', ')', 'except', '(', 'OSError', ',', 'serial', '.', 'SerialException', ',', 'serial', '.', 'writeTimeoutError', ')', 'as', 'e', ':', 'if', 'e', '==', 'serial', '.', 'writeTimeoutError', ':', 'print', '(', '"sbp pyserial_driver: writeTimeoutError"', ')', 'return', '0', 'else', ':', 'print', '(', ')', 'print', '(', '"Piksi disconnected"', ')', 'print', '(', ')', 'self', '.', 'handle', '.', 'close', '(', ')', 'raise', 'IOError']
Write wrapper. Parameters ---------- s : bytes Bytes to write
['Write', 'wrapper', '.']
train
https://github.com/swift-nav/libsbp/blob/5a950608506b23e31b73ef7065da905b646055c1/python/sbp/client/drivers/pyserial_driver.py#L87-L108
5,785
ph4r05/monero-serialize
monero_serialize/xmrrpc.py
Modeler.dump_tuple
async def dump_tuple(self, elem, elem_type, params=None, obj=None): """ Dumps tuple of elements to the writer. :param elem: :param elem_type: :param params: :param obj: :return: """ if len(elem) != len(elem_type.f_specs()): raise ValueError('Fixed size tuple has not defined size: %s' % len(elem_type.f_specs())) elem_fields = params[0] if params else None if elem_fields is None: elem_fields = elem_type.f_specs() obj = [] if obj is None else x.get_elem(obj) for idx, elem in enumerate(elem): try: self.tracker.push_index(idx) fvalue = await self._dump_field(elem, elem_fields[idx], params[1:] if params else None) obj.append(fvalue) self.tracker.pop() except Exception as e: raise helpers.ArchiveException(e, tracker=self.tracker) from e return obj
python
async def dump_tuple(self, elem, elem_type, params=None, obj=None): """ Dumps tuple of elements to the writer. :param elem: :param elem_type: :param params: :param obj: :return: """ if len(elem) != len(elem_type.f_specs()): raise ValueError('Fixed size tuple has not defined size: %s' % len(elem_type.f_specs())) elem_fields = params[0] if params else None if elem_fields is None: elem_fields = elem_type.f_specs() obj = [] if obj is None else x.get_elem(obj) for idx, elem in enumerate(elem): try: self.tracker.push_index(idx) fvalue = await self._dump_field(elem, elem_fields[idx], params[1:] if params else None) obj.append(fvalue) self.tracker.pop() except Exception as e: raise helpers.ArchiveException(e, tracker=self.tracker) from e return obj
['async', 'def', 'dump_tuple', '(', 'self', ',', 'elem', ',', 'elem_type', ',', 'params', '=', 'None', ',', 'obj', '=', 'None', ')', ':', 'if', 'len', '(', 'elem', ')', '!=', 'len', '(', 'elem_type', '.', 'f_specs', '(', ')', ')', ':', 'raise', 'ValueError', '(', "'Fixed size tuple has not defined size: %s'", '%', 'len', '(', 'elem_type', '.', 'f_specs', '(', ')', ')', ')', 'elem_fields', '=', 'params', '[', '0', ']', 'if', 'params', 'else', 'None', 'if', 'elem_fields', 'is', 'None', ':', 'elem_fields', '=', 'elem_type', '.', 'f_specs', '(', ')', 'obj', '=', '[', ']', 'if', 'obj', 'is', 'None', 'else', 'x', '.', 'get_elem', '(', 'obj', ')', 'for', 'idx', ',', 'elem', 'in', 'enumerate', '(', 'elem', ')', ':', 'try', ':', 'self', '.', 'tracker', '.', 'push_index', '(', 'idx', ')', 'fvalue', '=', 'await', 'self', '.', '_dump_field', '(', 'elem', ',', 'elem_fields', '[', 'idx', ']', ',', 'params', '[', '1', ':', ']', 'if', 'params', 'else', 'None', ')', 'obj', '.', 'append', '(', 'fvalue', ')', 'self', '.', 'tracker', '.', 'pop', '(', ')', 'except', 'Exception', 'as', 'e', ':', 'raise', 'helpers', '.', 'ArchiveException', '(', 'e', ',', 'tracker', '=', 'self', '.', 'tracker', ')', 'from', 'e', 'return', 'obj']
Dumps tuple of elements to the writer. :param elem: :param elem_type: :param params: :param obj: :return:
['Dumps', 'tuple', 'of', 'elements', 'to', 'the', 'writer', '.']
train
https://github.com/ph4r05/monero-serialize/blob/cebb3ba2aaf2e9211b1dcc6db2bab02946d06e42/monero_serialize/xmrrpc.py#L1118-L1146
5,786
DataBiosphere/toil
src/toil/lib/docker.py
dockerPredicate
def dockerPredicate(e): """ Used to ensure Docker exceptions are retried if appropriate :param e: Exception :return: True if e retriable, else False """ if not isinstance(e, subprocess.CalledProcessError): return False if e.returncode == 125: return True
python
def dockerPredicate(e): """ Used to ensure Docker exceptions are retried if appropriate :param e: Exception :return: True if e retriable, else False """ if not isinstance(e, subprocess.CalledProcessError): return False if e.returncode == 125: return True
['def', 'dockerPredicate', '(', 'e', ')', ':', 'if', 'not', 'isinstance', '(', 'e', ',', 'subprocess', '.', 'CalledProcessError', ')', ':', 'return', 'False', 'if', 'e', '.', 'returncode', '==', '125', ':', 'return', 'True']
Used to ensure Docker exceptions are retried if appropriate :param e: Exception :return: True if e is retriable, else False
['Used', 'to', 'ensure', 'Docker', 'exceptions', 'are', 'retried', 'if', 'appropriate']
train
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/lib/docker.py#L31-L41
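dockerPredicate above only decides whether an error is worth retrying (Docker uses exit code 125 for daemon-side failures); the retry loop itself lives elsewhere in Toil. A hedged sketch of the contract such a predicate plugs into -- retry_call here is a hypothetical helper, not Toil's retry utility:

import subprocess
import time

def retry_call(fn, predicate, attempts=3, delay=0.5):
    # Re-run fn only when predicate judges the exception transient;
    # non-retriable errors propagate immediately.
    for attempt in range(attempts):
        try:
            return fn()
        except Exception as e:
            if attempt == attempts - 1 or not predicate(e):
                raise
            time.sleep(delay)

# retry_call(lambda: subprocess.check_call(['docker', 'info']), dockerPredicate)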
5,787
rameshg87/pyremotevbox
pyremotevbox/ZSI/TC.py
TypeCode.set_attributes
def set_attributes(self, el, pyobj): '''Instance data attributes contains a dictionary of keys (namespaceURI,localName) and attribute values. These values can be self-describing (typecode), or use attribute_typecode_dict to determine serialization. Parameters: el -- MessageInterface representing the element pyobj -- ''' if not hasattr(pyobj, self.attrs_aname): return if not isinstance(getattr(pyobj, self.attrs_aname), dict): raise TypeError,\ 'pyobj.%s must be a dictionary of names and values'\ % self.attrs_aname for attr, value in getattr(pyobj, self.attrs_aname).items(): namespaceURI,localName = None, attr if type(attr) in _seqtypes: namespaceURI, localName = attr what = None if getattr(self, 'attribute_typecode_dict', None) is not None: what = self.attribute_typecode_dict.get(attr) if what is None and namespaceURI is None: what = self.attribute_typecode_dict.get(localName) # allow derived type if hasattr(value, 'typecode') and not isinstance(what, AnyType): if what is not None and not isinstance(value.typecode, what): raise EvaluateException, \ 'self-describing attribute must subclass %s'\ %what.__class__ what = value.typecode self.logger.debug("attribute create -- %s", value) if isinstance(what, QName): what.set_prefix(el, value) #format the data if what is None: value = str(value) else: value = what.get_formatted_content(value) el.setAttributeNS(namespaceURI, localName, value)
python
def set_attributes(self, el, pyobj): '''Instance data attributes contains a dictionary of keys (namespaceURI,localName) and attribute values. These values can be self-describing (typecode), or use attribute_typecode_dict to determine serialization. Parameters: el -- MessageInterface representing the element pyobj -- ''' if not hasattr(pyobj, self.attrs_aname): return if not isinstance(getattr(pyobj, self.attrs_aname), dict): raise TypeError,\ 'pyobj.%s must be a dictionary of names and values'\ % self.attrs_aname for attr, value in getattr(pyobj, self.attrs_aname).items(): namespaceURI,localName = None, attr if type(attr) in _seqtypes: namespaceURI, localName = attr what = None if getattr(self, 'attribute_typecode_dict', None) is not None: what = self.attribute_typecode_dict.get(attr) if what is None and namespaceURI is None: what = self.attribute_typecode_dict.get(localName) # allow derived type if hasattr(value, 'typecode') and not isinstance(what, AnyType): if what is not None and not isinstance(value.typecode, what): raise EvaluateException, \ 'self-describing attribute must subclass %s'\ %what.__class__ what = value.typecode self.logger.debug("attribute create -- %s", value) if isinstance(what, QName): what.set_prefix(el, value) #format the data if what is None: value = str(value) else: value = what.get_formatted_content(value) el.setAttributeNS(namespaceURI, localName, value)
['def', 'set_attributes', '(', 'self', ',', 'el', ',', 'pyobj', ')', ':', 'if', 'not', 'hasattr', '(', 'pyobj', ',', 'self', '.', 'attrs_aname', ')', ':', 'return', 'if', 'not', 'isinstance', '(', 'getattr', '(', 'pyobj', ',', 'self', '.', 'attrs_aname', ')', ',', 'dict', ')', ':', 'raise', 'TypeError', ',', "'pyobj.%s must be a dictionary of names and values'", '%', 'self', '.', 'attrs_aname', 'for', 'attr', ',', 'value', 'in', 'getattr', '(', 'pyobj', ',', 'self', '.', 'attrs_aname', ')', '.', 'items', '(', ')', ':', 'namespaceURI', ',', 'localName', '=', 'None', ',', 'attr', 'if', 'type', '(', 'attr', ')', 'in', '_seqtypes', ':', 'namespaceURI', ',', 'localName', '=', 'attr', 'what', '=', 'None', 'if', 'getattr', '(', 'self', ',', "'attribute_typecode_dict'", ',', 'None', ')', 'is', 'not', 'None', ':', 'what', '=', 'self', '.', 'attribute_typecode_dict', '.', 'get', '(', 'attr', ')', 'if', 'what', 'is', 'None', 'and', 'namespaceURI', 'is', 'None', ':', 'what', '=', 'self', '.', 'attribute_typecode_dict', '.', 'get', '(', 'localName', ')', '# allow derived type', 'if', 'hasattr', '(', 'value', ',', "'typecode'", ')', 'and', 'not', 'isinstance', '(', 'what', ',', 'AnyType', ')', ':', 'if', 'what', 'is', 'not', 'None', 'and', 'not', 'isinstance', '(', 'value', '.', 'typecode', ',', 'what', ')', ':', 'raise', 'EvaluateException', ',', "'self-describing attribute must subclass %s'", '%', 'what', '.', '__class__', 'what', '=', 'value', '.', 'typecode', 'self', '.', 'logger', '.', 'debug', '(', '"attribute create -- %s"', ',', 'value', ')', 'if', 'isinstance', '(', 'what', ',', 'QName', ')', ':', 'what', '.', 'set_prefix', '(', 'el', ',', 'value', ')', '#format the data', 'if', 'what', 'is', 'None', ':', 'value', '=', 'str', '(', 'value', ')', 'else', ':', 'value', '=', 'what', '.', 'get_formatted_content', '(', 'value', ')', 'el', '.', 'setAttributeNS', '(', 'namespaceURI', ',', 'localName', ',', 'value', ')']
Instance data attributes contains a dictionary of keys (namespaceURI,localName) and attribute values. These values can be self-describing (typecode), or use attribute_typecode_dict to determine serialization. Parameters: el -- MessageInterface representing the element pyobj --
['Instance', 'data', 'attributes', 'contains', 'a', 'dictionary', 'of', 'keys', '(', 'namespaceURI', 'localName', ')', 'and', 'attribute', 'values', '.', 'These', 'values', 'can', 'be', 'self', '-', 'describing', '(', 'typecode', ')', 'or', 'use', 'attribute_typecode_dict', 'to', 'determine', 'serialization', '.', 'Parameters', ':', 'el', '--', 'MessageInterface', 'representing', 'the', 'element', 'pyobj', '--']
train
https://github.com/rameshg87/pyremotevbox/blob/123dffff27da57c8faa3ac1dd4c68b1cf4558b1a/pyremotevbox/ZSI/TC.py#L301-L348
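The record above reads instance attribute data from a dict keyed either by a bare local name or by a (namespaceURI, localName) pair; only the tuple form can reach the namespace-qualified branch. A small illustration of that expected shape -- the attribute name _attrs is assumed, since the real name comes from the typecode's attrs_aname setting:

class Holder(object):   # matches the Python 2 era of the ZSI record above
    pass

pyobj = Holder()
pyobj._attrs = {
    'id': '42',         # unqualified attribute: serialized with no namespace
    ('http://www.w3.org/2001/XMLSchema-instance', 'nil'): 'true',  # qualified
}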
5,788
google/grr
grr/server/grr_response_server/databases/mem_cronjobs.py
InMemoryDBCronJobMixin.LeaseCronJobs
def LeaseCronJobs(self, cronjob_ids=None, lease_time=None): """Leases all available cron jobs.""" leased_jobs = [] now = rdfvalue.RDFDatetime.Now() expiration_time = now + lease_time for job in itervalues(self.cronjobs): if cronjob_ids and job.cron_job_id not in cronjob_ids: continue existing_lease = self.cronjob_leases.get(job.cron_job_id) if existing_lease is None or existing_lease[0] < now: self.cronjob_leases[job.cron_job_id] = (expiration_time, utils.ProcessIdString()) job = job.Copy() job.leased_until, job.leased_by = self.cronjob_leases[job.cron_job_id] leased_jobs.append(job) return leased_jobs
python
def LeaseCronJobs(self, cronjob_ids=None, lease_time=None): """Leases all available cron jobs.""" leased_jobs = [] now = rdfvalue.RDFDatetime.Now() expiration_time = now + lease_time for job in itervalues(self.cronjobs): if cronjob_ids and job.cron_job_id not in cronjob_ids: continue existing_lease = self.cronjob_leases.get(job.cron_job_id) if existing_lease is None or existing_lease[0] < now: self.cronjob_leases[job.cron_job_id] = (expiration_time, utils.ProcessIdString()) job = job.Copy() job.leased_until, job.leased_by = self.cronjob_leases[job.cron_job_id] leased_jobs.append(job) return leased_jobs
['def', 'LeaseCronJobs', '(', 'self', ',', 'cronjob_ids', '=', 'None', ',', 'lease_time', '=', 'None', ')', ':', 'leased_jobs', '=', '[', ']', 'now', '=', 'rdfvalue', '.', 'RDFDatetime', '.', 'Now', '(', ')', 'expiration_time', '=', 'now', '+', 'lease_time', 'for', 'job', 'in', 'itervalues', '(', 'self', '.', 'cronjobs', ')', ':', 'if', 'cronjob_ids', 'and', 'job', '.', 'cron_job_id', 'not', 'in', 'cronjob_ids', ':', 'continue', 'existing_lease', '=', 'self', '.', 'cronjob_leases', '.', 'get', '(', 'job', '.', 'cron_job_id', ')', 'if', 'existing_lease', 'is', 'None', 'or', 'existing_lease', '[', '0', ']', '<', 'now', ':', 'self', '.', 'cronjob_leases', '[', 'job', '.', 'cron_job_id', ']', '=', '(', 'expiration_time', ',', 'utils', '.', 'ProcessIdString', '(', ')', ')', 'job', '=', 'job', '.', 'Copy', '(', ')', 'job', '.', 'leased_until', ',', 'job', '.', 'leased_by', '=', 'self', '.', 'cronjob_leases', '[', 'job', '.', 'cron_job_id', ']', 'leased_jobs', '.', 'append', '(', 'job', ')', 'return', 'leased_jobs']
Leases all available cron jobs.
['Leases', 'all', 'available', 'cron', 'jobs', '.']
train
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/databases/mem_cronjobs.py#L97-L115
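The leasing rule in the record above boils down to: take the job if no lease exists or the previous lease has expired, and record who holds it until when. A condensed sketch with plain floats standing in for GRR's RDFDatetime and a bare dict for the lease table:

import os
import time

leases = {}  # job_id -> (expires_at, owner)

def try_lease(job_id, lease_seconds):
    now = time.time()
    existing = leases.get(job_id)
    if existing is None or existing[0] < now:   # free, or old lease ran out
        leases[job_id] = (now + lease_seconds, 'pid:%d' % os.getpid())
        return leases[job_id]
    return None                                 # still held by someone else

# try_lease('cron-1', 600) -> (expiry, owner); an immediate second call -> None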
5,789
bitcraze/crazyflie-lib-python
cflib/crazyflie/high_level_commander.py
HighLevelCommander.start_trajectory
def start_trajectory(self, trajectory_id, time_scale=1.0, relative=False, reversed=False, group_mask=ALL_GROUPS): """ starts executing a specified trajectory :param trajectory_id: id of the trajectory (previously defined by define_trajectory) :param time_scale: time factor; 1.0 = original speed; >1.0: slower; <1.0: faster :param relative: set to True, if trajectory should be shifted to current setpoint :param reversed: set to True, if trajectory should be executed in reverse :param group_mask: mask for which CFs this should apply to :return: """ self._send_packet(struct.pack('<BBBBBf', self.COMMAND_START_TRAJECTORY, group_mask, relative, reversed, trajectory_id, time_scale))
python
def start_trajectory(self, trajectory_id, time_scale=1.0, relative=False, reversed=False, group_mask=ALL_GROUPS): """ starts executing a specified trajectory :param trajectory_id: id of the trajectory (previously defined by define_trajectory) :param time_scale: time factor; 1.0 = original speed; >1.0: slower; <1.0: faster :param relative: set to True, if trajectory should be shifted to current setpoint :param reversed: set to True, if trajectory should be executed in reverse :param group_mask: mask for which CFs this should apply to :return: """ self._send_packet(struct.pack('<BBBBBf', self.COMMAND_START_TRAJECTORY, group_mask, relative, reversed, trajectory_id, time_scale))
['def', 'start_trajectory', '(', 'self', ',', 'trajectory_id', ',', 'time_scale', '=', '1.0', ',', 'relative', '=', 'False', ',', 'reversed', '=', 'False', ',', 'group_mask', '=', 'ALL_GROUPS', ')', ':', 'self', '.', '_send_packet', '(', 'struct', '.', 'pack', '(', "'<BBBBBf'", ',', 'self', '.', 'COMMAND_START_TRAJECTORY', ',', 'group_mask', ',', 'relative', ',', 'reversed', ',', 'trajectory_id', ',', 'time_scale', ')', ')']
starts executing a specified trajectory :param trajectory_id: id of the trajectory (previously defined by define_trajectory) :param time_scale: time factor; 1.0 = original speed; >1.0: slower; <1.0: faster :param relative: set to True, if trajectory should be shifted to current setpoint :param reversed: set to True, if trajectory should be executed in reverse :param group_mask: mask for which CFs this should apply to :return:
['starts', 'executing', 'a', 'specified', 'trajectory']
train
https://github.com/bitcraze/crazyflie-lib-python/blob/f6ebb4eb315bbe6e02db518936ac17fb615b2af8/cflib/crazyflie/high_level_commander.py#L135-L158
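The wire format in the record above is struct format '<BBBBBf': five unsigned bytes (command, group mask, the two flags, trajectory id) followed by a little-endian 32-bit float for the time scale, nine bytes in total. A quick check of the packing -- the command id value 5 is an assumption here; substitute cflib's actual COMMAND_START_TRAJECTORY constant:

import struct

COMMAND_START_TRAJECTORY = 5   # assumed value; see cflib for the real constant
ALL_GROUPS = 0

packet = struct.pack('<BBBBBf',
                     COMMAND_START_TRAJECTORY,  # command id (1 byte)
                     ALL_GROUPS,                # group mask (1 byte)
                     False,                     # relative flag packs as 0
                     False,                     # reversed flag packs as 0
                     7,                         # trajectory id (1 byte)
                     1.0)                       # time scale (4-byte LE float)
assert len(packet) == 9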
5,790
TUNE-Archive/freight_forwarder
freight_forwarder/utils/utils.py
_display_stream
def _display_stream(normalized_data, stream): """ print stream message from docker-py stream. """ try: stream.write(normalized_data['stream']) except UnicodeEncodeError: stream.write(normalized_data['stream'].encode("utf-8"))
python
def _display_stream(normalized_data, stream): """ print stream message from docker-py stream. """ try: stream.write(normalized_data['stream']) except UnicodeEncodeError: stream.write(normalized_data['stream'].encode("utf-8"))
['def', '_display_stream', '(', 'normalized_data', ',', 'stream', ')', ':', 'try', ':', 'stream', '.', 'write', '(', 'normalized_data', '[', "'stream'", ']', ')', 'except', 'UnicodeEncodeError', ':', 'stream', '.', 'write', '(', 'normalized_data', '[', "'stream'", ']', '.', 'encode', '(', '"utf-8"', ')', ')']
print stream message from docker-py stream.
['print', 'stream', 'message', 'from', 'docker', '-', 'py', 'stream', '.']
train
https://github.com/TUNE-Archive/freight_forwarder/blob/6ea4a49f474ec04abb8bb81b175c774a16b5312f/freight_forwarder/utils/utils.py#L380-L387
5,791
mozilla/mozilla-django-oidc
mozilla_django_oidc/contrib/drf.py
OIDCAuthentication.authenticate
def authenticate(self, request): """ Authenticate the request and return a tuple of (user, token) or None if there was no authentication attempt. """ access_token = self.get_access_token(request) if not access_token: return None try: user = self.backend.get_or_create_user(access_token, None, None) except HTTPError as exc: resp = exc.response # if the oidc provider returns 401, it means the token is invalid. # in that case, we want to return the upstream error message (which # we can get from the www-authentication header) in the response. if resp.status_code == 401 and 'www-authenticate' in resp.headers: data = parse_www_authenticate_header(resp.headers['www-authenticate']) raise exceptions.AuthenticationFailed(data['error_description']) # for all other http errors, just re-raise the exception. raise except SuspiciousOperation as exc: LOGGER.info('Login failed: %s', exc) raise exceptions.AuthenticationFailed('Login failed') if not user: msg = 'Login failed: No user found for the given access token.' raise exceptions.AuthenticationFailed(msg) return user, access_token
python
def authenticate(self, request): """ Authenticate the request and return a tuple of (user, token) or None if there was no authentication attempt. """ access_token = self.get_access_token(request) if not access_token: return None try: user = self.backend.get_or_create_user(access_token, None, None) except HTTPError as exc: resp = exc.response # if the oidc provider returns 401, it means the token is invalid. # in that case, we want to return the upstream error message (which # we can get from the www-authentication header) in the response. if resp.status_code == 401 and 'www-authenticate' in resp.headers: data = parse_www_authenticate_header(resp.headers['www-authenticate']) raise exceptions.AuthenticationFailed(data['error_description']) # for all other http errors, just re-raise the exception. raise except SuspiciousOperation as exc: LOGGER.info('Login failed: %s', exc) raise exceptions.AuthenticationFailed('Login failed') if not user: msg = 'Login failed: No user found for the given access token.' raise exceptions.AuthenticationFailed(msg) return user, access_token
['def', 'authenticate', '(', 'self', ',', 'request', ')', ':', 'access_token', '=', 'self', '.', 'get_access_token', '(', 'request', ')', 'if', 'not', 'access_token', ':', 'return', 'None', 'try', ':', 'user', '=', 'self', '.', 'backend', '.', 'get_or_create_user', '(', 'access_token', ',', 'None', ',', 'None', ')', 'except', 'HTTPError', 'as', 'exc', ':', 'resp', '=', 'exc', '.', 'response', '# if the oidc provider returns 401, it means the token is invalid.', '# in that case, we want to return the upstream error message (which', '# we can get from the www-authentication header) in the response.', 'if', 'resp', '.', 'status_code', '==', '401', 'and', "'www-authenticate'", 'in', 'resp', '.', 'headers', ':', 'data', '=', 'parse_www_authenticate_header', '(', 'resp', '.', 'headers', '[', "'www-authenticate'", ']', ')', 'raise', 'exceptions', '.', 'AuthenticationFailed', '(', 'data', '[', "'error_description'", ']', ')', '# for all other http errors, just re-raise the exception.', 'raise', 'except', 'SuspiciousOperation', 'as', 'exc', ':', 'LOGGER', '.', 'info', '(', "'Login failed: %s'", ',', 'exc', ')', 'raise', 'exceptions', '.', 'AuthenticationFailed', '(', "'Login failed'", ')', 'if', 'not', 'user', ':', 'msg', '=', "'Login failed: No user found for the given access token.'", 'raise', 'exceptions', '.', 'AuthenticationFailed', '(', 'msg', ')', 'return', 'user', ',', 'access_token']
Authenticate the request and return a tuple of (user, token) or None if there was no authentication attempt.
['Authenticate', 'the', 'request', 'and', 'return', 'a', 'tuple', 'of', '(', 'user', 'token', ')', 'or', 'None', 'if', 'there', 'was', 'no', 'authentication', 'attempt', '.']
train
https://github.com/mozilla/mozilla-django-oidc/blob/e780130deacccbafc85a92f48d1407e042f5f955/mozilla_django_oidc/contrib/drf.py#L62-L94
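The 401 branch in the record above surfaces the provider's own error_description taken from the WWW-Authenticate header. To illustrate the header shape, here is a rough key="value" extractor -- a stand-in for mozilla_django_oidc's parse_www_authenticate_header, which may handle more edge cases:

import re

def parse_www_authenticate(value):
    # Pull key="value" pairs out of e.g.
    #   Bearer error="invalid_token", error_description="Token expired"
    return dict(re.findall(r'(\w+)="([^"]*)"', value))

header = 'Bearer error="invalid_token", error_description="Token expired"'
assert parse_www_authenticate(header)['error_description'] == 'Token expired'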
5,792
saltstack/salt
salt/state.py
BaseHighState._handle_exclude
def _handle_exclude(self, state, sls, saltenv, errors): ''' Take the exclude dec out of the state and apply it to the highstate global dec ''' if 'exclude' in state: exc = state.pop('exclude') if not isinstance(exc, list): err = ('Exclude Declaration in SLS {0} is not formed ' 'as a list'.format(sls)) errors.append(err) state.setdefault('__exclude__', []).extend(exc)
python
def _handle_exclude(self, state, sls, saltenv, errors): ''' Take the exclude dec out of the state and apply it to the highstate global dec ''' if 'exclude' in state: exc = state.pop('exclude') if not isinstance(exc, list): err = ('Exclude Declaration in SLS {0} is not formed ' 'as a list'.format(sls)) errors.append(err) state.setdefault('__exclude__', []).extend(exc)
['def', '_handle_exclude', '(', 'self', ',', 'state', ',', 'sls', ',', 'saltenv', ',', 'errors', ')', ':', 'if', "'exclude'", 'in', 'state', ':', 'exc', '=', 'state', '.', 'pop', '(', "'exclude'", ')', 'if', 'not', 'isinstance', '(', 'exc', ',', 'list', ')', ':', 'err', '=', '(', "'Exclude Declaration in SLS {0} is not formed '", "'as a list'", '.', 'format', '(', 'sls', ')', ')', 'errors', '.', 'append', '(', 'err', ')', 'state', '.', 'setdefault', '(', "'__exclude__'", ',', '[', ']', ')', '.', 'extend', '(', 'exc', ')']
Take the exclude dec out of the state and apply it to the highstate global dec
['Take', 'the', 'exclude', 'dec', 'out', 'of', 'the', 'state', 'and', 'apply', 'it', 'to', 'the', 'highstate', 'global', 'dec']
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/state.py#L3935-L3946
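In miniature, the record above moves an SLS-local exclude declaration into the global __exclude__ list of the compiled high state. The toy dict below is just enough structure to show the before and after, not a full Salt high state:

state = {'exclude': ['other.sls'], 'apache': {'pkg': ['installed']}}

exc = state.pop('exclude')                        # pull out the SLS-local declaration
state.setdefault('__exclude__', []).extend(exc)   # fold it into the global list

assert state == {'apache': {'pkg': ['installed']}, '__exclude__': ['other.sls']}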
5,793
waleedka/hiddenlayer
hiddenlayer/history.py
format_step
def format_step(step, zero_prefix=False): """Return the step value in a format suitable for display.""" if isinstance(step, int): return "{:06}".format(step) if zero_prefix else "{}".format(step) elif isinstance(step, tuple): return "{:04}:{:06}".format(*step) if zero_prefix else "{}:{}".format(*step)
python
def format_step(step, zero_prefix=False): """Return the step value in a format suitable for display.""" if isinstance(step, int): return "{:06}".format(step) if zero_prefix else "{}".format(step) elif isinstance(step, tuple): return "{:04}:{:06}".format(*step) if zero_prefix else "{}:{}".format(*step)
['def', 'format_step', '(', 'step', ',', 'zero_prefix', '=', 'False', ')', ':', 'if', 'isinstance', '(', 'step', ',', 'int', ')', ':', 'return', '"{:06}"', '.', 'format', '(', 'step', ')', 'if', 'zero_prefix', 'else', '"{}"', '.', 'format', '(', 'step', ')', 'elif', 'isinstance', '(', 'step', ',', 'tuple', ')', ':', 'return', '"{:04}:{:06}"', '.', 'format', '(', '*', 'step', ')', 'if', 'zero_prefix', 'else', '"{}:{}"', '.', 'format', '(', '*', 'step', ')']
Return the step value in a format suitable for display.
['Return', 'the', 'step', 'value', 'in', 'a', 'format', 'suitable', 'for', 'display', '.']
train
https://github.com/waleedka/hiddenlayer/blob/294f8732b271cbdd6310c55bdf5ce855cbf61c75/hiddenlayer/history.py#L27-L32
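Concrete outputs of the record above, with the function body repeated (docstring omitted) so the checks run standalone. Note the silent fall-through: a step that is neither an int nor a tuple returns None.

def format_step(step, zero_prefix=False):   # body copied from the record above
    if isinstance(step, int):
        return "{:06}".format(step) if zero_prefix else "{}".format(step)
    elif isinstance(step, tuple):
        return "{:04}:{:06}".format(*step) if zero_prefix else "{}:{}".format(*step)

assert format_step(7) == '7'
assert format_step(7, zero_prefix=True) == '000007'
assert format_step((3, 42)) == '3:42'
assert format_step((3, 42), zero_prefix=True) == '0003:000042'
assert format_step('epoch-3') is None       # silent fall-through for other types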
5,794
AndrewAnnex/SpiceyPy
spiceypy/spiceypy.py
found_check
def found_check(): """ Temporarily enables spiceypy default behavior which raises exceptions for false found flags for certain spice functions. All spice functions executed within the context manager will check the found flag return parameter and the found flag will be removed from the return for the given function. For Example bodc2n in spiceypy is normally called like:: name = spice.bodc2n(399) With the possibility that an exception is thrown in the event of an invalid ID:: name = spice.bodc2n(-999991) # throws a SpiceyError With this function however, we can use it as a context manager to do this:: with spice.found_check(): found = spice.bodc2n(-999991) # will raise an exception! Within the context any spice functions called that normally check the found flags will raise an exception if they are false. """ current_catch_state = config.catch_false_founds config.catch_false_founds = True yield config.catch_false_founds = current_catch_state
python
def found_check(): """ Temporarily enables spiceypy default behavior which raises exceptions for false found flags for certain spice functions. All spice functions executed within the context manager will check the found flag return parameter and the found flag will be removed from the return for the given function. For Example bodc2n in spiceypy is normally called like:: name = spice.bodc2n(399) With the possibility that an exception is thrown in the event of an invalid ID:: name = spice.bodc2n(-999991) # throws a SpiceyError With this function however, we can use it as a context manager to do this:: with spice.found_check(): found = spice.bodc2n(-999991) # will raise an exception! Within the context any spice functions called that normally check the found flags will raise an exception if they are false. """ current_catch_state = config.catch_false_founds config.catch_false_founds = True yield config.catch_false_founds = current_catch_state
['def', 'found_check', '(', ')', ':', 'current_catch_state', '=', 'config', '.', 'catch_false_founds', 'config', '.', 'catch_false_founds', '=', 'True', 'yield', 'config', '.', 'catch_false_founds', '=', 'current_catch_state']
Temporarily enables spiceypy default behavior which raises exceptions for false found flags for certain spice functions. All spice functions executed within the context manager will check the found flag return parameter and the found flag will be removed from the return for the given function. For Example bodc2n in spiceypy is normally called like:: name = spice.bodc2n(399) With the possibility that an exception is thrown in the event of an invalid ID:: name = spice.bodc2n(-999991) # throws a SpiceyError With this function however, we can use it as a context manager to do this:: with spice.found_check(): found = spice.bodc2n(-999991) # will raise an exception! Within the context any spice functions called that normally check the found flags will raise an exception if they are false.
['Temporarily', 'enables', 'spiceypy', 'default', 'behavior', 'which', 'raises', 'exceptions', 'for', 'false', 'found', 'flags', 'for', 'certain', 'spice', 'functions', '.', 'All', 'spice', 'functions', 'executed', 'within', 'the', 'context', 'manager', 'will', 'check', 'the', 'found', 'flag', 'return', 'parameter', 'and', 'the', 'found', 'flag', 'will', 'be', 'removed', 'from', 'the', 'return', 'for', 'the', 'given', 'function', '.', 'For', 'Example', 'bodc2n', 'in', 'spiceypy', 'is', 'normally', 'called', 'like', '::']
train
https://github.com/AndrewAnnex/SpiceyPy/blob/fc20a9b9de68b58eed5b332f0c051fb343a6e335/spiceypy/spiceypy.py#L143-L170
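One caveat in the found_check record above: the flag is restored on the line after yield, so an exception raised inside the with block would leave catch_false_founds stuck at True. The more defensive shape wraps the yield in try/finally -- a sketch only (config is a stand-in object here, and current spiceypy may already guard this):

import contextlib

class _Config(object):              # stand-in for spiceypy's config module
    catch_false_founds = False

config = _Config()

@contextlib.contextmanager
def found_check_sketch():
    previous = config.catch_false_founds
    config.catch_false_founds = True
    try:
        yield                       # with-block body runs here
    finally:
        config.catch_false_founds = previous   # restored even on exceptions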
5,795
boriel/zxbasic
arch/zx48k/backend/__32bit.py
_sub32
def _sub32(ins): """ Pops last 2 dwords from the stack and subtracts them. Then pushes the result onto the stack. NOTE: The operation is TOP[0] = TOP[-1] - TOP[0] If TOP[0] is 0, nothing is done """ op1, op2 = tuple(ins.quad[2:]) if is_int(op2): if int(op2) == 0: # A - 0 = A => Do Nothing output = _32bit_oper(op1) output.append('push de') output.append('push hl') return output rev = op1[0] != 't' and not is_int(op1) and op2[0] == 't' output = _32bit_oper(op1, op2, rev) output.append('call __SUB32') output.append('push de') output.append('push hl') REQUIRES.add('sub32.asm') return output
python
def _sub32(ins): """ Pops last 2 dwords from the stack and subtracts them. Then pushes the result onto the stack. NOTE: The operation is TOP[0] = TOP[-1] - TOP[0] If TOP[0] is 0, nothing is done """ op1, op2 = tuple(ins.quad[2:]) if is_int(op2): if int(op2) == 0: # A - 0 = A => Do Nothing output = _32bit_oper(op1) output.append('push de') output.append('push hl') return output rev = op1[0] != 't' and not is_int(op1) and op2[0] == 't' output = _32bit_oper(op1, op2, rev) output.append('call __SUB32') output.append('push de') output.append('push hl') REQUIRES.add('sub32.asm') return output
['def', '_sub32', '(', 'ins', ')', ':', 'op1', ',', 'op2', '=', 'tuple', '(', 'ins', '.', 'quad', '[', '2', ':', ']', ')', 'if', 'is_int', '(', 'op2', ')', ':', 'if', 'int', '(', 'op2', ')', '==', '0', ':', '# A - 0 = A => Do Nothing', 'output', '=', '_32bit_oper', '(', 'op1', ')', 'output', '.', 'append', '(', "'push de'", ')', 'output', '.', 'append', '(', "'push hl'", ')', 'return', 'output', 'rev', '=', 'op1', '[', '0', ']', '!=', "'t'", 'and', 'not', 'is_int', '(', 'op1', ')', 'and', 'op2', '[', '0', ']', '==', "'t'", 'output', '=', '_32bit_oper', '(', 'op1', ',', 'op2', ',', 'rev', ')', 'output', '.', 'append', '(', "'call __SUB32'", ')', 'output', '.', 'append', '(', "'push de'", ')', 'output', '.', 'append', '(', "'push hl'", ')', 'REQUIRES', '.', 'add', '(', "'sub32.asm'", ')', 'return', 'output']
Pops last 2 dwords from the stack and subtracts them. Then pushes the result onto the stack. NOTE: The operation is TOP[0] = TOP[-1] - TOP[0] If TOP[0] is 0, nothing is done
['Pops', 'last', '2', 'dwords', 'from', 'the', 'stack', 'and', 'subtracts', 'them', '.', 'Then', 'pushes', 'the', 'result', 'onto', 'the', 'stack', '.', 'NOTE', ':', 'The', 'operation', 'is', 'TOP', '[', '0', ']', '=', 'TOP', '[', '-', '1', ']', '-', 'TOP', '[', '0', ']']
train
https://github.com/boriel/zxbasic/blob/23b28db10e41117805bdb3c0f78543590853b132/arch/zx48k/backend/__32bit.py#L241-L264
5,796
jay-johnson/spylunking
spylunking/scripts/search_splunk.py
show_search_results
def show_search_results( log_rec, code_view=True, json_view=False, show_message_details=False): """show_search_results Show search results like rsyslog or as pretty-printed JSON dictionaries per log for debugging drill-down fields :param log_rec: log record from splunk :param code_view: show as a normal tail -f <log file> view :param json_view: pretty print each log's dictionary :param show_message_details """ log_dict = None try: log_dict = json.loads( log_rec) except Exception as e: log.error(( 'Failed logging record={} with ex={}').format( log_rec, e)) return # end of try/ex if not log_dict: log.error(( 'Failed to parse log_rec={} as a dictionary').format( log_rec)) return if code_view: comp_name = log_dict.get( 'name', '') logger_name = log_dict.get( 'logger_name', '') use_log_name = ( '{}').format( logger_name) if logger_name: use_log_name = '{}'.format( logger_name) else: if comp_name: use_log_name = '{}'.format( comp_name) prefix_log = ( '{} {} - {} -').format( log_dict.get( 'systime', log_dict.get( 'asctime', '')), use_log_name, log_dict.get( 'levelname', '')) suffix_log = '' if log_dict.get( 'exc', ''): suffix_log = ( '{} exc={}').format( suffix_log, log_dict.get( 'exc', '')) if show_message_details: suffix_log = ( 'dc={} env={} ' 'source={} line={}').format( log_dict.get( 'dc', ''), log_dict.get( 'env', ''), log_dict.get( 'path', ''), log_dict.get( 'lineno', '')) msg = ( '{} {} {}').format( prefix_log, log_dict.get( 'message', ''), suffix_log) if log_dict['levelname'] == 'INFO': log.info(( '{}').format( msg)) elif log_dict['levelname'] == 'DEBUG': log.debug(( '{}').format( msg)) elif log_dict['levelname'] == 'ERROR': log.error(( '{}').format( msg)) elif log_dict['levelname'] == 'CRITICAL': log.critical(( '{}').format( msg)) elif log_dict['levelname'] == 'WARNING': log.warning(( '{}').format( msg)) else: log.debug(( '{}').format( msg)) elif json_view: if log_dict['levelname'] == 'INFO': log.info(( '{}').format( ppj(log_dict))) elif log_dict['levelname'] == 'DEBUG': log.debug(( '{}').format( ppj(log_dict))) elif log_dict['levelname'] == 'ERROR': log.error(( '{}').format( ppj(log_dict))) elif log_dict['levelname'] == 'CRITICAL': log.critical(( '{}').format( ppj(log_dict))) elif log_dict['levelname'] == 'WARNING': log.warning(( '{}').format( ppj(log_dict))) else: log.debug(( '{}').format( ppj(log_dict))) else: log.error(( 'Please use either code_view or json_view to view the logs'))
python
def show_search_results( log_rec, code_view=True, json_view=False, show_message_details=False): """show_search_results Show search results like rsyslog or as pretty-printed JSON dictionaries per log for debugging drill-down fields :param log_rec: log record from splunk :param code_view: show as a normal tail -f <log file> view :param json_view: pretty print each log's dictionary :param show_message_details """ log_dict = None try: log_dict = json.loads( log_rec) except Exception as e: log.error(( 'Failed logging record={} with ex={}').format( log_rec, e)) return # end of try/ex if not log_dict: log.error(( 'Failed to parse log_rec={} as a dictionary').format( log_rec)) return if code_view: comp_name = log_dict.get( 'name', '') logger_name = log_dict.get( 'logger_name', '') use_log_name = ( '{}').format( logger_name) if logger_name: use_log_name = '{}'.format( logger_name) else: if comp_name: use_log_name = '{}'.format( comp_name) prefix_log = ( '{} {} - {} -').format( log_dict.get( 'systime', log_dict.get( 'asctime', '')), use_log_name, log_dict.get( 'levelname', '')) suffix_log = '' if log_dict.get( 'exc', ''): suffix_log = ( '{} exc={}').format( suffix_log, log_dict.get( 'exc', '')) if show_message_details: suffix_log = ( 'dc={} env={} ' 'source={} line={}').format( log_dict.get( 'dc', ''), log_dict.get( 'env', ''), log_dict.get( 'path', ''), log_dict.get( 'lineno', '')) msg = ( '{} {} {}').format( prefix_log, log_dict.get( 'message', ''), suffix_log) if log_dict['levelname'] == 'INFO': log.info(( '{}').format( msg)) elif log_dict['levelname'] == 'DEBUG': log.debug(( '{}').format( msg)) elif log_dict['levelname'] == 'ERROR': log.error(( '{}').format( msg)) elif log_dict['levelname'] == 'CRITICAL': log.critical(( '{}').format( msg)) elif log_dict['levelname'] == 'WARNING': log.warning(( '{}').format( msg)) else: log.debug(( '{}').format( msg)) elif json_view: if log_dict['levelname'] == 'INFO': log.info(( '{}').format( ppj(log_dict))) elif log_dict['levelname'] == 'DEBUG': log.debug(( '{}').format( ppj(log_dict))) elif log_dict['levelname'] == 'ERROR': log.error(( '{}').format( ppj(log_dict))) elif log_dict['levelname'] == 'CRITICAL': log.critical(( '{}').format( ppj(log_dict))) elif log_dict['levelname'] == 'WARNING': log.warning(( '{}').format( ppj(log_dict))) else: log.debug(( '{}').format( ppj(log_dict))) else: log.error(( 'Please use either code_view or json_view to view the logs'))
['def', 'show_search_results', '(', 'log_rec', ',', 'code_view', '=', 'True', ',', 'json_view', '=', 'False', ',', 'show_message_details', '=', 'False', ')', ':', 'log_dict', '=', 'None', 'try', ':', 'log_dict', '=', 'json', '.', 'loads', '(', 'log_rec', ')', 'except', 'Exception', 'as', 'e', ':', 'log', '.', 'error', '(', '(', "'Failed logging record={} with ex={}'", ')', '.', 'format', '(', 'log_rec', ',', 'e', ')', ')', 'return', '# end of try/ex', 'if', 'not', 'log_dict', ':', 'log', '.', 'error', '(', '(', "'Failed to parse log_rec={} as a dictionary'", ')', '.', 'format', '(', 'log_rec', ')', ')', 'return', 'if', 'code_view', ':', 'comp_name', '=', 'log_dict', '.', 'get', '(', "'name'", ',', "''", ')', 'logger_name', '=', 'log_dict', '.', 'get', '(', "'logger_name'", ',', "''", ')', 'use_log_name', '=', '(', "'{}'", ')', '.', 'format', '(', 'logger_name', ')', 'if', 'logger_name', ':', 'use_log_name', '=', "'{}'", '.', 'format', '(', 'logger_name', ')', 'else', ':', 'if', 'comp_name', ':', 'use_log_name', '=', "'{}'", '.', 'format', '(', 'comp_name', ')', 'prefix_log', '=', '(', "'{} {} - {} -'", ')', '.', 'format', '(', 'log_dict', '.', 'get', '(', "'systime'", ',', 'log_dict', '.', 'get', '(', "'asctime'", ',', "''", ')', ')', ',', 'use_log_name', ',', 'log_dict', '.', 'get', '(', "'levelname'", ',', "''", ')', ')', 'suffix_log', '=', "''", 'if', 'log_dict', '.', 'get', '(', "'exc'", ',', "''", ')', ':', 'suffix_log', '=', '(', "'{} exc={}'", ')', '.', 'format', '(', 'suffix_log', ',', 'log_dict', '.', 'get', '(', "'exc'", ',', "''", ')', ')', 'if', 'show_message_details', ':', 'suffix_log', '=', '(', "'dc={} env={} '", "'source={} line={}'", ')', '.', 'format', '(', 'log_dict', '.', 'get', '(', "'dc'", ',', "''", ')', ',', 'log_dict', '.', 'get', '(', "'env'", ',', "''", ')', ',', 'log_dict', '.', 'get', '(', "'path'", ',', "''", ')', ',', 'log_dict', '.', 'get', '(', "'lineno'", ',', "''", ')', ')', 'msg', '=', '(', "'{} {} {}'", ')', '.', 'format', '(', 'prefix_log', ',', 'log_dict', '.', 'get', '(', "'message'", ',', "''", ')', ',', 'suffix_log', ')', 'if', 'log_dict', '[', "'levelname'", ']', '==', "'INFO'", ':', 'log', '.', 'info', '(', '(', "'{}'", ')', '.', 'format', '(', 'msg', ')', ')', 'elif', 'log_dict', '[', "'levelname'", ']', '==', "'DEBUG'", ':', 'log', '.', 'debug', '(', '(', "'{}'", ')', '.', 'format', '(', 'msg', ')', ')', 'elif', 'log_dict', '[', "'levelname'", ']', '==', "'ERROR'", ':', 'log', '.', 'error', '(', '(', "'{}'", ')', '.', 'format', '(', 'msg', ')', ')', 'elif', 'log_dict', '[', "'levelname'", ']', '==', "'CRITICAL'", ':', 'log', '.', 'critical', '(', '(', "'{}'", ')', '.', 'format', '(', 'msg', ')', ')', 'elif', 'log_dict', '[', "'levelname'", ']', '==', "'WARNING'", ':', 'log', '.', 'warning', '(', '(', "'{}'", ')', '.', 'format', '(', 'msg', ')', ')', 'else', ':', 'log', '.', 'debug', '(', '(', "'{}'", ')', '.', 'format', '(', 'msg', ')', ')', 'elif', 'json_view', ':', 'if', 'log_dict', '[', "'levelname'", ']', '==', "'INFO'", ':', 'log', '.', 'info', '(', '(', "'{}'", ')', '.', 'format', '(', 'ppj', '(', 'log_dict', ')', ')', ')', 'elif', 'log_dict', '[', "'levelname'", ']', '==', "'DEBUG'", ':', 'log', '.', 'debug', '(', '(', "'{}'", ')', '.', 'format', '(', 'ppj', '(', 'log_dict', ')', ')', ')', 'elif', 'log_dict', '[', "'levelname'", ']', '==', "'ERROR'", ':', 'log', '.', 'error', '(', '(', "'{}'", ')', '.', 'format', '(', 'ppj', '(', 'log_dict', ')', ')', ')', 'elif', 'log_dict', '[', "'levelname'", ']', '==', "'CRITICAL'", ':', 'log', '.', 'critical', '(', '(', "'{}'", ')', '.', 'format', '(', 'ppj', '(', 'log_dict', ')', ')', ')', 'elif', 'log_dict', '[', "'levelname'", ']', '==', "'WARNING'", ':', 'log', '.', 'warning', '(', '(', "'{}'", ')', '.', 'format', '(', 'ppj', '(', 'log_dict', ')', ')', ')', 'else', ':', 'log', '.', 'debug', '(', '(', "'{}'", ')', '.', 'format', '(', 'ppj', '(', 'log_dict', ')', ')', ')', 'else', ':', 'log', '.', 'error', '(', '(', "'Please use either code_view or json_view to view the logs'", ')', ')']
show_search_results Show search results like rsyslog or as pretty-printed JSON dictionaries per log for debugging drill-down fields :param log_rec: log record from splunk :param code_view: show as a normal tail -f <log file> view :param json_view: pretty print each log's dictionary :param show_message_details
['show_search_results']
train
https://github.com/jay-johnson/spylunking/blob/95cc86776f04ec5935cf04e291cf18798345d6cb/spylunking/scripts/search_splunk.py#L97-L248
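The repeated if/elif ladders in the record above dispatch on levelname; a common compaction resolves the logger method by name and falls back to debug for unknown levels. A refactoring sketch, not the project's code:

import logging

log = logging.getLogger(__name__)

def emit(levelname, msg):
    # 'INFO' -> log.info, 'CRITICAL' -> log.critical, unknown -> log.debug
    getattr(log, levelname.lower(), log.debug)(msg)

# emit('ERROR', 'disk full') behaves like log.error('disk full')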
5,797
dpkp/kafka-python
kafka/consumer/subscription_state.py
SubscriptionState.need_offset_reset
def need_offset_reset(self, partition, offset_reset_strategy=None): """Mark partition for offset reset using specified or default strategy. Arguments: partition (TopicPartition): partition to mark offset_reset_strategy (OffsetResetStrategy, optional) """ if offset_reset_strategy is None: offset_reset_strategy = self._default_offset_reset_strategy self.assignment[partition].await_reset(offset_reset_strategy)
python
def need_offset_reset(self, partition, offset_reset_strategy=None): """Mark partition for offset reset using specified or default strategy. Arguments: partition (TopicPartition): partition to mark offset_reset_strategy (OffsetResetStrategy, optional) """ if offset_reset_strategy is None: offset_reset_strategy = self._default_offset_reset_strategy self.assignment[partition].await_reset(offset_reset_strategy)
['def', 'need_offset_reset', '(', 'self', ',', 'partition', ',', 'offset_reset_strategy', '=', 'None', ')', ':', 'if', 'offset_reset_strategy', 'is', 'None', ':', 'offset_reset_strategy', '=', 'self', '.', '_default_offset_reset_strategy', 'self', '.', 'assignment', '[', 'partition', ']', '.', 'await_reset', '(', 'offset_reset_strategy', ')']
Mark partition for offset reset using specified or default strategy. Arguments: partition (TopicPartition): partition to mark offset_reset_strategy (OffsetResetStrategy, optional)
['Mark', 'partition', 'for', 'offset', 'reset', 'using', 'specified', 'or', 'default', 'strategy', '.']
train
https://github.com/dpkp/kafka-python/blob/f6a8a38937688ea2cc5dc13d3d1039493be5c9b5/kafka/consumer/subscription_state.py#L325-L334
5,798
senaite/senaite.core
bika/lims/browser/analysisrequest/add2.py
ajaxAnalysisRequestAddView.get_base_info
def get_base_info(self, obj): """Returns the base info of an object """ if obj is None: return {} info = { "id": obj.getId(), "uid": obj.UID(), "title": obj.Title(), "description": obj.Description(), "url": obj.absolute_url(), } return info
python
def get_base_info(self, obj): """Returns the base info of an object """ if obj is None: return {} info = { "id": obj.getId(), "uid": obj.UID(), "title": obj.Title(), "description": obj.Description(), "url": obj.absolute_url(), } return info
['def', 'get_base_info', '(', 'self', ',', 'obj', ')', ':', 'if', 'obj', 'is', 'None', ':', 'return', '{', '}', 'info', '=', '{', '"id"', ':', 'obj', '.', 'getId', '(', ')', ',', '"uid"', ':', 'obj', '.', 'UID', '(', ')', ',', '"title"', ':', 'obj', '.', 'Title', '(', ')', ',', '"description"', ':', 'obj', '.', 'Description', '(', ')', ',', '"url"', ':', 'obj', '.', 'absolute_url', '(', ')', ',', '}', 'return', 'info']
Returns the base info of an object
['Returns', 'the', 'base', 'info', 'of', 'an', 'object']
train
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/browser/analysisrequest/add2.py#L793-L807
5,799
SuryaSankar/flask-sqlalchemy-booster
flask_sqlalchemy_booster/model_booster/queryable_mixin.py
QueryableMixin.count
def count(cls, *criterion, **kwargs): """Returns a count of the instances meeting the specified filter criterion and kwargs. Examples: >>> User.count() 500 >>> User.count(country="India") 300 >>> User.count(User.age > 50, country="India") 39 """ if criterion or kwargs: return cls.filter( *criterion, **kwargs).count() else: return cls.query.count()
python
def count(cls, *criterion, **kwargs): """Returns a count of the instances meeting the specified filter criterion and kwargs. Examples: >>> User.count() 500 >>> User.count(country="India") 300 >>> User.count(User.age > 50, country="India") 39 """ if criterion or kwargs: return cls.filter( *criterion, **kwargs).count() else: return cls.query.count()
['def', 'count', '(', 'cls', ',', '*', 'criterion', ',', '*', '*', 'kwargs', ')', ':', 'if', 'criterion', 'or', 'kwargs', ':', 'return', 'cls', '.', 'filter', '(', '*', 'criterion', ',', '*', '*', 'kwargs', ')', '.', 'count', '(', ')', 'else', ':', 'return', 'cls', '.', 'query', '.', 'count', '(', ')']
Returns a count of the instances meeting the specified filter criterion and kwargs. Examples: >>> User.count() 500 >>> User.count(country="India") 300 >>> User.count(User.age > 50, country="India") 39
['Returns', 'a', 'count', 'of', 'the', 'instances', 'meeting', 'the', 'specified', 'filter', 'criterion', 'and', 'kwargs', '.']
train
https://github.com/SuryaSankar/flask-sqlalchemy-booster/blob/444048d167ab7718f758e943665ef32d101423a5/flask_sqlalchemy_booster/model_booster/queryable_mixin.py#L290-L311