Dataset schema: column, dtype, and the viewer's min/max stats (for string columns the min/max are lengths in characters; "1 distinct value" columns are constant across this split).

    column                       dtype     min      max
    Unnamed: 0                   int64     0        10k
    repository_name              string    7        54
    func_path_in_repository     string    5        223
    func_name                    string    1        134
    whole_func_string            string    100      30.3k
    language                     string    1 distinct value (python)
    func_code_string             string    100      30.3k
    func_code_tokens             string    138      33.2k
    func_documentation_string    string    1        15k
    func_documentation_tokens    string    5        5.14k
    split_name                   string    1 distinct value (train)
    func_code_url                string    91       315
4,100
OLC-Bioinformatics/sipprverse
method.py
Method.complete
def complete(self):
    """
    Determine if the analyses of the strains are complete e.g. there are no missing GDCS genes,
    and the sample.general.bestassemblyfile != 'NA'
    """
    # Boolean to store the completeness of the analyses
    allcomplete = True
    # Clear the list of samples that still require more sequence data
    self.incomplete = list()
    for sample in self.runmetadata.samples:
        if sample.general.bestassemblyfile != 'NA':
            try:
                # If the sample has been tagged as incomplete, only add it to the complete metadata list if the
                # pipeline is on its final iteration
                if sample.general.incomplete:
                    if self.final:
                        self.completemetadata.append(sample)
                    else:
                        sample.general.complete = False
                        allcomplete = False
                        self.incomplete.append(sample.name)
            except AttributeError:
                sample.general.complete = True
                self.completemetadata.append(sample)
        else:
            if self.final:
                self.completemetadata.append(sample)
            else:
                sample.general.complete = False
                allcomplete = False
                self.incomplete.append(sample.name)
    # If all the samples are complete, set the global variable for run completeness to True
    if allcomplete:
        self.analysescomplete = True
python
train
https://github.com/OLC-Bioinformatics/sipprverse/blob/d4f10cdf8e1a39dac0953db61c21c97efc6006de/method.py#L228-L261
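The attribute probe in the try/except above is the crux: samples whose metadata never gained an incomplete flag raise AttributeError and count as complete. A minimal, self-contained sketch of that idiom, using stand-in objects of my own (the real pipeline passes its own metadata classes):

from types import SimpleNamespace

def is_complete(sample):
    # Mirrors Method.complete: no 'incomplete' attribute -> AttributeError -> complete
    try:
        return not sample.general.incomplete
    except AttributeError:
        return True

done = SimpleNamespace(general=SimpleNamespace(bestassemblyfile='x.fa'))
flagged = SimpleNamespace(general=SimpleNamespace(bestassemblyfile='x.fa', incomplete=True))
print(is_complete(done), is_complete(flagged))  # True False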
4,101
blakev/python-syncthing
syncthing/__init__.py
Database.browse
def browse(self, folder, levels=None, prefix=None):
    """ Returns the directory tree of the global model.

    Directories are always JSON objects (map/dictionary), and files
    are always arrays of modification time and size. The first
    integer is the files modification time, and the second integer
    is the file size.

    Args:
        folder (str): The root folder to traverse.
        levels (int): How deep within the tree we want to dwell down.
            (0 based, defaults to unlimited depth)
        prefix (str): Defines a prefix within the tree where to start
            building the structure.

    Returns:
        dict
    """
    assert isinstance(levels, int) or levels is None
    assert isinstance(prefix, string_types) or prefix is None
    return self.get('browse', params={'folder': folder,
                                      'levels': levels,
                                      'prefix': prefix})
python
train
https://github.com/blakev/python-syncthing/blob/a7f4930f86f7543cd96990277945467896fb523d/syncthing/__init__.py#L581-L603
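A hedged usage sketch of the endpoint above. The record does not show client construction, so `db` here stands for an already-authenticated Database instance; the return-value handling follows the docstring's contract (dicts are directories, arrays are [mtime, size] file entries):

tree = db.browse('default')                      # full tree of folder 'default'
top = db.browse('default', levels=0)             # only the top level
photos = db.browse('default', prefix='photos')   # subtree rooted at 'photos/'

for name, entry in tree.items():
    # per the docstring: dict -> directory, list -> [modification_time, size]
    kind = 'dir' if isinstance(entry, dict) else 'file'
    print(kind, name)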
4,102
nugget/python-insteonplm
insteonplm/states/x10.py
X10DimmableSwitch.set_level
def set_level(self, val):
    """Set the device ON LEVEL."""
    if val == 0:
        self.off()
    elif val == 255:
        self.on()
    else:
        setlevel = 255
        if val < 1:
            setlevel = val * 255
        elif val <= 0xff:
            setlevel = val
        change = setlevel - self._value
        increment = 255 / self._steps
        steps = round(abs(change) / increment)
        print('Steps: ', steps)
        if change > 0:
            method = self.brighten
            self._value += round(steps * increment)
            self._value = min(255, self._value)
        else:
            method = self.dim
            self._value -= round(steps * increment)
            self._value = max(0, self._value)
        # pylint: disable=unused-variable
        for step in range(0, steps):
            method(True)
        self._update_subscribers(self._value)
python
train
https://github.com/nugget/python-insteonplm/blob/65548041f1b0729ae1ae904443dd81b0c6cbf1bf/insteonplm/states/x10.py#L107-L134
4,103
jkokorian/pyqt2waybinding
pyqt2waybinding/__init__.py
Observer.bindToProperty
def bindToProperty(self, instance, propertyName, useGetter=False):
    """
    2-way binds to an instance property.

    Parameters:
    - instance -- the object instance
    - propertyName -- the name of the property to bind to
    - useGetter: when True, calls the getter method to obtain the value.
      When False, the signal argument is used as input for the target setter.
      (default False)

    Notes:
    2-way binds to an instance property according to one of the following
    naming conventions:

    @property, propertyName.setter and pyqtSignal
    - getter: propertyName
    - setter: propertyName
    - changedSignal: propertyNameChanged

    getter, setter and pyqtSignal (this is used when binding to standard
    QWidgets like QSpinBox)
    - getter: propertyName()
    - setter: setPropertyName()
    - changedSignal: propertyNameChanged
    """
    endpoint = BindingEndpoint.forProperty(instance, propertyName, useGetter=useGetter)
    self.bindToEndPoint(endpoint)
python
train
https://github.com/jkokorian/pyqt2waybinding/blob/fb1fb84f55608cfbf99c6486650100ba81743117/pyqt2waybinding/__init__.py#L142-L166
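The second naming convention in the docstring matches stock Qt widgets, so two spin boxes can be kept in sync with a sketch like the following. Assumptions: `Observer` is importable as the record's path suggests and takes a default constructor; PyQt5 is used here but PyQt4 should work analogously for a package of this vintage:

from PyQt5.QtWidgets import QApplication, QSpinBox
from pyqt2waybinding import Observer

app = QApplication([])
spin_a, spin_b = QSpinBox(), QSpinBox()

observer = Observer()
# QSpinBox exposes value()/setValue()/valueChanged, i.e. the second convention
observer.bindToProperty(spin_a, 'value')
observer.bindToProperty(spin_b, 'value')

spin_a.setValue(7)  # spin_b follows through the binding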
4,104
kensho-technologies/graphql-compiler
graphql_compiler/schema_generation/schema_graph.py
SchemaGraph.get_default_property_values
def get_default_property_values(self, classname):
    """Return a dict with default values for all properties declared on this class."""
    schema_element = self.get_element_by_class_name(classname)
    result = {
        property_name: property_descriptor.default
        for property_name, property_descriptor in six.iteritems(schema_element.properties)
    }
    if schema_element.is_edge:
        # Remove the source/destination properties for edges, if they exist.
        result.pop(EDGE_SOURCE_PROPERTY_NAME, None)
        result.pop(EDGE_DESTINATION_PROPERTY_NAME, None)
    return result
python
train
https://github.com/kensho-technologies/graphql-compiler/blob/f6079c6d10f64932f6b3af309b79bcea2123ca8f/graphql_compiler/schema_generation/schema_graph.py#L297-L311
4,105
tensorpack/tensorpack
tensorpack/models/layer_norm.py
LayerNorm
def LayerNorm(
        x, epsilon=1e-5,
        use_bias=True, use_scale=True,
        gamma_init=None, data_format='channels_last'):
    """
    Layer Normalization layer, as described in the paper:
    `Layer Normalization <https://arxiv.org/abs/1607.06450>`_.

    Args:
        x (tf.Tensor): a 4D or 2D tensor. When 4D, the layout should match data_format.
        epsilon (float): epsilon to avoid divide-by-zero.
        use_scale, use_bias (bool): whether to use the extra affine transformation or not.
    """
    data_format = get_data_format(data_format, keras_mode=False)
    shape = x.get_shape().as_list()
    ndims = len(shape)
    assert ndims in [2, 4]

    mean, var = tf.nn.moments(x, list(range(1, len(shape))), keep_dims=True)

    if data_format == 'NCHW':
        chan = shape[1]
        new_shape = [1, chan, 1, 1]
    else:
        chan = shape[-1]
        new_shape = [1, 1, 1, chan]
    if ndims == 2:
        new_shape = [1, chan]

    if use_bias:
        beta = tf.get_variable('beta', [chan], initializer=tf.constant_initializer())
        beta = tf.reshape(beta, new_shape)
    else:
        beta = tf.zeros([1] * ndims, name='beta')
    if use_scale:
        if gamma_init is None:
            gamma_init = tf.constant_initializer(1.0)
        gamma = tf.get_variable('gamma', [chan], initializer=gamma_init)
        gamma = tf.reshape(gamma, new_shape)
    else:
        gamma = tf.ones([1] * ndims, name='gamma')

    ret = tf.nn.batch_normalization(x, mean, var, beta, gamma, epsilon, name='output')

    vh = ret.variables = VariableHolder()
    if use_scale:
        vh.gamma = gamma
    if use_bias:
        vh.beta = beta
    return ret
python
train
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/tensorpack/models/layer_norm.py#L14-L63
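For reference, the normalization performed above reduces to the following NumPy sketch (moments over every axis except the batch axis, then an affine transform); it should agree with the TF code up to numerics:

import numpy as np

def layer_norm_ref(x, gamma=1.0, beta=0.0, eps=1e-5):
    axes = tuple(range(1, x.ndim))  # all axes but the batch axis, as above
    mean = x.mean(axis=axes, keepdims=True)
    var = x.var(axis=axes, keepdims=True)
    return (x - mean) / np.sqrt(var + eps) * gamma + beta

x = np.random.randn(2, 8).astype('float32')
y = layer_norm_ref(x)
print(y.mean(axis=1), y.std(axis=1))  # close to 0 and 1 per sample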
4,106
SiLab-Bonn/pyBAR
pybar/utils/utils.py
get_iso_time
def get_iso_time():
    '''returns time as ISO string, mapping to and from datetime in ugly way
    convert to string with str()
    '''
    t1 = time.time()
    t2 = datetime.datetime.fromtimestamp(t1)
    t4 = t2.__str__()
    try:
        t4a, t4b = t4.split(".", 1)
    except ValueError:
        t4a = t4
        t4b = '000000'
    t5 = datetime.datetime.strptime(t4a, "%Y-%m-%d %H:%M:%S")
    ms = int(t4b.ljust(6, '0')[:6])
    return t5.replace(microsecond=ms)
python
train
https://github.com/SiLab-Bonn/pyBAR/blob/5ad95bbcd41cd358825823fb78f396cfce23593e/pybar/utils/utils.py#L267-L282
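Note that, as the docstring's last line hints, the function actually returns a datetime object; the ISO-style string only appears once str() is applied:

now = get_iso_time()
print(repr(now))  # datetime.datetime(...), microseconds preserved
print(str(now))   # e.g. '2019-05-01 12:34:56.789000'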
4,107
PmagPy/PmagPy
programs/s_hext.py
main
def main():
    """
    NAME
        s_hext.py

    DESCRIPTION
        calculates Hext statistics for tensor data

    SYNTAX
        s_hext.py [-h][-i][-f file] [<filename]

    OPTIONS
        -h prints help message and quits
        -f file specifies filename on command line
        -l NMEAS do line by line instead of whole file, use number of measurements NMEAS for degrees of freedom
        < filename, reads from standard input (Unix like operating systems only)

    INPUT
        x11,x22,x33,x12,x23,x13,sigma [sigma only if line by line]

    OUTPUT
        F F12 F23 sigma and three sets of:
        tau dec inc Eij dec inc Eik dec inc

    DEFAULT
        average whole file
    """
    ave = 1
    if '-h' in sys.argv:
        print(main.__doc__)
        sys.exit()
    if '-l' in sys.argv:
        ind = sys.argv.index('-l')
        npts = int(sys.argv[ind + 1])
        ave = 0
    if '-f' in sys.argv:
        ind = sys.argv.index('-f')
        file = sys.argv[ind + 1]
        f = open(file, 'r')
        data = f.readlines()
        f.close()
    else:
        data = sys.stdin.readlines()
    Ss = []
    for line in data:
        s = []
        rec = line.split()
        for i in range(6):
            s.append(float(rec[i]))
        if ave == 0:
            sig = float(rec[6])
            hpars = pmag.dohext(npts - 6, sig, s)
            print('%s %4.2f %s %4.2f %s %4.2f' % ('F = ', hpars['F'], 'F12 = ', hpars['F12'], 'F23 = ', hpars['F23']))
            print('%s %i %s %14.12f' % ('Nmeas = ', npts, ' sigma = ', sig))
            print('%7.5f %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f' % (hpars["t1"], hpars["v1_dec"], hpars["v1_inc"], hpars["e12"], hpars["v2_dec"], hpars["v2_inc"], hpars["e13"], hpars["v3_dec"], hpars["v3_inc"]))
            print('%7.5f %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f' % (hpars["t2"], hpars["v2_dec"], hpars["v2_inc"], hpars["e23"], hpars["v3_dec"], hpars["v3_inc"], hpars["e12"], hpars["v1_dec"], hpars["v1_inc"]))
            print('%7.5f %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f' % (hpars["t3"], hpars["v3_dec"], hpars["v3_inc"], hpars["e13"], hpars["v1_dec"], hpars["v1_inc"], hpars["e23"], hpars["v2_dec"], hpars["v2_inc"]))
        else:
            Ss.append(s)
    if ave == 1:
        npts = len(Ss)
        nf, sigma, avs = pmag.sbar(Ss)
        hpars = pmag.dohext(nf, sigma, avs)
        print('%s %4.2f %s %4.2f %s %4.2f' % ('F = ', hpars['F'], 'F12 = ', hpars['F12'], 'F23 = ', hpars['F23']))
        print('%s %i %s %14.12f' % ('N = ', npts, ' sigma = ', sigma))
        print('%7.5f %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f' % (hpars["t1"], hpars["v1_dec"], hpars["v1_inc"], hpars["e12"], hpars["v2_dec"], hpars["v2_inc"], hpars["e13"], hpars["v3_dec"], hpars["v3_inc"]))
        print('%7.5f %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f' % (hpars["t2"], hpars["v2_dec"], hpars["v2_inc"], hpars["e23"], hpars["v3_dec"], hpars["v3_inc"], hpars["e12"], hpars["v1_dec"], hpars["v1_inc"]))
        print('%7.5f %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f' % (hpars["t3"], hpars["v3_dec"], hpars["v3_inc"], hpars["e13"], hpars["v1_dec"], hpars["v1_inc"], hpars["e23"], hpars["v2_dec"], hpars["v2_inc"]))
python
train
https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/s_hext.py#L8-L76
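A usage note on the record above: although the INPUT line in the docstring writes the six tensor elements comma-separated, the code splits each line on whitespace (line.split()), and sigma is only read in -l mode. A made-up input file (the name s_data.txt and the values are illustrative) for the default whole-file average:

1.002 0.999 0.998 0.001 -0.002 0.000
1.001 1.000 0.999 0.000 -0.001 0.001

invoked as: s_hext.py -f s_data.txt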
4,108
bcbio/bcbio-nextgen
bcbio/qc/qsignature.py
summary
def summary(*samples):
    """Run SignatureCompareRelatedSimple module from qsignature tool.

    Creates a matrix of pairwise comparison among samples. The
    function will not run if the output exists

    :param samples: list with only one element containing all samples information
    :returns: (dict) with the path of the output to be joined to summary
    """
    warnings, similar = [], []
    qsig = config_utils.get_program("qsignature", samples[0][0]["config"])
    if not qsig:
        return [[]]
    res_qsig = config_utils.get_resources("qsignature", samples[0][0]["config"])
    jvm_opts = " ".join(res_qsig.get("jvm_opts", ["-Xms750m", "-Xmx8g"]))
    work_dir = samples[0][0]["dirs"]["work"]
    count = 0
    for data in samples:
        data = data[0]
        vcf = tz.get_in(["summary", "qc", "qsignature", "base"], data)
        if vcf:
            count += 1
            vcf_name = dd.get_sample_name(data) + ".qsig.vcf"
            out_dir = utils.safe_makedir(os.path.join(work_dir, "qsignature"))
            if not os.path.lexists(os.path.join(out_dir, vcf_name)):
                os.symlink(vcf, os.path.join(out_dir, vcf_name))
    if count > 0:
        qc_out_dir = utils.safe_makedir(os.path.join(work_dir, "qc", "qsignature"))
        out_file = os.path.join(qc_out_dir, "qsignature.xml")
        out_ma_file = os.path.join(qc_out_dir, "qsignature.ma")
        out_warn_file = os.path.join(qc_out_dir, "qsignature.warnings")
        log = os.path.join(work_dir, "qsignature", "qsig-summary.log")
        if not os.path.exists(out_file):
            with file_transaction(samples[0][0], out_file) as file_txt_out:
                base_cmd = ("{qsig} {jvm_opts} "
                            "org.qcmg.sig.SignatureCompareRelatedSimple "
                            "-log {log} -dir {out_dir} "
                            "-o {file_txt_out} ")
                do.run(base_cmd.format(**locals()), "qsignature score calculation")
        error, warnings, similar = _parse_qsignature_output(out_file, out_ma_file,
                                                            out_warn_file, samples[0][0])
        return [{'total samples': count,
                 'similar samples pairs': len(similar),
                 'warnings samples pairs': len(warnings),
                 'error samples': list(error),
                 'out_dir': qc_out_dir}]
    else:
        return []
python
train
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/qc/qsignature.py#L71-L118
4,109
pypa/pipenv
pipenv/vendor/distlib/_backport/tarfile.py
_Stream._read
def _read(self, size):
    """Return size bytes from the stream.
    """
    if self.comptype == "tar":
        return self.__read(size)

    c = len(self.dbuf)
    while c < size:
        buf = self.__read(self.bufsize)
        if not buf:
            break
        try:
            buf = self.cmp.decompress(buf)
        except IOError:
            raise ReadError("invalid compressed data")
        self.dbuf += buf
        c += len(buf)
    buf = self.dbuf[:size]
    self.dbuf = self.dbuf[size:]
    return buf
python
train
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/distlib/_backport/tarfile.py#L583-L602
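The loop above is a standard read-and-buffer decompression pattern: pull bufsize-sized raw chunks until at least size decompressed bytes are available, then slice them off the front of the buffer. A self-contained sketch of the same pattern around zlib (class and names are mine; tarfile's _Stream additionally handles gzip/bz2 framing):

import io
import zlib

class ZlibStream:
    def __init__(self, fileobj, bufsize=16 * 1024):
        self.fileobj = fileobj
        self.bufsize = bufsize
        self.cmp = zlib.decompressobj()
        self.dbuf = b""

    def read(self, size):
        while len(self.dbuf) < size:           # same loop as _read above
            raw = self.fileobj.read(self.bufsize)
            if not raw:                        # EOF: return what we have
                break
            self.dbuf += self.cmp.decompress(raw)
        out, self.dbuf = self.dbuf[:size], self.dbuf[size:]
        return out

payload = zlib.compress(b"x" * 100000)
stream = ZlibStream(io.BytesIO(payload))
print(len(stream.read(1000)))  # 1000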
4,110
mitsei/dlkit
dlkit/json_/repository/sessions.py
CompositionLookupSession.get_composition
def get_composition(self, composition_id):
    """Gets the ``Composition`` specified by its ``Id``.

    arg:    composition_id (osid.id.Id): ``Id`` of the ``Composiiton``
    return: (osid.repository.Composition) - the composition
    raise:  NotFound - ``composition_id`` not found
    raise:  NullArgument - ``composition_id`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    *compliance: mandatory -- This method is must be implemented.*

    """
    # Implemented from template for
    # osid.resource.ResourceLookupSession.get_resource
    # NOTE: This implementation currently ignores plenary view
    collection = JSONClientValidated('repository',
                                     collection='Composition',
                                     runtime=self._runtime)
    result = collection.find_one(
        dict({'_id': ObjectId(self._get_id(composition_id, 'repository').get_identifier())},
             **self._view_filter()))
    return objects.Composition(osid_object_map=result, runtime=self._runtime, proxy=self._proxy)
python
train
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/repository/sessions.py#L3225-L3247
4,111
pyQode/pyqode.core
pyqode/core/widgets/tabs.py
TabWidget._on_dirty_changed
def _on_dirty_changed(self, dirty):
    """
    Adds a star in front of a dirtt tab and emits dirty_changed.
    """
    try:
        title = self._current._tab_name
        index = self.indexOf(self._current)
        if dirty:
            self.setTabText(index, "* " + title)
        else:
            self.setTabText(index, title)
    except AttributeError:
        pass
    self.dirty_changed.emit(dirty)
python
train
https://github.com/pyQode/pyqode.core/blob/a99ec6cd22d519394f613309412f8329dc4e90cb/pyqode/core/widgets/tabs.py#L428-L441
4,112
AoiKuiyuyou/AoikI18n
src/aoiki18n/aoiki18n_.py
I18n.yaml_force_unicode
def yaml_force_unicode():
    """
    Force pyyaml to return unicode values.
    """
    #/
    ## modified from |http://stackoverflow.com/a/2967461|
    if sys.version_info[0] == 2:
        def construct_func(self, node):
            return self.construct_scalar(node)
        yaml.Loader.add_constructor(U('tag:yaml.org,2002:str'), construct_func)
        yaml.SafeLoader.add_constructor(U('tag:yaml.org,2002:str'), construct_func)
python
train
https://github.com/AoiKuiyuyou/AoikI18n/blob/8d60ea6a2be24e533a9cf92b433a8cfdb67f813e/src/aoiki18n/aoiki18n_.py#L78-L88
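A sketch of the effect, assuming the function is exposed as the staticmethod the record's func_name (I18n.yaml_force_unicode) suggests; the difference is only observable on Python 2, where str and unicode are distinct types:

import yaml
from aoiki18n.aoiki18n_ import I18n  # module path taken from the record above

I18n.yaml_force_unicode()
data = yaml.safe_load('greeting: hello')
# On Python 2 the value is now unicode rather than a byte string;
# on Python 3 the call is a no-op since str is already unicode.
print(type(data['greeting']))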
4,113
orb-framework/orb
orb/core/query.py
QueryCompound.columns
def columns(self, model=None):
    """
    Returns any columns used within this query.

    :return     [<orb.Column>, ..]
    """
    for query in self.__queries:
        for column in query.columns(model=model):
            yield column
python
train
https://github.com/orb-framework/orb/blob/575be2689cb269e65a0a2678232ff940acc19e5a/orb/core/query.py#L1384-L1392
4,114
numenta/htmresearch
htmresearch/algorithms/lateral_pooler.py
LateralPooler.feedforward
def feedforward(self):
    """
    Soon to be depriciated.
    Needed to make the SP implementation compatible
    with some older code.
    """
    m = self._numInputs
    n = self._numColumns
    W = np.zeros((n, m))
    for i in range(self._numColumns):
        self.getPermanence(i, W[i, :])
    return W
python
train
https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/algorithms/lateral_pooler.py#L201-L213
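The return value is the dense feedforward weight matrix, one row of permanences per spatial-pooler column, so a caller can sanity-check it as below (`pooler` is a hypothetical constructed LateralPooler instance):

W = pooler.feedforward()
assert W.shape == (pooler._numColumns, pooler._numInputs)
column_mass = W.sum(axis=1)  # total permanence per SP column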
4,115
ConsenSys/mythril-classic
mythril/laser/ethereum/state/machine_state.py
MachineState.mem_extend
def mem_extend(self, start: int, size: int) -> None:
    """Extends the memory of this machine state.

    :param start: Start of memory extension
    :param size: Size of memory extension
    """
    m_extend = self.calculate_extension_size(start, size)
    if m_extend:
        extend_gas = self.calculate_memory_gas(start, size)
        self.min_gas_used += extend_gas
        self.max_gas_used += extend_gas
        self.check_gas()
        self.memory.extend(m_extend)
python
train
https://github.com/ConsenSys/mythril-classic/blob/27af71c34b2ce94f4fae5613ec457f93df1a8f56/mythril/laser/ethereum/state/machine_state.py#L151-L163
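For context on what calculate_memory_gas charges: EVM memory expansion is priced per the yellow paper with a linear term plus a quadratic term. A reference sketch of that formula (my own code, not Mythril's):

def memory_cost(words: int) -> int:
    # C_mem(a) = G_memory * a + floor(a^2 / 512), with G_memory = 3
    return 3 * words + words * words // 512

def expansion_gas(old_bytes: int, new_bytes: int) -> int:
    to_words = lambda b: (b + 31) // 32  # memory is sized in 32-byte words
    return memory_cost(to_words(new_bytes)) - memory_cost(to_words(old_bytes))

print(expansion_gas(0, 32))    # 3: first word costs only the linear term
print(expansion_gas(0, 1024))  # 98: 32 words, quadratic term kicks in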
4,116
toumorokoshi/sprinter
sprinter/formula/base.py
FormulaBase.validate
def validate(self):
    """ validates the feature configuration, and returns a list of errors (empty list if no error)

    validate should:

    * required variables
    * warn on unused variables

    errors should either be reported via self._log_error(), or raise an exception
    """
    if self.target:
        for k in self.target.keys():
            if k in self.deprecated_options:
                self.logger.warn(
                    self.deprecated_options[k].format(option=k, feature=self.feature_name))
            elif (k not in self.valid_options and k not in self.required_options and
                  '*' not in self.valid_options):
                self.logger.warn("Unused option %s in %s!" % (k, self.feature_name))
        for k in self.required_options:
            if not self.target.has(k):
                self._log_error(
                    "Required option %s not present in feature %s!" % (k, self.feature_name))
python
train
https://github.com/toumorokoshi/sprinter/blob/846697a7a087e69c61d075232e754d6975a64152/sprinter/formula/base.py#L141-L163
4,117
dmlc/gluon-nlp
scripts/sentiment_analysis/text_cnn.py
model
def model(dropout, vocab, model_mode, output_size):
    """Construct the model."""
    textCNN = SentimentNet(dropout=dropout, vocab_size=len(vocab),
                           model_mode=model_mode, output_size=output_size)
    textCNN.hybridize()
    return textCNN
python
train
https://github.com/dmlc/gluon-nlp/blob/4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba/scripts/sentiment_analysis/text_cnn.py#L40-L46
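A rough Gluon stand-in for the construct-and-hybridize step above; this is not the repository's SentimentNet, just a sketch of the same pattern with stock layers:

import mxnet as mx
from mxnet.gluon import nn

def build_model(dropout, output_size):
    net = nn.HybridSequential()
    net.add(nn.Dense(64, activation='relu'),
            nn.Dropout(dropout),
            nn.Dense(output_size))
    net.initialize()
    net.hybridize()  # switch to the cached symbolic graph for faster execution
    return net

net = build_model(dropout=0.5, output_size=2)
print(net(mx.nd.ones((1, 10))).shape)  # (1, 2)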
4,118
saltstack/salt
salt/netapi/rest_cherrypy/app.py
salt_api_acl_tool
def salt_api_acl_tool(username, request): ''' .. versionadded:: 2016.3.0 Verifies user requests against the API whitelist. (User/IP pair) in order to provide whitelisting for the API similar to the master, but over the API. .. code-block:: yaml rest_cherrypy: api_acl: users: '*': - 1.1.1.1 - 1.1.1.2 foo: - 8.8.4.4 bar: - '*' :param username: Username to check against the API. :type username: str :param request: Cherrypy request to check against the API. :type request: cherrypy.request ''' failure_str = ("[api_acl] Authentication failed for " "user %s from IP %s") success_str = ("[api_acl] Authentication successful for " "user %s from IP %s") pass_str = ("[api_acl] Authentication not checked for " "user %s from IP %s") acl = None # Salt Configuration salt_config = cherrypy.config.get('saltopts', None) if salt_config: # Cherrypy Config. cherrypy_conf = salt_config.get('rest_cherrypy', None) if cherrypy_conf: # ACL Config. acl = cherrypy_conf.get('api_acl', None) ip = request.remote.ip if acl: users = acl.get('users', {}) if users: if username in users: if ip in users[username] or '*' in users[username]: logger.info(success_str, username, ip) return True else: logger.info(failure_str, username, ip) return False elif username not in users and '*' in users: if ip in users['*'] or '*' in users['*']: logger.info(success_str, username, ip) return True else: logger.info(failure_str, username, ip) return False else: logger.info(failure_str, username, ip) return False else: logger.info(pass_str, username, ip) return True
python
def salt_api_acl_tool(username, request): ''' .. versionadded:: 2016.3.0 Verifies user requests against the API whitelist. (User/IP pair) in order to provide whitelisting for the API similar to the master, but over the API. .. code-block:: yaml rest_cherrypy: api_acl: users: '*': - 1.1.1.1 - 1.1.1.2 foo: - 8.8.4.4 bar: - '*' :param username: Username to check against the API. :type username: str :param request: Cherrypy request to check against the API. :type request: cherrypy.request ''' failure_str = ("[api_acl] Authentication failed for " "user %s from IP %s") success_str = ("[api_acl] Authentication successful for " "user %s from IP %s") pass_str = ("[api_acl] Authentication not checked for " "user %s from IP %s") acl = None # Salt Configuration salt_config = cherrypy.config.get('saltopts', None) if salt_config: # Cherrypy Config. cherrypy_conf = salt_config.get('rest_cherrypy', None) if cherrypy_conf: # ACL Config. acl = cherrypy_conf.get('api_acl', None) ip = request.remote.ip if acl: users = acl.get('users', {}) if users: if username in users: if ip in users[username] or '*' in users[username]: logger.info(success_str, username, ip) return True else: logger.info(failure_str, username, ip) return False elif username not in users and '*' in users: if ip in users['*'] or '*' in users['*']: logger.info(success_str, username, ip) return True else: logger.info(failure_str, username, ip) return False else: logger.info(failure_str, username, ip) return False else: logger.info(pass_str, username, ip) return True
['def', 'salt_api_acl_tool', '(', 'username', ',', 'request', ')', ':', 'failure_str', '=', '(', '"[api_acl] Authentication failed for "', '"user %s from IP %s"', ')', 'success_str', '=', '(', '"[api_acl] Authentication sucessful for "', '"user %s from IP %s"', ')', 'pass_str', '=', '(', '"[api_acl] Authentication not checked for "', '"user %s from IP %s"', ')', 'acl', '=', 'None', '# Salt Configuration', 'salt_config', '=', 'cherrypy', '.', 'config', '.', 'get', '(', "'saltopts'", ',', 'None', ')', 'if', 'salt_config', ':', '# Cherrypy Config.', 'cherrypy_conf', '=', 'salt_config', '.', 'get', '(', "'rest_cherrypy'", ',', 'None', ')', 'if', 'cherrypy_conf', ':', '# ACL Config.', 'acl', '=', 'cherrypy_conf', '.', 'get', '(', "'api_acl'", ',', 'None', ')', 'ip', '=', 'request', '.', 'remote', '.', 'ip', 'if', 'acl', ':', 'users', '=', 'acl', '.', 'get', '(', "'users'", ',', '{', '}', ')', 'if', 'users', ':', 'if', 'username', 'in', 'users', ':', 'if', 'ip', 'in', 'users', '[', 'username', ']', 'or', "'*'", 'in', 'users', '[', 'username', ']', ':', 'logger', '.', 'info', '(', 'success_str', ',', 'username', ',', 'ip', ')', 'return', 'True', 'else', ':', 'logger', '.', 'info', '(', 'failure_str', ',', 'username', ',', 'ip', ')', 'return', 'False', 'elif', 'username', 'not', 'in', 'users', 'and', "'*'", 'in', 'users', ':', 'if', 'ip', 'in', 'users', '[', "'*'", ']', 'or', "'*'", 'in', 'users', '[', "'*'", ']', ':', 'logger', '.', 'info', '(', 'success_str', ',', 'username', ',', 'ip', ')', 'return', 'True', 'else', ':', 'logger', '.', 'info', '(', 'failure_str', ',', 'username', ',', 'ip', ')', 'return', 'False', 'else', ':', 'logger', '.', 'info', '(', 'failure_str', ',', 'username', ',', 'ip', ')', 'return', 'False', 'else', ':', 'logger', '.', 'info', '(', 'pass_str', ',', 'username', ',', 'ip', ')', 'return', 'True']
.. versionadded:: 2016.3.0 Verifies user requests against the API whitelist. (User/IP pair) in order to provide whitelisting for the API similar to the master, but over the API. .. code-block:: yaml rest_cherrypy: api_acl: users: '*': - 1.1.1.1 - 1.1.1.2 foo: - 8.8.4.4 bar: - '*' :param username: Username to check against the API. :type username: str :param request: Cherrypy request to check against the API. :type request: cherrypy.request
['..', 'versionadded', '::', '2016', '.', '3', '.', '0']
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/netapi/rest_cherrypy/app.py#L696-L762
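Stripped of the CherryPy plumbing, the ACL decision above reduces to a user/IP lookup with wildcard fallbacks. A self-contained sketch (not Salt's API):

def acl_allows(users, username, ip):
    # an explicit user entry wins; otherwise fall back to the '*' user entry
    allowed_ips = users.get(username, users.get('*'))
    if allowed_ips is None:
        return False
    return ip in allowed_ips or '*' in allowed_ips

users = {'*': ['1.1.1.1', '1.1.1.2'], 'foo': ['8.8.4.4'], 'bar': ['*']}
assert acl_allows(users, 'foo', '8.8.4.4')
assert acl_allows(users, 'bar', '10.0.0.1')   # 'bar' allows any IP
assert not acl_allows(users, 'foo', '9.9.9.9')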
4,119
go-macaroon-bakery/py-macaroon-bakery
macaroonbakery/httpbakery/_browser.py
WebBrowserInteractor._wait_for_token
def _wait_for_token(self, ctx, wait_token_url): ''' Returns a token from the wait token URL @param wait_token_url URL to wait for (string) :return DischargeToken ''' resp = requests.get(wait_token_url) if resp.status_code != 200: raise InteractionError('cannot get {}'.format(wait_token_url)) json_resp = resp.json() kind = json_resp.get('kind') if kind is None: raise InteractionError( 'cannot get kind token from {}'.format(wait_token_url)) token_val = json_resp.get('token') if token_val is None: token_val = json_resp.get('token64') if token_val is None: raise InteractionError( 'cannot get token from {}'.format(wait_token_url)) token_val = base64.b64decode(token_val) return DischargeToken(kind=kind, value=token_val)
python
def _wait_for_token(self, ctx, wait_token_url): ''' Returns a token from the wait token URL @param wait_token_url URL to wait for (string) :return DischargeToken ''' resp = requests.get(wait_token_url) if resp.status_code != 200: raise InteractionError('cannot get {}'.format(wait_token_url)) json_resp = resp.json() kind = json_resp.get('kind') if kind is None: raise InteractionError( 'cannot get kind token from {}'.format(wait_token_url)) token_val = json_resp.get('token') if token_val is None: token_val = json_resp.get('token64') if token_val is None: raise InteractionError( 'cannot get token from {}'.format(wait_token_url)) token_val = base64.b64decode(token_val) return DischargeToken(kind=kind, value=token_val)
['def', '_wait_for_token', '(', 'self', ',', 'ctx', ',', 'wait_token_url', ')', ':', 'resp', '=', 'requests', '.', 'get', '(', 'wait_token_url', ')', 'if', 'resp', '.', 'status_code', '!=', '200', ':', 'raise', 'InteractionError', '(', "'cannot get {}'", '.', 'format', '(', 'wait_token_url', ')', ')', 'json_resp', '=', 'resp', '.', 'json', '(', ')', 'kind', '=', 'json_resp', '.', 'get', '(', "'kind'", ')', 'if', 'kind', 'is', 'None', ':', 'raise', 'InteractionError', '(', "'cannot get kind token from {}'", '.', 'format', '(', 'wait_token_url', ')', ')', 'token_val', '=', 'json_resp', '.', 'get', '(', "'token'", ')', 'if', 'token_val', 'is', 'None', ':', 'token_val', '=', 'json_resp', '.', 'get', '(', "'token64'", ')', 'if', 'token_val', 'is', 'None', ':', 'raise', 'InteractionError', '(', "'cannot get token from {}'", '.', 'format', '(', 'wait_token_url', ')', ')', 'token_val', '=', 'base64', '.', 'b64decode', '(', 'token_val', ')', 'return', 'DischargeToken', '(', 'kind', '=', 'kind', ',', 'value', '=', 'token_val', ')']
Returns a token from the wait token URL @param wait_token_url URL to wait for (string) :return DischargeToken
['Returns', 'a', 'token', 'from', 'a', 'the', 'wait', 'token', 'URL']
train
https://github.com/go-macaroon-bakery/py-macaroon-bakery/blob/63ce1ef1dabe816eb8aaec48fbb46761c34ddf77/macaroonbakery/httpbakery/_browser.py#L49-L69
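The poll-and-decode flow generalizes to any wait-token endpoint; a minimal sketch using only requests and base64 (the error type is a stand-in, and, like the original, it treats the returned token as base64-encoded):

import base64
import requests

def wait_for_token(wait_token_url):
    resp = requests.get(wait_token_url)
    if resp.status_code != 200:
        raise RuntimeError('cannot get {}'.format(wait_token_url))
    body = resp.json()
    kind = body.get('kind')
    token = body.get('token') or body.get('token64')  # either field may carry it
    if kind is None or token is None:
        raise RuntimeError('incomplete response from {}'.format(wait_token_url))
    return kind, base64.b64decode(token)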
4,120
cbclab/MOT
mot/lib/cl_function.py
_ProcedureWorker._build_kernel
def _build_kernel(self, kernel_source, compile_flags=()): """Convenience function for building the kernel for this worker. Args: kernel_source (str): the kernel source to use for building the kernel compile_flags (tuple): extra compiler flags to pass to the build Returns: cl.Program: a compiled CL kernel """ return cl.Program(self._cl_context, kernel_source).build(' '.join(compile_flags))
python
def _build_kernel(self, kernel_source, compile_flags=()): """Convenience function for building the kernel for this worker. Args: kernel_source (str): the kernel source to use for building the kernel compile_flags (tuple): extra compiler flags to pass to the build Returns: cl.Program: a compiled CL kernel """ return cl.Program(self._cl_context, kernel_source).build(' '.join(compile_flags))
['def', '_build_kernel', '(', 'self', ',', 'kernel_source', ',', 'compile_flags', '=', '(', ')', ')', ':', 'return', 'cl', '.', 'Program', '(', 'self', '.', '_cl_context', ',', 'kernel_source', ')', '.', 'build', '(', "' '", '.', 'join', '(', 'compile_flags', ')', ')']
Convenience function for building the kernel for this worker. Args: kernel_source (str): the kernel source to use for building the kernel compile_flags (tuple): extra compiler flags to pass to the build Returns: cl.Program: a compiled CL kernel
['Convenience', 'function', 'for', 'building', 'the', 'kernel', 'for', 'this', 'worker', '.']
train
https://github.com/cbclab/MOT/blob/fb3243b65025705842e82704705c00902f9a35af/mot/lib/cl_function.py#L706-L715
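For context, a minimal end-to-end PyOpenCL sketch around the same cl.Program(...).build() call, assuming a working OpenCL runtime is installed:

import numpy as np
import pyopencl as cl

KERNEL_SOURCE = """
__kernel void double_it(__global float *a) {
    a[get_global_id(0)] *= 2.0f;
}
"""

ctx = cl.create_some_context()
queue = cl.CommandQueue(ctx)
program = cl.Program(ctx, KERNEL_SOURCE).build()  # same call as _build_kernel

a = np.ones(16, dtype=np.float32)
buf = cl.Buffer(ctx, cl.mem_flags.READ_WRITE | cl.mem_flags.COPY_HOST_PTR, hostbuf=a)
program.double_it(queue, a.shape, None, buf)
cl.enqueue_copy(queue, a, buf)
print(a)  # all 2.0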
4,121
openstack/networking-cisco
networking_cisco/plugins/cisco/cfg_agent/device_drivers/iosxe/iosxe_routing_driver.py
IosXeRoutingDriver._cfg_exists
def _cfg_exists(self, cfg_str): """Check a partial config string exists in the running config. :param cfg_str: config string to check :return : True or False """ ios_cfg = self._get_running_config() parse = HTParser(ios_cfg) cfg_raw = parse.find_lines("^" + cfg_str) LOG.debug("_cfg_exists(): Found lines %s", cfg_raw) return len(cfg_raw) > 0
python
def _cfg_exists(self, cfg_str): """Check a partial config string exists in the running config. :param cfg_str: config string to check :return : True or False """ ios_cfg = self._get_running_config() parse = HTParser(ios_cfg) cfg_raw = parse.find_lines("^" + cfg_str) LOG.debug("_cfg_exists(): Found lines %s", cfg_raw) return len(cfg_raw) > 0
['def', '_cfg_exists', '(', 'self', ',', 'cfg_str', ')', ':', 'ios_cfg', '=', 'self', '.', '_get_running_config', '(', ')', 'parse', '=', 'HTParser', '(', 'ios_cfg', ')', 'cfg_raw', '=', 'parse', '.', 'find_lines', '(', '"^"', '+', 'cfg_str', ')', 'LOG', '.', 'debug', '(', '"_cfg_exists(): Found lines %s"', ',', 'cfg_raw', ')', 'return', 'len', '(', 'cfg_raw', ')', '>', '0']
Check a partial config string exists in the running config. :param cfg_str: config string to check :return : True or False
['Check', 'a', 'partial', 'config', 'string', 'exists', 'in', 'the', 'running', 'config', '.']
train
https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/plugins/cisco/cfg_agent/device_drivers/iosxe/iosxe_routing_driver.py#L405-L415
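The lookup itself is just an anchored line match over the running config; a dependency-free sketch with re (the pattern is escaped here for safety, whereas the driver passes cfg_str through as a raw pattern):

import re

def cfg_exists(running_config, cfg_str):
    pattern = re.compile("^" + re.escape(cfg_str))
    matches = [line for line in running_config.splitlines() if pattern.match(line)]
    return len(matches) > 0

config = "interface GigabitEthernet1\n ip address 10.0.0.1 255.255.255.0\n"
print(cfg_exists(config, "interface Gig"))  # True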
4,122
idmillington/layout
layout/rl_utils.py
ReportlabOutput.draw_polygon
def draw_polygon( self, *pts, close_path=True, stroke=None, stroke_width=1, stroke_dash=None, fill=None ) -> None: """Draws the given polygon.""" c = self.c c.saveState() if stroke is not None: c.setStrokeColorRGB(*stroke) c.setLineWidth(stroke_width) c.setDash(stroke_dash) if fill is not None: c.setFillColorRGB(*fill) p = c.beginPath() fn = p.moveTo for x,y in zip(*[iter(pts)]*2): fn(x, y) fn = p.lineTo if close_path: p.close() c.drawPath(p, stroke=(stroke is not None), fill=(fill is not None)) c.restoreState()
python
def draw_polygon( self, *pts, close_path=True, stroke=None, stroke_width=1, stroke_dash=None, fill=None ) -> None: """Draws the given polygon.""" c = self.c c.saveState() if stroke is not None: c.setStrokeColorRGB(*stroke) c.setLineWidth(stroke_width) c.setDash(stroke_dash) if fill is not None: c.setFillColorRGB(*fill) p = c.beginPath() fn = p.moveTo for x,y in zip(*[iter(pts)]*2): fn(x, y) fn = p.lineTo if close_path: p.close() c.drawPath(p, stroke=(stroke is not None), fill=(fill is not None)) c.restoreState()
['def', 'draw_polygon', '(', 'self', ',', '*', 'pts', ',', 'close_path', '=', 'True', ',', 'stroke', '=', 'None', ',', 'stroke_width', '=', '1', ',', 'stroke_dash', '=', 'None', ',', 'fill', '=', 'None', ')', '->', 'None', ':', 'c', '=', 'self', '.', 'c', 'c', '.', 'saveState', '(', ')', 'if', 'stroke', 'is', 'not', 'None', ':', 'c', '.', 'setStrokeColorRGB', '(', '*', 'stroke', ')', 'c', '.', 'setLineWidth', '(', 'stroke_width', ')', 'c', '.', 'setDash', '(', 'stroke_dash', ')', 'if', 'fill', 'is', 'not', 'None', ':', 'c', '.', 'setFillColorRGB', '(', '*', 'fill', ')', 'p', '=', 'c', '.', 'beginPath', '(', ')', 'fn', '=', 'p', '.', 'moveTo', 'for', 'x', ',', 'y', 'in', 'zip', '(', '*', '[', 'iter', '(', 'pts', ')', ']', '*', '2', ')', ':', 'fn', '(', 'x', ',', 'y', ')', 'fn', '=', 'p', '.', 'lineTo', 'if', 'close_path', ':', 'p', '.', 'close', '(', ')', 'c', '.', 'drawPath', '(', 'p', ',', 'stroke', '=', '(', 'stroke', 'is', 'not', 'None', ')', ',', 'fill', '=', '(', 'fill', 'is', 'not', 'None', ')', ')', 'c', '.', 'restoreState', '(', ')']
Draws the given polygon.
['Draws', 'the', 'given', 'polygon', '.']
train
https://github.com/idmillington/layout/blob/c452d1d7a74c9a74f7639c1b49e2a41c4e354bb5/layout/rl_utils.py#L98-L127
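The zip(*[iter(pts)]*2) idiom in draw_polygon, which turns the flat argument list into coordinate pairs, deserves a standalone look:

def pairs(flat):
    it = iter(flat)
    return list(zip(it, it))  # same effect as zip(*[iter(flat)] * 2)

print(pairs([0, 0, 10, 0, 10, 10]))  # [(0, 0), (10, 0), (10, 10)]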
4,123
plivo/sharq-server
sharq_server/server.py
SharQServer._view_interval
def _view_interval(self, queue_type, queue_id): """Updates the queue interval in SharQ.""" response = { 'status': 'failure' } try: request_data = json.loads(request.data) interval = request_data['interval'] except Exception as e: response['message'] = str(e) return jsonify(**response), 400 request_data = { 'queue_type': queue_type, 'queue_id': queue_id, 'interval': interval } try: response = self.sq.interval(**request_data) if response['status'] == 'failure': return jsonify(**response), 404 except Exception as e: response['message'] = str(e) return jsonify(**response), 400 return jsonify(**response)
python
def _view_interval(self, queue_type, queue_id): """Updates the queue interval in SharQ.""" response = { 'status': 'failure' } try: request_data = json.loads(request.data) interval = request_data['interval'] except Exception as e: response['message'] = str(e) return jsonify(**response), 400 request_data = { 'queue_type': queue_type, 'queue_id': queue_id, 'interval': interval } try: response = self.sq.interval(**request_data) if response['status'] == 'failure': return jsonify(**response), 404 except Exception as e: response['message'] = str(e) return jsonify(**response), 400 return jsonify(**response)
['def', '_view_interval', '(', 'self', ',', 'queue_type', ',', 'queue_id', ')', ':', 'response', '=', '{', "'status'", ':', "'failure'", '}', 'try', ':', 'request_data', '=', 'json', '.', 'loads', '(', 'request', '.', 'data', ')', 'interval', '=', 'request_data', '[', "'interval'", ']', 'except', 'Exception', ',', 'e', ':', 'response', '[', "'message'", ']', '=', 'e', '.', 'message', 'return', 'jsonify', '(', '*', '*', 'response', ')', ',', '400', 'request_data', '=', '{', "'queue_type'", ':', 'queue_type', ',', "'queue_id'", ':', 'queue_id', ',', "'interval'", ':', 'interval', '}', 'try', ':', 'response', '=', 'self', '.', 'sq', '.', 'interval', '(', '*', '*', 'request_data', ')', 'if', 'response', '[', "'status'", ']', '==', "'failure'", ':', 'return', 'jsonify', '(', '*', '*', 'response', ')', ',', '404', 'except', 'Exception', ',', 'e', ':', 'response', '[', "'message'", ']', '=', 'e', '.', 'message', 'return', 'jsonify', '(', '*', '*', 'response', ')', ',', '400', 'return', 'jsonify', '(', '*', '*', 'response', ')']
Updates the queue interval in SharQ.
['Updates', 'the', 'queue', 'interval', 'in', 'SharQ', '.']
train
https://github.com/plivo/sharq-server/blob/9f4c50eb5ee28d1084591febc4a3a34d7ffd0556/sharq_server/server.py#L133-L159
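Framework aside, the handler is a parse/validate/dispatch pattern returning a (payload, status) pair. A sketch with a stand-in for self.sq.interval:

import json

def view_interval(raw_body, queue_type, queue_id, set_interval):
    response = {'status': 'failure'}
    try:
        interval = json.loads(raw_body)['interval']
    except (ValueError, KeyError) as e:
        response['message'] = str(e)
        return response, 400
    response = set_interval(queue_type=queue_type, queue_id=queue_id,
                            interval=interval)
    return response, 404 if response['status'] == 'failure' else 200

print(view_interval('{"interval": 1000}', 'sms', 'q1',
                    lambda **kw: {'status': 'success'}))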
4,124
allenai/allennlp
allennlp/data/dataset_readers/dataset_utils/text2sql_utils.py
clean_and_split_sql
def clean_and_split_sql(sql: str) -> List[str]: """ Cleans up and unifies a SQL query. This involves unifying quoted strings and splitting brackets which aren't formatted consistently in the data. """ sql_tokens: List[str] = [] for token in sql.strip().split(): token = token.replace('"', "'").replace("%", "") if token.endswith("(") and len(token) > 1: sql_tokens.extend(split_table_and_column_names(token[:-1])) sql_tokens.extend(split_table_and_column_names(token[-1])) else: sql_tokens.extend(split_table_and_column_names(token)) return sql_tokens
python
def clean_and_split_sql(sql: str) -> List[str]: """ Cleans up and unifies a SQL query. This involves unifying quoted strings and splitting brackets which aren't formatted consistently in the data. """ sql_tokens: List[str] = [] for token in sql.strip().split(): token = token.replace('"', "'").replace("%", "") if token.endswith("(") and len(token) > 1: sql_tokens.extend(split_table_and_column_names(token[:-1])) sql_tokens.extend(split_table_and_column_names(token[-1])) else: sql_tokens.extend(split_table_and_column_names(token)) return sql_tokens
['def', 'clean_and_split_sql', '(', 'sql', ':', 'str', ')', '->', 'List', '[', 'str', ']', ':', 'sql_tokens', ':', 'List', '[', 'str', ']', '=', '[', ']', 'for', 'token', 'in', 'sql', '.', 'strip', '(', ')', '.', 'split', '(', ')', ':', 'token', '=', 'token', '.', 'replace', '(', '\'"\'', ',', '"\'"', ')', '.', 'replace', '(', '"%"', ',', '""', ')', 'if', 'token', '.', 'endswith', '(', '"("', ')', 'and', 'len', '(', 'token', ')', '>', '1', ':', 'sql_tokens', '.', 'extend', '(', 'split_table_and_column_names', '(', 'token', '[', ':', '-', '1', ']', ')', ')', 'sql_tokens', '.', 'extend', '(', 'split_table_and_column_names', '(', 'token', '[', '-', '1', ']', ')', ')', 'else', ':', 'sql_tokens', '.', 'extend', '(', 'split_table_and_column_names', '(', 'token', ')', ')', 'return', 'sql_tokens']
Cleans up and unifies a SQL query. This involves unifying quoted strings and splitting brackets which aren't formatted consistently in the data.
['Cleans', 'up', 'and', 'unifies', 'a', 'SQL', 'query', '.', 'This', 'involves', 'unifying', 'quoted', 'strings', 'and', 'splitting', 'brackets', 'which', 'aren', 't', 'formatted', 'consistently', 'in', 'the', 'data', '.']
train
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/data/dataset_readers/dataset_utils/text2sql_utils.py#L89-L102
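A runnable copy of the cleaning loop with a trivial stand-in for split_table_and_column_names (the real helper splits TABLE.COLUMN references) shows the quote unification and bracket splitting:

from typing import List

def split_table_and_column_names(token: str) -> List[str]:
    return [token] if token else []  # stand-in for the real helper

def clean_and_split_sql(sql: str) -> List[str]:
    sql_tokens: List[str] = []
    for token in sql.strip().split():
        token = token.replace('"', "'").replace("%", "")
        if token.endswith("(") and len(token) > 1:
            sql_tokens.extend(split_table_and_column_names(token[:-1]))
            sql_tokens.extend(split_table_and_column_names(token[-1]))
        else:
            sql_tokens.extend(split_table_and_column_names(token))
    return sql_tokens

print(clean_and_split_sql('SELECT COUNT( * ) FROM city WHERE name = "foo" ;'))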
4,125
django-fluent/django-fluent-blogs
fluent_blogs/sitemaps.py
TagArchiveSitemap.lastmod
def lastmod(self, tag): """Return the modification date of the most recently modified entry with this tag.""" lastitems = EntryModel.objects.published().order_by('-modification_date').filter(tags=tag).only('modification_date') return lastitems[0].modification_date
python
def lastmod(self, tag): """Return the modification date of the most recently modified entry with this tag.""" lastitems = EntryModel.objects.published().order_by('-modification_date').filter(tags=tag).only('modification_date') return lastitems[0].modification_date
['def', 'lastmod', '(', 'self', ',', 'tag', ')', ':', 'lastitems', '=', 'EntryModel', '.', 'objects', '.', 'published', '(', ')', '.', 'order_by', '(', "'-modification_date'", ')', '.', 'filter', '(', 'tags', '=', 'tag', ')', '.', 'only', '(', "'modification_date'", ')', 'return', 'lastitems', '[', '0', ']', '.', 'modification_date']
Return the modification date of the most recently modified entry with this tag.
['Return', 'the', 'last', 'modification', 'of', 'the', 'entry', '.']
train
https://github.com/django-fluent/django-fluent-blogs/blob/86b148549a010eaca9a2ea987fe43be250e06c50/fluent_blogs/sitemaps.py#L86-L89
4,126
ClimateImpactLab/DataFS
datafs/managers/manager_dynamo.py
DynamoDBManager._update_spec_config
def _update_spec_config(self, document_name, spec): ''' Dynamo implementation of project specific metadata spec ''' # add the updated archive_metadata object to Dynamo self._spec_table.update_item( Key={'_id': '{}'.format(document_name)}, UpdateExpression="SET config = :v", ExpressionAttributeValues={':v': spec}, ReturnValues='ALL_NEW')
python
def _update_spec_config(self, document_name, spec): ''' Dynamo implementation of project specific metadata spec ''' # add the updated archive_metadata object to Dynamo self._spec_table.update_item( Key={'_id': '{}'.format(document_name)}, UpdateExpression="SET config = :v", ExpressionAttributeValues={':v': spec}, ReturnValues='ALL_NEW')
['def', '_update_spec_config', '(', 'self', ',', 'document_name', ',', 'spec', ')', ':', '# add the updated archive_metadata object to Dynamo', 'self', '.', '_spec_table', '.', 'update_item', '(', 'Key', '=', '{', "'_id'", ':', "'{}'", '.', 'format', '(', 'document_name', ')', '}', ',', 'UpdateExpression', '=', '"SET config = :v"', ',', 'ExpressionAttributeValues', '=', '{', "':v'", ':', 'spec', '}', ',', 'ReturnValues', '=', "'ALL_NEW'", ')']
Dynamo implementation of project specific metadata spec
['Dynamo', 'implementation', 'of', 'project', 'specific', 'metadata', 'spec']
train
https://github.com/ClimateImpactLab/DataFS/blob/0d32c2b4e18d300a11b748a552f6adbc3dd8f59d/datafs/managers/manager_dynamo.py#L182-L192
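The same unconditional attribute overwrite, written directly against boto3; the table name and key value below are hypothetical, and the table is assumed to exist with hash key '_id':

import boto3

spec_table = boto3.resource('dynamodb').Table('spec')

updated = spec_table.update_item(
    Key={'_id': 'my-project'},
    UpdateExpression='SET config = :v',
    ExpressionAttributeValues={':v': {'metadata_required': True}},
    ReturnValues='ALL_NEW')
print(updated['Attributes'])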
4,127
openstack/networking-cisco
networking_cisco/apps/saf/agent/vdp/lldpad.py
LldpadDriver.construct_vdp_dict
def construct_vdp_dict(self, mode, mgrid, typeid, typeid_ver, vsiid_frmt, vsiid, filter_frmt, gid, mac, vlan, oui_id, oui_data): """Constructs the VDP Message. Please refer http://www.ieee802.org/1/pages/802.1bg.html VDP Section for more detailed information :param mode: Associate or De-associate :param mgrid: MGR ID :param typeid: Type ID :param typeid_ver: Version of the Type ID :param vsiid_frmt: Format of the following VSI argument :param vsiid: VSI value :param filter_frmt: Filter Format :param gid: Group ID the vNIC belongs to :param mac: MAC Address of the vNIC :param vlan: VLAN of the vNIC :param oui_id: OUI Type :param oui_data: OUI Data :return vdp_keyword_str: Dictionary of VDP arguments and values """ vdp_keyword_str = {} if mgrid is None: mgrid = self.vdp_opts.get('mgrid') mgrid_str = "mgrid2=%s" % mgrid if typeid is None: typeid = self.vdp_opts.get('typeid') typeid_str = "typeid=%s" % typeid if typeid_ver is None: typeid_ver = self.vdp_opts.get('typeidver') typeid_ver_str = "typeidver=%s" % typeid_ver if int(vsiid_frmt) == int(self.vdp_opts.get('vsiidfrmt')): vsiid_str = "uuid=%s" % vsiid else: # Only format supported for now LOG.error("Unsupported VSIID Format1") return vdp_keyword_str if vlan == constants.INVALID_VLAN: vlan = 0 if int(filter_frmt) == vdp_const.VDP_FILTER_GIDMACVID: if not mac or gid == 0: LOG.error("Incorrect Filter Format Specified") return vdp_keyword_str else: f = "filter=%s-%s-%s" filter_str = f % (vlan, mac, gid) elif int(filter_frmt) == vdp_const.VDP_FILTER_GIDVID: if gid == 0: LOG.error("NULL GID Specified") return vdp_keyword_str else: filter_str = "filter=" + '%d' % vlan + "--" + '%ld' % gid elif int(filter_frmt) == vdp_const.VDP_FILTER_MACVID: if not mac: LOG.error("NULL MAC Specified") return vdp_keyword_str else: filter_str = "filter=" + '%d' % vlan + "-" + mac elif int(filter_frmt) == vdp_const.VDP_FILTER_VID: filter_str = "filter=" + '%d' % vlan else: LOG.error("Incorrect Filter Format Specified") return vdp_keyword_str oui_list = [] if oui_id is not None and oui_data is not None: if oui_id == 'cisco': # string equality, not identity oui_list = self.gen_cisco_vdp_oui(oui_id, oui_data) mode_str = "mode=" + mode vdp_keyword_str = dict(mode=mode_str, mgrid=mgrid_str, typeid=typeid_str, typeid_ver=typeid_ver_str, vsiid=vsiid_str, filter=filter_str, oui_list=oui_list) return vdp_keyword_str
python
def construct_vdp_dict(self, mode, mgrid, typeid, typeid_ver, vsiid_frmt, vsiid, filter_frmt, gid, mac, vlan, oui_id, oui_data): """Constructs the VDP Message. Please refer http://www.ieee802.org/1/pages/802.1bg.html VDP Section for more detailed information :param mode: Associate or De-associate :param mgrid: MGR ID :param typeid: Type ID :param typeid_ver: Version of the Type ID :param vsiid_frmt: Format of the following VSI argument :param vsiid: VSI value :param filter_frmt: Filter Format :param gid: Group ID the vNIC belongs to :param mac: MAC Address of the vNIC :param vlan: VLAN of the vNIC :param oui_id: OUI Type :param oui_data: OUI Data :return vdp_keyword_str: Dictionary of VDP arguments and values """ vdp_keyword_str = {} if mgrid is None: mgrid = self.vdp_opts.get('mgrid') mgrid_str = "mgrid2=%s" % mgrid if typeid is None: typeid = self.vdp_opts.get('typeid') typeid_str = "typeid=%s" % typeid if typeid_ver is None: typeid_ver = self.vdp_opts.get('typeidver') typeid_ver_str = "typeidver=%s" % typeid_ver if int(vsiid_frmt) == int(self.vdp_opts.get('vsiidfrmt')): vsiid_str = "uuid=%s" % vsiid else: # Only format supported for now LOG.error("Unsupported VSIID Format1") return vdp_keyword_str if vlan == constants.INVALID_VLAN: vlan = 0 if int(filter_frmt) == vdp_const.VDP_FILTER_GIDMACVID: if not mac or gid == 0: LOG.error("Incorrect Filter Format Specified") return vdp_keyword_str else: f = "filter=%s-%s-%s" filter_str = f % (vlan, mac, gid) elif int(filter_frmt) == vdp_const.VDP_FILTER_GIDVID: if gid == 0: LOG.error("NULL GID Specified") return vdp_keyword_str else: filter_str = "filter=" + '%d' % vlan + "--" + '%ld' % gid elif int(filter_frmt) == vdp_const.VDP_FILTER_MACVID: if not mac: LOG.error("NULL MAC Specified") return vdp_keyword_str else: filter_str = "filter=" + '%d' % vlan + "-" + mac elif int(filter_frmt) == vdp_const.VDP_FILTER_VID: filter_str = "filter=" + '%d' % vlan else: LOG.error("Incorrect Filter Format Specified") return vdp_keyword_str oui_list = [] if oui_id is not None and oui_data is not None: if oui_id == 'cisco': # string equality, not identity oui_list = self.gen_cisco_vdp_oui(oui_id, oui_data) mode_str = "mode=" + mode vdp_keyword_str = dict(mode=mode_str, mgrid=mgrid_str, typeid=typeid_str, typeid_ver=typeid_ver_str, vsiid=vsiid_str, filter=filter_str, oui_list=oui_list) return vdp_keyword_str
['def', 'construct_vdp_dict', '(', 'self', ',', 'mode', ',', 'mgrid', ',', 'typeid', ',', 'typeid_ver', ',', 'vsiid_frmt', ',', 'vsiid', ',', 'filter_frmt', ',', 'gid', ',', 'mac', ',', 'vlan', ',', 'oui_id', ',', 'oui_data', ')', ':', 'vdp_keyword_str', '=', '{', '}', 'if', 'mgrid', 'is', 'None', ':', 'mgrid', '=', 'self', '.', 'vdp_opts', '.', 'get', '(', "'mgrid'", ')', 'mgrid_str', '=', '"mgrid2=%s"', '%', 'mgrid', 'if', 'typeid', 'is', 'None', ':', 'typeid', '=', 'self', '.', 'vdp_opts', '.', 'get', '(', "'typeid'", ')', 'typeid_str', '=', '"typeid=%s"', '%', 'typeid', 'if', 'typeid_ver', 'is', 'None', ':', 'typeid_ver', '=', 'self', '.', 'vdp_opts', '.', 'get', '(', "'typeidver'", ')', 'typeid_ver_str', '=', '"typeidver=%s"', '%', 'typeid_ver', 'if', 'int', '(', 'vsiid_frmt', ')', '==', 'int', '(', 'self', '.', 'vdp_opts', '.', 'get', '(', "'vsiidfrmt'", ')', ')', ':', 'vsiid_str', '=', '"uuid=%s"', '%', 'vsiid', 'else', ':', '# Only format supported for now', 'LOG', '.', 'error', '(', '"Unsupported VSIID Format1"', ')', 'return', 'vdp_keyword_str', 'if', 'vlan', '==', 'constants', '.', 'INVALID_VLAN', ':', 'vlan', '=', '0', 'if', 'int', '(', 'filter_frmt', ')', '==', 'vdp_const', '.', 'VDP_FILTER_GIDMACVID', ':', 'if', 'not', 'mac', 'or', 'gid', '==', '0', ':', 'LOG', '.', 'error', '(', '"Incorrect Filter Format Specified"', ')', 'return', 'vdp_keyword_str', 'else', ':', 'f', '=', '"filter=%s-%s-%s"', 'filter_str', '=', 'f', '%', '(', 'vlan', ',', 'mac', ',', 'gid', ')', 'elif', 'int', '(', 'filter_frmt', ')', '==', 'vdp_const', '.', 'VDP_FILTER_GIDVID', ':', 'if', 'gid', '==', '0', ':', 'LOG', '.', 'error', '(', '"NULL GID Specified"', ')', 'return', 'vdp_keyword_str', 'else', ':', 'filter_str', '=', '"filter="', '+', "'%d'", '%', 'vlan', '+', '"--"', '+', "'%ld'", '%', 'gid', 'elif', 'int', '(', 'filter_frmt', ')', '==', 'vdp_const', '.', 'VDP_FILTER_MACVID', ':', 'if', 'not', 'mac', ':', 'LOG', '.', 'error', '(', '"NULL MAC Specified"', ')', 'return', 'vdp_keyword_str', 'else', ':', 'filter_str', '=', '"filter="', '+', "'%d'", '%', 'vlan', '+', '"-"', '+', 'mac', 'elif', 'int', '(', 'filter_frmt', ')', '==', 'vdp_const', '.', 'VDP_FILTER_VID', ':', 'filter_str', '=', '"filter="', '+', "'%d'", '%', 'vlan', 'else', ':', 'LOG', '.', 'error', '(', '"Incorrect Filter Format Specified"', ')', 'return', 'vdp_keyword_str', 'oui_list', '=', '[', ']', 'if', 'oui_id', 'is', 'not', 'None', 'and', 'oui_data', 'is', 'not', 'None', ':', 'if', 'oui_id', 'is', "'cisco'", ':', 'oui_list', '=', 'self', '.', 'gen_cisco_vdp_oui', '(', 'oui_id', ',', 'oui_data', ')', 'mode_str', '=', '"mode="', '+', 'mode', 'vdp_keyword_str', '=', 'dict', '(', 'mode', '=', 'mode_str', ',', 'mgrid', '=', 'mgrid_str', ',', 'typeid', '=', 'typeid_str', ',', 'typeid_ver', '=', 'typeid_ver_str', ',', 'vsiid', '=', 'vsiid_str', ',', 'filter', '=', 'filter_str', ',', 'oui_list', '=', 'oui_list', ')', 'return', 'vdp_keyword_str']
Constructs the VDP Message. Please refer http://www.ieee802.org/1/pages/802.1bg.html VDP Section for more detailed information :param mode: Associate or De-associate :param mgrid: MGR ID :param typeid: Type ID :param typeid_ver: Version of the Type ID :param vsiid_frmt: Format of the following VSI argument :param vsiid: VSI value :param filter_frmt: Filter Format :param gid: Group ID the vNIC belongs to :param mac: MAC Address of the vNIC :param vlan: VLAN of the vNIC :param oui_id: OUI Type :param oui_data: OUI Data :return vdp_keyword_str: Dictionary of VDP arguments and values
['Constructs', 'the', 'VDP', 'Message', '.']
train
https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/agent/vdp/lldpad.py#L330-L402
4,128
DistrictDataLabs/yellowbrick
yellowbrick/datasets/base.py
Dataset.to_dataframe
def to_dataframe(self): """ Returns the entire dataset as a single pandas DataFrame. Returns ------- df : DataFrame with shape (n_instances, n_columns) A pandas DataFrame containing the complete original data table including all targets (specified by the meta data) and all features (including those that might have been filtered out). """ if pd is None: raise DatasetsError( "pandas is required to load DataFrame, it can be installed with pip" ) path = find_dataset_path(self.name, ext=".csv.gz", data_home=self.data_home) return pd.read_csv(path, compression="gzip")
python
def to_dataframe(self): """ Returns the entire dataset as a single pandas DataFrame. Returns ------- df : DataFrame with shape (n_instances, n_columns) A pandas DataFrame containing the complete original data table including all targets (specified by the meta data) and all features (including those that might have been filtered out). """ if pd is None: raise DatasetsError( "pandas is required to load DataFrame, it can be installed with pip" ) path = find_dataset_path(self.name, ext=".csv.gz", data_home=self.data_home) return pd.read_csv(path, compression="gzip")
['def', 'to_dataframe', '(', 'self', ')', ':', 'if', 'pd', 'is', 'None', ':', 'raise', 'DatasetsError', '(', '"pandas is required to load DataFrame, it can be installed with pip"', ')', 'path', '=', 'find_dataset_path', '(', 'self', '.', 'name', ',', 'ext', '=', '".csv.gz"', ',', 'data_home', '=', 'self', '.', 'data_home', ')', 'return', 'pd', '.', 'read_csv', '(', 'path', ',', 'compression', '=', '"gzip"', ')']
Returns the entire dataset as a single pandas DataFrame. Returns ------- df : DataFrame with shape (n_instances, n_columns) A pandas DataFrame containing the complete original data table including all targets (specified by the meta data) and all features (including those that might have been filtered out).
['Returns', 'the', 'entire', 'dataset', 'as', 'a', 'single', 'pandas', 'DataFrame', '.']
train
https://github.com/DistrictDataLabs/yellowbrick/blob/59b67236a3862c73363e8edad7cd86da5b69e3b2/yellowbrick/datasets/base.py#L232-L249
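The load itself is a single pandas call; a self-contained sketch that writes and re-reads a gzipped CSV (pandas would also infer the compression from the extension):

import pandas as pd

pd.DataFrame({'x': [1, 2], 'y': [3, 4]}).to_csv(
    'dataset.csv.gz', index=False, compression='gzip')
df = pd.read_csv('dataset.csv.gz', compression='gzip')  # same call as to_dataframe
print(df.shape)  # (2, 2)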
4,129
paypal/baler
baler/baler.py
static_uint8_variable_for_data
def static_uint8_variable_for_data(variable_name, data, max_line_length=120, comment="", indent=2): r""" >>> static_uint8_variable_for_data("v", "abc") 'static uint8_t v[3] = {\n 0x61, 0x62, 0x63,\n}; // v' >>> static_uint8_variable_for_data("v", "abc", comment="hi") 'static uint8_t v[3] = { // hi\n 0x61, 0x62, 0x63,\n}; // v' >>> static_uint8_variable_for_data("v", "abc", indent=4) 'static uint8_t v[3] = {\n 0x61, 0x62, 0x63,\n}; // v' >>> static_uint8_variable_for_data("v", "abcabcabcabc", max_line_length=20) 'static uint8_t v[12] = {\n 0x61, 0x62, 0x63,\n 0x61, 0x62, 0x63,\n 0x61, 0x62, 0x63,\n 0x61, 0x62, 0x63,\n}; // v' """ hex_components = [] for byte in data: byte_as_hex = "0x{u:02X}".format(u=ord(byte)) hex_components.append(byte_as_hex) chunk_size = (max_line_length - indent + 2 - 1) // 6 # 6 is len("0xAA, "); +2 for the last element's ", "; -1 for the trailing comma array_lines = [] for chunk_offset in xrange(0, len(hex_components), chunk_size): chunk = hex_components[chunk_offset:chunk_offset + chunk_size] array_lines.append(" " * indent + ", ".join(chunk) + ",") array_data = "\n".join(array_lines) if comment != "": comment = " // " + comment substitutions = {"v": variable_name, "l": len(hex_components), "d": array_data, "c": comment} declaration = "static uint8_t {v}[{l}] = {{{c}\n{d}\n}}; // {v}".format(**substitutions) return declaration
python
def static_uint8_variable_for_data(variable_name, data, max_line_length=120, comment="", indent=2): r""" >>> static_uint8_variable_for_data("v", "abc") 'static uint8_t v[3] = {\n 0x61, 0x62, 0x63,\n}; // v' >>> static_uint8_variable_for_data("v", "abc", comment="hi") 'static uint8_t v[3] = { // hi\n 0x61, 0x62, 0x63,\n}; // v' >>> static_uint8_variable_for_data("v", "abc", indent=4) 'static uint8_t v[3] = {\n 0x61, 0x62, 0x63,\n}; // v' >>> static_uint8_variable_for_data("v", "abcabcabcabc", max_line_length=20) 'static uint8_t v[12] = {\n 0x61, 0x62, 0x63,\n 0x61, 0x62, 0x63,\n 0x61, 0x62, 0x63,\n 0x61, 0x62, 0x63,\n}; // v' """ hex_components = [] for byte in data: byte_as_hex = "0x{u:02X}".format(u=ord(byte)) hex_components.append(byte_as_hex) chunk_size = (max_line_length - indent + 2 - 1) // 6 # 6 is len("0xAA, "); +2 for the last element's ", "; -1 for the trailing comma array_lines = [] for chunk_offset in xrange(0, len(hex_components), chunk_size): chunk = hex_components[chunk_offset:chunk_offset + chunk_size] array_lines.append(" " * indent + ", ".join(chunk) + ",") array_data = "\n".join(array_lines) if comment != "": comment = " // " + comment substitutions = {"v": variable_name, "l": len(hex_components), "d": array_data, "c": comment} declaration = "static uint8_t {v}[{l}] = {{{c}\n{d}\n}}; // {v}".format(**substitutions) return declaration
['def', 'static_uint8_variable_for_data', '(', 'variable_name', ',', 'data', ',', 'max_line_length', '=', '120', ',', 'comment', '=', '""', ',', 'indent', '=', '2', ')', ':', 'hex_components', '=', '[', ']', 'for', 'byte', 'in', 'data', ':', 'byte_as_hex', '=', '"0x{u:02X}"', '.', 'format', '(', 'u', '=', 'ord', '(', 'byte', ')', ')', 'hex_components', '.', 'append', '(', 'byte_as_hex', ')', 'chunk_size', '=', '(', 'max_line_length', '-', 'indent', '+', '2', '-', '1', ')', '//', '6', '# 6 is len("0xAA, "); +2 for the last element\'s ", "; -1 for the trailing comma', 'array_lines', '=', '[', ']', 'for', 'chunk_offset', 'in', 'xrange', '(', '0', ',', 'len', '(', 'hex_components', ')', ',', 'chunk_size', ')', ':', 'chunk', '=', 'hex_components', '[', 'chunk_offset', ':', 'chunk_offset', '+', 'chunk_size', ']', 'array_lines', '.', 'append', '(', '" "', '*', 'indent', '+', '", "', '.', 'join', '(', 'chunk', ')', '+', '","', ')', 'array_data', '=', '"\\n"', '.', 'join', '(', 'array_lines', ')', 'if', 'comment', '!=', '""', ':', 'comment', '=', '" // "', '+', 'comment', 'substitutions', '=', '{', '"v"', ':', 'variable_name', ',', '"l"', ':', 'len', '(', 'hex_components', ')', ',', '"d"', ':', 'array_data', ',', '"c"', ':', 'comment', '}', 'declaration', '=', '"static uint8_t {v}[{l}] = {{{c}\\n{d}\\n}}; // {v}"', '.', 'format', '(', '*', '*', 'substitutions', ')', 'return', 'declaration']
r""" >>> static_uint8_variable_for_data("v", "abc") 'static uint8_t v[3] = {\n 0x61, 0x62, 0x63,\n}; // v' >>> static_uint8_variable_for_data("v", "abc", comment="hi") 'static uint8_t v[3] = { // hi\n 0x61, 0x62, 0x63,\n}; // v' >>> static_uint8_variable_for_data("v", "abc", indent=4) 'static uint8_t v[3] = {\n 0x61, 0x62, 0x63,\n}; // v' >>> static_uint8_variable_for_data("v", "abcabcabcabc", max_line_length=20) 'static uint8_t v[12] = {\n 0x61, 0x62, 0x63,\n 0x61, 0x62, 0x63,\n 0x61, 0x62, 0x63,\n 0x61, 0x62, 0x63,\n}; // v'
['r', '>>>', 'static_uint8_variable_for_data', '(', 'v', 'abc', ')', 'static', 'uint8_t', 'v', '[', '3', ']', '=', '{', '\\', 'n', '0x61', '0x62', '0x63', '\\', 'n', '}', ';', '//', 'v', '>>>', 'static_uint8_variable_for_data', '(', 'v', 'abc', 'comment', '=', 'hi', ')', 'static', 'uint8_t', 'v', '[', '3', ']', '=', '{', '//', 'hi', '\\', 'n', '0x61', '0x62', '0x63', '\\', 'n', '}', ';', '//', 'v', '>>>', 'static_uint8_variable_for_data', '(', 'v', 'abc', 'indent', '=', '4', ')', 'static', 'uint8_t', 'v', '[', '3', ']', '=', '{', '\\', 'n', '0x61', '0x62', '0x63', '\\', 'n', '}', ';', '//', 'v', '>>>', 'static_uint8_variable_for_data', '(', 'v', 'abcabcabcabc', 'max_line_length', '=', '20', ')', 'static', 'uint8_t', 'v', '[', '12', ']', '=', '{', '\\', 'n', '0x61', '0x62', '0x63', '\\', 'n', '0x61', '0x62', '0x63', '\\', 'n', '0x61', '0x62', '0x63', '\\', 'n', '0x61', '0x62', '0x63', '\\', 'n', '}', ';', '//', 'v']
train
https://github.com/paypal/baler/blob/db4f09dd2c7729b2df5268c87ad3b4cb43396abf/baler/baler.py#L44-L77
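The function above is Python 2 (xrange, byte-wise iteration over str). A Python 3 rendering of the same idea over a bytes object might look like this sketch, which trades the line-length budget for a fixed bytes-per-line count:

def uint8_array(name, data, per_line=12, indent=2):
    hex_bytes = ['0x{:02X}'.format(b) for b in data]  # bytes yield ints in Python 3
    lines = [' ' * indent + ', '.join(hex_bytes[i:i + per_line]) + ','
             for i in range(0, len(hex_bytes), per_line)]
    return 'static uint8_t {n}[{l}] = {{\n{d}\n}}; // {n}'.format(
        n=name, l=len(hex_bytes), d='\n'.join(lines))

print(uint8_array('v', b'abc'))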
4,130
TylerTemp/docpie
docpie/pie.py
Docpie.set_config
def set_config(self, **config): """Shadow all the current config.""" reinit = False if 'stdopt' in config: stdopt = config.pop('stdopt') reinit = (stdopt != self.stdopt) self.stdopt = stdopt if 'attachopt' in config: attachopt = config.pop('attachopt') reinit = reinit or (attachopt != self.attachopt) self.attachopt = attachopt if 'attachvalue' in config: attachvalue = config.pop('attachvalue') reinit = reinit or (attachvalue != self.attachvalue) self.attachvalue = attachvalue if 'auto2dashes' in config: self.auto2dashes = config.pop('auto2dashes') if 'name' in config: name = config.pop('name') reinit = reinit or (name != self.name) self.name = name if 'help' in config: self.help = config.pop('help') self._set_or_remove_extra_handler( self.help, ('--help', '-h'), self.help_handler) if 'version' in config: self.version = config.pop('version') self._set_or_remove_extra_handler( self.version is not None, ('--version', '-v'), self.version_handler) if 'case_sensitive' in config: case_sensitive = config.pop('case_sensitive') reinit = reinit or (case_sensitive != self.case_sensitive) self.case_sensitive = case_sensitive if 'optionsfirst' in config: self.options_first = config.pop('optionsfirst') if 'appearedonly' in config: self.appeared_only = config.pop('appearedonly') if 'namedoptions' in config: namedoptions = config.pop('namedoptions') reinit = reinit or (namedoptions != self.namedoptions) self.namedoptions = namedoptions if 'extra' in config: self.extra.update(self._formal_extra(config.pop('extra'))) if config: # should be empty raise ValueError( '`%s` %s not accepted key argument%s' % ( '`, `'.join(config), 'is' if len(config) == 1 else 'are', '' if len(config) == 1 else 's' )) if self.doc is not None and reinit: logger.warning( 'You changed the config that requires re-initialized' ' `Docpie` object. Create a new one instead' ) self._init()
python
def set_config(self, **config): """Shadow all the current config.""" reinit = False if 'stdopt' in config: stdopt = config.pop('stdopt') reinit = (stdopt != self.stdopt) self.stdopt = stdopt if 'attachopt' in config: attachopt = config.pop('attachopt') reinit = reinit or (attachopt != self.attachopt) self.attachopt = attachopt if 'attachvalue' in config: attachvalue = config.pop('attachvalue') reinit = reinit or (attachvalue != self.attachvalue) self.attachvalue = attachvalue if 'auto2dashes' in config: self.auto2dashes = config.pop('auto2dashes') if 'name' in config: name = config.pop('name') reinit = reinit or (name != self.name) self.name = name if 'help' in config: self.help = config.pop('help') self._set_or_remove_extra_handler( self.help, ('--help', '-h'), self.help_handler) if 'version' in config: self.version = config.pop('version') self._set_or_remove_extra_handler( self.version is not None, ('--version', '-v'), self.version_handler) if 'case_sensitive' in config: case_sensitive = config.pop('case_sensitive') reinit = reinit or (case_sensitive != self.case_sensitive) self.case_sensitive = case_sensitive if 'optionsfirst' in config: self.options_first = config.pop('optionsfirst') if 'appearedonly' in config: self.appeared_only = config.pop('appearedonly') if 'namedoptions' in config: namedoptions = config.pop('namedoptions') reinit = reinit or (namedoptions != self.namedoptions) self.namedoptions = namedoptions if 'extra' in config: self.extra.update(self._formal_extra(config.pop('extra'))) if config: # should be empty raise ValueError( '`%s` %s not accepted key argument%s' % ( '`, `'.join(config), 'is' if len(config) == 1 else 'are', '' if len(config) == 1 else 's' )) if self.doc is not None and reinit: logger.warning( 'You changed the config that requires re-initialized' ' `Docpie` object. Create a new one instead' ) self._init()
['def', 'set_config', '(', 'self', ',', '*', '*', 'config', ')', ':', 'reinit', '=', 'False', 'if', "'stdopt'", 'in', 'config', ':', 'stdopt', '=', 'config', '.', 'pop', '(', "'stdopt'", ')', 'reinit', '=', '(', 'stdopt', '!=', 'self', '.', 'stdopt', ')', 'self', '.', 'stdopt', '=', 'stdopt', 'if', "'attachopt'", 'in', 'config', ':', 'attachopt', '=', 'config', '.', 'pop', '(', "'attachopt'", ')', 'reinit', '=', 'reinit', 'or', '(', 'attachopt', '!=', 'self', '.', 'attachopt', ')', 'self', '.', 'attachopt', '=', 'attachopt', 'if', "'attachvalue'", 'in', 'config', ':', 'attachvalue', '=', 'config', '.', 'pop', '(', "'attachvalue'", ')', 'reinit', '=', 'reinit', 'or', '(', 'attachvalue', '!=', 'self', '.', 'attachvalue', ')', 'self', '.', 'attachvalue', '=', 'attachvalue', 'if', "'auto2dashes'", 'in', 'config', ':', 'self', '.', 'auto2dashes', '=', 'config', '.', 'pop', '(', "'auto2dashes'", ')', 'if', "'name'", 'in', 'config', ':', 'name', '=', 'config', '.', 'pop', '(', "'name'", ')', 'reinit', '=', 'reinit', 'or', '(', 'name', '!=', 'self', '.', 'name', ')', 'self', '.', 'name', '=', 'name', 'if', "'help'", 'in', 'config', ':', 'self', '.', 'help', '=', 'config', '.', 'pop', '(', "'help'", ')', 'self', '.', '_set_or_remove_extra_handler', '(', 'self', '.', 'help', ',', '(', "'--help'", ',', "'-h'", ')', ',', 'self', '.', 'help_handler', ')', 'if', "'version'", 'in', 'config', ':', 'self', '.', 'version', '=', 'config', '.', 'pop', '(', "'version'", ')', 'self', '.', '_set_or_remove_extra_handler', '(', 'self', '.', 'version', 'is', 'not', 'None', ',', '(', "'--version'", ',', "'-v'", ')', ',', 'self', '.', 'version_handler', ')', 'if', "'case_sensitive'", 'in', 'config', ':', 'case_sensitive', '=', 'config', '.', 'pop', '(', "'case_sensitive'", ')', 'reinit', '=', 'reinit', 'or', '(', 'case_sensitive', '!=', 'self', '.', 'case_sensitive', ')', 'self', '.', 'case_sensitive', '=', 'case_sensitive', 'if', "'optionsfirst'", 'in', 'config', ':', 'self', '.', 'options_first', '=', 'config', '.', 'pop', '(', "'optionsfirst'", ')', 'if', "'appearedonly'", 'in', 'config', ':', 'self', '.', 'appeared_only', '=', 'config', '.', 'pop', '(', "'appearedonly'", ')', 'if', "'namedoptions'", 'in', 'config', ':', 'namedoptions', '=', 'config', '.', 'pop', '(', "'namedoptions'", ')', 'reinit', '=', 'reinit', 'or', '(', 'namedoptions', '!=', 'self', '.', 'namedoptions', ')', 'self', '.', 'namedoptions', '=', 'namedoptions', 'if', "'extra'", 'in', 'config', ':', 'self', '.', 'extra', '.', 'update', '(', 'self', '.', '_formal_extra', '(', 'config', '.', 'pop', '(', "'extra'", ')', ')', ')', 'if', 'config', ':', '# should be empty', 'raise', 'ValueError', '(', "'`%s` %s not accepted key argument%s'", '%', '(', "'`, `'", '.', 'join', '(', 'config', ')', ',', "'is'", 'if', 'len', '(', 'config', ')', '==', '1', 'else', "'are'", ',', "''", 'if', 'len', '(', 'config', ')', '==', '1', 'else', "'s'", ')', ')', 'if', 'self', '.', 'doc', 'is', 'not', 'None', 'and', 'reinit', ':', 'logger', '.', 'warning', '(', "'You changed the config that requires re-initialized'", "' `Docpie` object. Create a new one instead'", ')', 'self', '.', '_init', '(', ')']
Shadow all the current config.
['Shadow', 'all', 'the', 'current', 'config', '.']
train
https://github.com/TylerTemp/docpie/blob/e658454b81b6c79a020d499f12ad73496392c09a/docpie/pie.py#L655-L714
4,131
AndrewAnnex/SpiceyPy
spiceypy/spiceypy.py
ckgpav
def ckgpav(inst, sclkdp, tol, ref): """ Get pointing (attitude) and angular velocity for a specified spacecraft clock time. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/ckgpav_c.html :param inst: NAIF ID of instrument, spacecraft, or structure. :type inst: int :param sclkdp: Encoded spacecraft clock time. :type sclkdp: float :param tol: Time tolerance. :type tol: float :param ref: Reference frame. :type ref: str :return: C-matrix pointing data, Angular velocity vector, Output encoded spacecraft clock time. :rtype: tuple """ inst = ctypes.c_int(inst) sclkdp = ctypes.c_double(sclkdp) tol = ctypes.c_double(tol) ref = stypes.stringToCharP(ref) cmat = stypes.emptyDoubleMatrix() av = stypes.emptyDoubleVector(3) clkout = ctypes.c_double() found = ctypes.c_int() libspice.ckgpav_c(inst, sclkdp, tol, ref, cmat, av, ctypes.byref(clkout), ctypes.byref(found)) return stypes.cMatrixToNumpy(cmat), stypes.cVectorToPython( av), clkout.value, bool(found.value)
python
def ckgpav(inst, sclkdp, tol, ref): """ Get pointing (attitude) and angular velocity for a specified spacecraft clock time. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/ckgpav_c.html :param inst: NAIF ID of instrument, spacecraft, or structure. :type inst: int :param sclkdp: Encoded spacecraft clock time. :type sclkdp: float :param tol: Time tolerance. :type tol: float :param ref: Reference frame. :type ref: str :return: C-matrix pointing data, Angular velocity vector, Output encoded spacecraft clock time. :rtype: tuple """ inst = ctypes.c_int(inst) sclkdp = ctypes.c_double(sclkdp) tol = ctypes.c_double(tol) ref = stypes.stringToCharP(ref) cmat = stypes.emptyDoubleMatrix() av = stypes.emptyDoubleVector(3) clkout = ctypes.c_double() found = ctypes.c_int() libspice.ckgpav_c(inst, sclkdp, tol, ref, cmat, av, ctypes.byref(clkout), ctypes.byref(found)) return stypes.cMatrixToNumpy(cmat), stypes.cVectorToPython( av), clkout.value, bool(found.value)
['def', 'ckgpav', '(', 'inst', ',', 'sclkdp', ',', 'tol', ',', 'ref', ')', ':', 'inst', '=', 'ctypes', '.', 'c_int', '(', 'inst', ')', 'sclkdp', '=', 'ctypes', '.', 'c_double', '(', 'sclkdp', ')', 'tol', '=', 'ctypes', '.', 'c_double', '(', 'tol', ')', 'ref', '=', 'stypes', '.', 'stringToCharP', '(', 'ref', ')', 'cmat', '=', 'stypes', '.', 'emptyDoubleMatrix', '(', ')', 'av', '=', 'stypes', '.', 'emptyDoubleVector', '(', '3', ')', 'clkout', '=', 'ctypes', '.', 'c_double', '(', ')', 'found', '=', 'ctypes', '.', 'c_int', '(', ')', 'libspice', '.', 'ckgpav_c', '(', 'inst', ',', 'sclkdp', ',', 'tol', ',', 'ref', ',', 'cmat', ',', 'av', ',', 'ctypes', '.', 'byref', '(', 'clkout', ')', ',', 'ctypes', '.', 'byref', '(', 'found', ')', ')', 'return', 'stypes', '.', 'cMatrixToNumpy', '(', 'cmat', ')', ',', 'stypes', '.', 'cVectorToPython', '(', 'av', ')', ',', 'clkout', '.', 'value', ',', 'bool', '(', 'found', '.', 'value', ')']
Get pointing (attitude) and angular velocity for a specified spacecraft clock time. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/ckgpav_c.html :param inst: NAIF ID of instrument, spacecraft, or structure. :type inst: int :param sclkdp: Encoded spacecraft clock time. :type sclkdp: float :param tol: Time tolerance. :type tol: float :param ref: Reference frame. :type ref: str :return: C-matrix pointing data, Angular velocity vector, Output encoded spacecraft clock time. :rtype: tuple
['Get', 'pointing', '(', 'attitude', ')', 'and', 'angular', 'velocity', 'for', 'a', 'specified', 'spacecraft', 'clock', 'time', '.']
train
https://github.com/AndrewAnnex/SpiceyPy/blob/fc20a9b9de68b58eed5b332f0c051fb343a6e335/spiceypy/spiceypy.py#L1031-L1063
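The ctypes output-parameter pattern used above (pre-allocate a C object, pass it with byref, read .value afterwards) generalizes beyond SPICE. A small sketch against libm's frexp, which should work on typical Unix systems:

import ctypes
import ctypes.util

libm = ctypes.CDLL(ctypes.util.find_library('m'))
libm.frexp.restype = ctypes.c_double
libm.frexp.argtypes = [ctypes.c_double, ctypes.POINTER(ctypes.c_int)]

exponent = ctypes.c_int()  # pre-allocated output slot
mantissa = libm.frexp(8.0, ctypes.byref(exponent))
print(mantissa, exponent.value)  # 0.5 4, since 8.0 == 0.5 * 2**4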
4,132
datadesk/python-documentcloud
documentcloud/toolbox.py
credentials_required
def credentials_required(method_func): """ Decorator for methods that checks that the client has credentials. Throws a CredentialsMissingError when they are absent. """ def _checkcredentials(self, *args, **kwargs): if self.username and self.password: return method_func(self, *args, **kwargs) else: raise CredentialsMissingError("This is a private method. \ You must provide a username and password when you initialize the \ DocumentCloud client to attempt this type of request.") return wraps(method_func)(_checkcredentials)
python
def credentials_required(method_func): """ Decorator for methods that checks that the client has credentials. Throws a CredentialsMissingError when they are absent. """ def _checkcredentials(self, *args, **kwargs): if self.username and self.password: return method_func(self, *args, **kwargs) else: raise CredentialsMissingError("This is a private method. \ You must provide a username and password when you initialize the \ DocumentCloud client to attempt this type of request.") return wraps(method_func)(_checkcredentials)
['def', 'credentials_required', '(', 'method_func', ')', ':', 'def', '_checkcredentials', '(', 'self', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'if', 'self', '.', 'username', 'and', 'self', '.', 'password', ':', 'return', 'method_func', '(', 'self', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', 'else', ':', 'raise', 'CredentialsMissingError', '(', '"This is a private method. \\\nYou must provide a username and password when you initialize the \\\nDocumentCloud client to attempt this type of request."', ')', 'return', 'wraps', '(', 'method_func', ')', '(', '_checkcredentials', ')']
Decorator for methods that checks that the client has credentials. Throws a CredentialsMissingError when they are absent.
['Decorator', 'for', 'methods', 'that', 'checks', 'that', 'the', 'client', 'has', 'credentials', '.']
train
https://github.com/datadesk/python-documentcloud/blob/0d7f42cbf1edf5c61fca37ed846362cba4abfd76/documentcloud/toolbox.py#L45-L59
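One detail worth seeing in isolation: wraps(method_func)(_checkcredentials) preserves the wrapped method's name and docstring. A standalone sketch of the same guard in decorator-syntax form:

from functools import wraps

def credentials_required(method_func):
    @wraps(method_func)
    def _checkcredentials(self, *args, **kwargs):
        if self.username and self.password:
            return method_func(self, *args, **kwargs)
        raise RuntimeError("username and password are required for this method")
    return _checkcredentials

class Client:
    username = password = None

    @credentials_required
    def delete(self):
        """Delete something private."""

print(Client.delete.__doc__)  # preserved by wraps
Client().delete()             # raises RuntimeError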
4,133
jart/fabulous
fabulous/utils.py
TerminalInfo.dimensions
def dimensions(self): """Returns terminal dimensions Don't save this information for long periods of time because the user might resize their terminal. :return: Returns ``(width, height)``. If there's no terminal to be found, we'll just return ``(79, 40)``. """ try: call = fcntl.ioctl(self.termfd, termios.TIOCGWINSZ, "\000" * 8) except IOError: return (79, 40) else: height, width = struct.unpack("hhhh", call)[:2] return (width, height)
python
def dimensions(self): """Returns terminal dimensions Don't save this information for long periods of time because the user might resize their terminal. :return: Returns ``(width, height)``. If there's no terminal to be found, we'll just return ``(79, 40)``. """ try: call = fcntl.ioctl(self.termfd, termios.TIOCGWINSZ, "\000" * 8) except IOError: return (79, 40) else: height, width = struct.unpack("hhhh", call)[:2] return (width, height)
['def', 'dimensions', '(', 'self', ')', ':', 'try', ':', 'call', '=', 'fcntl', '.', 'ioctl', '(', 'self', '.', 'termfd', ',', 'termios', '.', 'TIOCGWINSZ', ',', '"\\000"', '*', '8', ')', 'except', 'IOError', ':', 'return', '(', '79', ',', '40', ')', 'else', ':', 'height', ',', 'width', '=', 'struct', '.', 'unpack', '(', '"hhhh"', ',', 'call', ')', '[', ':', '2', ']', 'return', '(', 'width', ',', 'height', ')']
Returns terminal dimensions Don't save this information for long periods of time because the user might resize their terminal. :return: Returns ``(width, height)``. If there's no terminal to be found, we'll just return ``(79, 40)``.
['Returns', 'terminal', 'dimensions']
train
https://github.com/jart/fabulous/blob/19903cf0a980b82f5928c3bec1f28b6bdd3785bd/fabulous/utils.py#L100-L115
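The same TIOCGWINSZ probe as a standalone Unix-only helper with a fallback (modern code could also reach for shutil.get_terminal_size):

import fcntl
import struct
import sys
import termios

def terminal_dimensions(fd=None, fallback=(79, 40)):
    fd = sys.stdout.fileno() if fd is None else fd
    try:
        raw = fcntl.ioctl(fd, termios.TIOCGWINSZ, b"\0" * 8)
    except OSError:
        return fallback
    rows, cols = struct.unpack("hhhh", raw)[:2]  # struct winsize: rows, cols, xpix, ypix
    return (cols, rows)

print(terminal_dimensions())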
4,134
DLR-RM/RAFCON
source/rafcon/gui/mygaphas/connector.py
RectanglePointPort.glue
def glue(self, pos): """Calculates the distance between the given position and the port :param (float, float) pos: Distance to this position is calculated :return: Port position and distance to the port :rtype: tuple """ # Distance between border of rectangle and point # Equation from http://stackoverflow.com/a/18157551/3568069 dx = max(self.point.x - self.width / 2. - pos[0], 0, pos[0] - (self.point.x + self.width / 2.)) dy = max(self.point.y - self.height / 2. - pos[1], 0, pos[1] - (self.point.y + self.height / 2.)) dist = sqrt(dx*dx + dy*dy) return self.point, dist
python
def glue(self, pos): """Calculates the distance between the given position and the port :param (float, float) pos: Distance to this position is calculated :return: Port position and distance to the port :rtype: tuple """ # Distance between border of rectangle and point # Equation from http://stackoverflow.com/a/18157551/3568069 dx = max(self.point.x - self.width / 2. - pos[0], 0, pos[0] - (self.point.x + self.width / 2.)) dy = max(self.point.y - self.height / 2. - pos[1], 0, pos[1] - (self.point.y + self.height / 2.)) dist = sqrt(dx*dx + dy*dy) return self.point, dist
['def', 'glue', '(', 'self', ',', 'pos', ')', ':', '# Distance between border of rectangle and point', '# Equation from http://stackoverflow.com/a/18157551/3568069', 'dx', '=', 'max', '(', 'self', '.', 'point', '.', 'x', '-', 'self', '.', 'width', '/', '2.', '-', 'pos', '[', '0', ']', ',', '0', ',', 'pos', '[', '0', ']', '-', '(', 'self', '.', 'point', '.', 'x', '+', 'self', '.', 'width', '/', '2.', ')', ')', 'dy', '=', 'max', '(', 'self', '.', 'point', '.', 'y', '-', 'self', '.', 'height', '/', '2.', '-', 'pos', '[', '1', ']', ',', '0', ',', 'pos', '[', '1', ']', '-', '(', 'self', '.', 'point', '.', 'y', '+', 'self', '.', 'height', '/', '2.', ')', ')', 'dist', '=', 'sqrt', '(', 'dx', '*', 'dx', '+', 'dy', '*', 'dy', ')', 'return', 'self', '.', 'point', ',', 'dist']
Calculates the distance between the given position and the port :param (float, float) pos: Distance to this position is calculated :return: Port position and distance to the port :rtype: tuple
['Calculates', 'the', 'distance', 'between', 'the', 'given', 'position', 'and', 'the', 'port']
train
https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/gui/mygaphas/connector.py#L43-L55
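The clamp-per-axis border distance is easy to verify in isolation; a pure-Python sketch for an axis-aligned rectangle centered at (cx, cy):

from math import sqrt

def rect_border_distance(cx, cy, width, height, px, py):
    # each term clamps to 0 when the point lies between that pair of sides
    dx = max(cx - width / 2.0 - px, 0, px - (cx + width / 2.0))
    dy = max(cy - height / 2.0 - py, 0, py - (cy + height / 2.0))
    return sqrt(dx * dx + dy * dy)

print(rect_border_distance(0, 0, 10, 10, 8, 0))  # 3.0: point is 3 right of the border
print(rect_border_distance(0, 0, 10, 10, 2, 2))  # 0.0: point is inside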
4,135
apache/incubator-mxnet
example/gluon/lipnet/trainer.py
Train.infer_batch
def infer_batch(self, dataloader): """ Description : inference for LipNet """ sum_losses = 0 len_losses = 0 for input_data, input_label in dataloader: data = gluon.utils.split_and_load(input_data, self.ctx, even_split=False) label = gluon.utils.split_and_load(input_label, self.ctx, even_split=False) batch_sum_losses, batch_len_losses = self.infer(data, label) sum_losses += batch_sum_losses len_losses += batch_len_losses return sum_losses, len_losses
python
def infer_batch(self, dataloader): """ Description : inference for LipNet """ sum_losses = 0 len_losses = 0 for input_data, input_label in dataloader: data = gluon.utils.split_and_load(input_data, self.ctx, even_split=False) label = gluon.utils.split_and_load(input_label, self.ctx, even_split=False) batch_sum_losses, batch_len_losses = self.infer(data, label) sum_losses += batch_sum_losses len_losses += batch_len_losses return sum_losses, len_losses
['def', 'infer_batch', '(', 'self', ',', 'dataloader', ')', ':', 'sum_losses', '=', '0', 'len_losses', '=', '0', 'for', 'input_data', ',', 'input_label', 'in', 'dataloader', ':', 'data', '=', 'gluon', '.', 'utils', '.', 'split_and_load', '(', 'input_data', ',', 'self', '.', 'ctx', ',', 'even_split', '=', 'False', ')', 'label', '=', 'gluon', '.', 'utils', '.', 'split_and_load', '(', 'input_label', ',', 'self', '.', 'ctx', ',', 'even_split', '=', 'False', ')', 'sum_losses', ',', 'len_losses', '=', 'self', '.', 'infer', '(', 'data', ',', 'label', ')', 'sum_losses', '+=', 'sum_losses', 'len_losses', '+=', 'len_losses', 'return', 'sum_losses', ',', 'len_losses']
Description : inference for LipNet
['Description', ':', 'inference', 'for', 'LipNet']
train
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/gluon/lipnet/trainer.py#L188-L201
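Standalone, the fixed accumulation pattern looks like this (infer_fn is a hypothetical stand-in for self.infer):

def accumulate_losses(dataloader, infer_fn):
    sum_losses = 0
    len_losses = 0
    for data, label in dataloader:
        batch_sum, batch_len = infer_fn(data, label)
        # Add each batch's totals to the running totals rather than
        # rebinding the accumulators (the original doubled the last batch).
        sum_losses += batch_sum
        len_losses += batch_len
    return sum_losses, len_losses

assert accumulate_losses([(None, None)] * 3, lambda d, l: (2.0, 4)) == (6.0, 12)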
4,136
Kortemme-Lab/klab
klab/google/gcalendar.py
GoogleCalendar.get_upcoming_event_lists_for_the_remainder_of_the_month
def get_upcoming_event_lists_for_the_remainder_of_the_month(self, year = None, month = None): '''Return the set of events as a triple of (today's events, events for the remainder of the week, events for the remainder of the month).''' events = [] if year == None and month == None: now = datetime.now(tz=self.timezone) # timezone? else: now = datetime(year=year, month=month, day=1, hour=0, minute=0, second=0, tzinfo=self.timezone) # Get today's events, including past events start_time = datetime(year=now.year, month=now.month, day=now.day, hour=0, minute=0, second=0, tzinfo=self.timezone) end_time = datetime(year = start_time.year, month = start_time.month, day = start_time.day, hour=23, minute=59, second=59, tzinfo=self.timezone) events.append(self.get_events(start_time.isoformat(), end_time.isoformat())) # Get this week's events if now.weekday() < 6: start_time = datetime(year=now.year, month=now.month, day=now.day + 1, hour=0, minute=0, second=0, tzinfo=self.timezone) end_time = start_time + timedelta(days = 6 - now.weekday()) # We do still want to return events in the next month if they fall within this week. Otherwise #if end_time.month != now.month: # end_time = end_time - timedelta(days = end_time.day) # end_time = datetime(year = end_time.year, month = end_time.month, day = end_time.day, hour=23, minute=59, second=59, tzinfo=self.timezone) #else: end_time = end_time + timedelta(seconds = -1) #end_time = datetime(year = end_time.year, month = end_time.month, day = end_time.day - 1, hour=23, minute=59, second=59, tzinfo=self.timezone) events.append(self.get_events(start_time.isoformat(), end_time.isoformat())) else: events.append([]) # Get the remaining events in the month start_time = end_time + timedelta(seconds = 1) if start_time.month == now.month: if now.month == 12: end_time = datetime(year = start_time.year, month = 12, day = 31, hour=23, minute=59, second=59, tzinfo=self.timezone) else: end_time = datetime(year = start_time.year, month = start_time.month + 1, day = 1, hour=0, minute=0, second=0, tzinfo=self.timezone) end_time = end_time - timedelta(seconds = 1) events.append(self.get_events(start_time.isoformat(), end_time.isoformat())) else: events.append([]) return events
python
def get_upcoming_event_lists_for_the_remainder_of_the_month(self, year = None, month = None): '''Return the set of events as a triple of (today's events, events for the remainder of the week, events for the remainder of the month).''' events = [] if year == None and month == None: now = datetime.now(tz=self.timezone) # timezone? else: now = datetime(year=year, month=month, day=1, hour=0, minute=0, second=0, tzinfo=self.timezone) # Get today's events, including past events start_time = datetime(year=now.year, month=now.month, day=now.day, hour=0, minute=0, second=0, tzinfo=self.timezone) end_time = datetime(year = start_time.year, month = start_time.month, day = start_time.day, hour=23, minute=59, second=59, tzinfo=self.timezone) events.append(self.get_events(start_time.isoformat(), end_time.isoformat())) # Get this week's events if now.weekday() < 6: start_time = datetime(year=now.year, month=now.month, day=now.day + 1, hour=0, minute=0, second=0, tzinfo=self.timezone) end_time = start_time + timedelta(days = 6 - now.weekday()) # We do still want to return events in the next month if they fall within this week. Otherwise #if end_time.month != now.month: # end_time = end_time - timedelta(days = end_time.day) # end_time = datetime(year = end_time.year, month = end_time.month, day = end_time.day, hour=23, minute=59, second=59, tzinfo=self.timezone) #else: end_time = end_time + timedelta(seconds = -1) #end_time = datetime(year = end_time.year, month = end_time.month, day = end_time.day - 1, hour=23, minute=59, second=59, tzinfo=self.timezone) events.append(self.get_events(start_time.isoformat(), end_time.isoformat())) else: events.append([]) # Get the remaining events in the month start_time = end_time + timedelta(seconds = 1) if start_time.month == now.month: if now.month == 12: end_time = datetime(year = start_time.year, month = 12, day = 31, hour=23, minute=59, second=59, tzinfo=self.timezone) else: end_time = datetime(year = start_time.year, month = start_time.month + 1, day = 1, hour=0, minute=0, second=0, tzinfo=self.timezone) end_time = end_time - timedelta(seconds = 1) events.append(self.get_events(start_time.isoformat(), end_time.isoformat())) else: events.append([]) return events
['def', 'get_upcoming_event_lists_for_the_remainder_of_the_month', '(', 'self', ',', 'year', '=', 'None', ',', 'month', '=', 'None', ')', ':', 'events', '=', '[', ']', 'if', 'year', '==', 'None', 'and', 'month', '==', 'None', ':', 'now', '=', 'datetime', '.', 'now', '(', 'tz', '=', 'self', '.', 'timezone', ')', '# timezone?', 'else', ':', 'now', '=', 'datetime', '(', 'year', '=', 'year', ',', 'month', '=', 'month', ',', 'day', '=', '1', ',', 'hour', '=', '0', ',', 'minute', '=', '0', ',', 'second', '=', '0', ',', 'tzinfo', '=', 'self', '.', 'timezone', ')', "# Get today's events, including past events", 'start_time', '=', 'datetime', '(', 'year', '=', 'now', '.', 'year', ',', 'month', '=', 'now', '.', 'month', ',', 'day', '=', 'now', '.', 'day', ',', 'hour', '=', '0', ',', 'minute', '=', '0', ',', 'second', '=', '0', ',', 'tzinfo', '=', 'self', '.', 'timezone', ')', 'end_time', '=', 'datetime', '(', 'year', '=', 'start_time', '.', 'year', ',', 'month', '=', 'start_time', '.', 'month', ',', 'day', '=', 'start_time', '.', 'day', ',', 'hour', '=', '23', ',', 'minute', '=', '59', ',', 'second', '=', '59', ',', 'tzinfo', '=', 'self', '.', 'timezone', ')', 'events', '.', 'append', '(', 'self', '.', 'get_events', '(', 'start_time', '.', 'isoformat', '(', ')', ',', 'end_time', '.', 'isoformat', '(', ')', ')', ')', "# Get this week's events", 'if', 'now', '.', 'weekday', '(', ')', '<', '6', ':', 'start_time', '=', 'datetime', '(', 'year', '=', 'now', '.', 'year', ',', 'month', '=', 'now', '.', 'month', ',', 'day', '=', 'now', '.', 'day', '+', '1', ',', 'hour', '=', '0', ',', 'minute', '=', '0', ',', 'second', '=', '0', ',', 'tzinfo', '=', 'self', '.', 'timezone', ')', 'end_time', '=', 'start_time', '+', 'timedelta', '(', 'days', '=', '6', '-', 'now', '.', 'weekday', '(', ')', ')', '# We do still want to return events in the next month if they fall within this week. Otherwise', '#if end_time.month != now.month:', '# end_time = end_time - timedelta(days = end_time.day)', '# end_time = datetime(year = end_time.year, month = end_time.month, day = end_time.day, hour=23, minute=59, second=59, tzinfo=self.timezone)', '#else:', 'end_time', '=', 'end_time', '+', 'timedelta', '(', 'seconds', '=', '-', '1', ')', '#end_time = datetime(year = end_time.year, month = end_time.month, day = end_time.day - 1, hour=23, minute=59, second=59, tzinfo=self.timezone)', 'events', '.', 'append', '(', 'self', '.', 'get_events', '(', 'start_time', '.', 'isoformat', '(', ')', ',', 'end_time', '.', 'isoformat', '(', ')', ')', ')', 'else', ':', 'events', '.', 'append', '(', '[', ']', ')', '# Get the remaining events in the month', 'start_time', '=', 'end_time', '+', 'timedelta', '(', 'seconds', '=', '1', ')', 'if', 'start_time', '.', 'month', '==', 'now', '.', 'month', ':', 'if', 'now', '.', 'month', '==', '12', ':', 'end_time', '=', 'datetime', '(', 'year', '=', 'start_time', '.', 'year', ',', 'month', '=', '12', ',', 'day', '=', '31', ',', 'hour', '=', '23', ',', 'minute', '=', '59', ',', 'second', '=', '59', ',', 'tzinfo', '=', 'self', '.', 'timezone', ')', 'else', ':', 'end_time', '=', 'datetime', '(', 'year', '=', 'start_time', '.', 'year', ',', 'month', '=', 'start_time', '.', 'month', '+', '1', ',', 'day', '=', '1', ',', 'hour', '=', '0', ',', 'minute', '=', '0', ',', 'second', '=', '0', ',', 'tzinfo', '=', 'self', '.', 'timezone', ')', 'end_time', '=', 'end_time', '-', 'timedelta', '(', 'seconds', '=', '1', ')', 'events', '.', 'append', '(', 'self', '.', 'get_events', '(', 'start_time', '.', 'isoformat', '(', ')', ',', 'end_time', '.', 'isoformat', '(', ')', ')', ')', 'else', ':', 'events', '.', 'append', '(', '[', ']', ')', 'return', 'events']
Return the set of events as a triple of (today's events, events for the remainder of the week, events for the remainder of the month).
['Return', 'the', 'set', 'of', 'events', 'as', 'a', 'triple', 'of', '(', 'today', 's', 'events', 'events', 'for', 'the', 'remainder', 'of', 'the', 'week', 'events', 'for', 'the', 'remainder', 'of', 'the', 'month', ')', '.']
train
https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/google/gcalendar.py#L236-L277
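One caveat in the code above: datetime(..., day=now.day + 1, ...) raises ValueError whenever now is the last day of a month; a timedelta-based sketch (not the library's code) rolls over cleanly:

from datetime import datetime, timedelta

now = datetime(2024, 1, 31)
tomorrow = (now + timedelta(days=1)).replace(hour=0, minute=0, second=0, microsecond=0)
assert (tomorrow.month, tomorrow.day) == (2, 1)   # Jan 31 -> Feb 1, no ValueError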
4,137
koszullab/metaTOR
metator/scripts/hicstuff.py
distance_to_contact
def distance_to_contact(D, alpha=1): """Compute contact matrix from input distance matrix. Distance values of zeroes are given the largest contact count otherwise inferred non-zero distance values. """ if callable(alpha): distance_function = alpha else: try: a = np.float64(alpha) def distance_function(x): return 1 / (x ** (1 / a)) except TypeError: print("Alpha parameter must be callable or an array-like") raise except ZeroDivisionError: raise ValueError("Alpha parameter must be non-zero") m = np.max(distance_function(D[D != 0])) M = np.zeros(D.shape) M[D != 0] = distance_function(D[D != 0]) M[D == 0] = m return M
python
def distance_to_contact(D, alpha=1): """Compute contact matrix from input distance matrix. Distance values of zeroes are given the largest contact count otherwise inferred non-zero distance values. """ if callable(alpha): distance_function = alpha else: try: a = np.float64(alpha) def distance_function(x): return 1 / (x ** (1 / a)) except TypeError: print("Alpha parameter must be callable or an array-like") raise except ZeroDivisionError: raise ValueError("Alpha parameter must be non-zero") m = np.max(distance_function(D[D != 0])) M = np.zeros(D.shape) M[D != 0] = distance_function(D[D != 0]) M[D == 0] = m return M
['def', 'distance_to_contact', '(', 'D', ',', 'alpha', '=', '1', ')', ':', 'if', 'callable', '(', 'alpha', ')', ':', 'distance_function', '=', 'alpha', 'else', ':', 'try', ':', 'a', '=', 'np', '.', 'float64', '(', 'alpha', ')', 'def', 'distance_function', '(', 'x', ')', ':', 'return', '1', '/', '(', 'x', '**', '(', '1', '/', 'a', ')', ')', 'except', 'TypeError', ':', 'print', '(', '"Alpha parameter must be callable or an array-like"', ')', 'raise', 'except', 'ZeroDivisionError', ':', 'raise', 'ValueError', '(', '"Alpha parameter must be non-zero"', ')', 'm', '=', 'np', '.', 'max', '(', 'distance_function', '(', 'D', '[', 'D', '!=', '0', ']', ')', ')', 'M', '=', 'np', '.', 'zeros', '(', 'D', '.', 'shape', ')', 'M', '[', 'D', '!=', '0', ']', '=', 'distance_function', '(', 'D', '[', 'D', '!=', '0', ']', ')', 'M', '[', 'D', '==', '0', ']', '=', 'm', 'return', 'M']
Compute contact matrix from input distance matrix. Distance values of zeroes are given the largest contact count otherwise inferred non-zero distance values.
['Compute', 'contact', 'matrix', 'from', 'input', 'distance', 'matrix', '.', 'Distance', 'values', 'of', 'zeroes', 'are', 'given', 'the', 'largest', 'contact', 'count', 'otherwise', 'inferred', 'non', '-', 'zero', 'distance', 'values', '.']
train
https://github.com/koszullab/metaTOR/blob/0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a/metator/scripts/hicstuff.py#L918-L942
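A quick numeric check of the transformation with the default alpha=1, where each non-zero distance d becomes 1/d and zero distances take the largest inferred contact:

import numpy as np

D = np.array([[0.0, 2.0], [4.0, 0.0]])
contacts = 1.0 / D[D != 0]            # alpha = 1  =>  f(d) = 1 / d
M = np.zeros(D.shape)
M[D != 0] = contacts
M[D == 0] = contacts.max()            # zeros get the largest contact count
assert np.allclose(M, [[0.5, 0.5], [0.25, 0.5]])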
4,138
yougov/pmxbot
pmxbot/quotes.py
MongoDBQuotes.delete
def delete(self, lookup): """ If exactly one quote matches, delete it. Otherwise, raise a ValueError. """ lookup, num = self.split_num(lookup) if num: result = self.find_matches(lookup)[num - 1] else: result, = self.find_matches(lookup) self.db.delete_one(result)
python
def delete(self, lookup): """ If exactly one quote matches, delete it. Otherwise, raise a ValueError. """ lookup, num = self.split_num(lookup) if num: result = self.find_matches(lookup)[num - 1] else: result, = self.find_matches(lookup) self.db.delete_one(result)
['def', 'delete', '(', 'self', ',', 'lookup', ')', ':', 'lookup', ',', 'num', '=', 'self', '.', 'split_num', '(', 'lookup', ')', 'if', 'num', ':', 'result', '=', 'self', '.', 'find_matches', '(', 'lookup', ')', '[', 'num', '-', '1', ']', 'else', ':', 'result', ',', '=', 'self', '.', 'find_matches', '(', 'lookup', ')', 'self', '.', 'db', '.', 'delete_one', '(', 'result', ')']
If exactly one quote matches, delete it. Otherwise, raise a ValueError.
['If', 'exactly', 'one', 'quote', 'matches', 'delete', 'it', '.', 'Otherwise', 'raise', 'a', 'ValueError', '.']
train
https://github.com/yougov/pmxbot/blob/5da84a3258a0fd73cb35b60e39769a5d7bfb2ba7/pmxbot/quotes.py#L153-L163
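The single-element unpacking `result, = self.find_matches(lookup)` is what enforces the "exactly one match" contract; with zero or several matches it raises ValueError, as a plain-list sketch shows:

matches = ["only quote"]
result, = matches                      # exactly one element: fine
try:
    result, = ["first", "second"]      # two matches
except ValueError as exc:
    print(exc)                         # too many values to unpack (expected 1)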
4,139
pip-services3-python/pip-services3-commons-python
pip_services3_commons/refer/References.py
References.get_all
def get_all(self): """ Gets all component references registered in this reference map. :return: a list with component references. """ components = [] self._lock.acquire() try: for reference in self._references: components.append(reference.get_component()) finally: self._lock.release() return components
python
def get_all(self): """ Gets all component references registered in this reference map. :return: a list with component references. """ components = [] self._lock.acquire() try: for reference in self._references: components.append(reference.get_component()) finally: self._lock.release() return components
['def', 'get_all', '(', 'self', ')', ':', 'components', '=', '[', ']', 'self', '.', '_lock', '.', 'acquire', '(', ')', 'try', ':', 'for', 'reference', 'in', 'self', '.', '_references', ':', 'components', '.', 'append', '(', 'reference', '.', 'get_component', '(', ')', ')', 'finally', ':', 'self', '.', '_lock', '.', 'release', '(', ')', 'return', 'components']
Gets all component references registered in this reference map. :return: a list with component references.
['Gets', 'all', 'component', 'references', 'registered', 'in', 'this', 'reference', 'map', '.']
train
https://github.com/pip-services3-python/pip-services3-commons-python/blob/22cbbb3e91e49717f65c083d36147fdb07ba9e3b/pip_services3_commons/refer/References.py#L147-L162
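The acquire/try/finally dance above is exactly what a `with` block does for you; an equivalent sketch with a plain threading.Lock:

import threading

lock = threading.Lock()
references = ["component-a", "component-b"]

with lock:                             # acquired here, released on exit
    components = list(references)      # released even if the body raises
assert components == ["component-a", "component-b"]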
4,140
StackStorm/pybind
pybind/nos/v6_0_2f/brocade_firmware_rpc/firmware_download/input/__init__.py
input._set_usb
def _set_usb(self, v, load=False): """ Setter method for usb, mapped from YANG variable /brocade_firmware_rpc/firmware_download/input/usb (container) If this variable is read-only (config: false) in the source YANG file, then _set_usb is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_usb() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=usb.usb, is_container='container', presence=False, yang_name="usb", rest_name="usb", parent=self, choice=(u'protocol-type', u'usb-protocol'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions=None, namespace='urn:brocade.com:mgmt:brocade-firmware', defining_module='brocade-firmware', yang_type='container', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """usb must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=usb.usb, is_container='container', presence=False, yang_name="usb", rest_name="usb", parent=self, choice=(u'protocol-type', u'usb-protocol'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions=None, namespace='urn:brocade.com:mgmt:brocade-firmware', defining_module='brocade-firmware', yang_type='container', is_config=True)""", }) self.__usb = t if hasattr(self, '_set'): self._set()
python
def _set_usb(self, v, load=False): """ Setter method for usb, mapped from YANG variable /brocade_firmware_rpc/firmware_download/input/usb (container) If this variable is read-only (config: false) in the source YANG file, then _set_usb is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_usb() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=usb.usb, is_container='container', presence=False, yang_name="usb", rest_name="usb", parent=self, choice=(u'protocol-type', u'usb-protocol'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions=None, namespace='urn:brocade.com:mgmt:brocade-firmware', defining_module='brocade-firmware', yang_type='container', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """usb must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=usb.usb, is_container='container', presence=False, yang_name="usb", rest_name="usb", parent=self, choice=(u'protocol-type', u'usb-protocol'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions=None, namespace='urn:brocade.com:mgmt:brocade-firmware', defining_module='brocade-firmware', yang_type='container', is_config=True)""", }) self.__usb = t if hasattr(self, '_set'): self._set()
['def', '_set_usb', '(', 'self', ',', 'v', ',', 'load', '=', 'False', ')', ':', 'if', 'hasattr', '(', 'v', ',', '"_utype"', ')', ':', 'v', '=', 'v', '.', '_utype', '(', 'v', ')', 'try', ':', 't', '=', 'YANGDynClass', '(', 'v', ',', 'base', '=', 'usb', '.', 'usb', ',', 'is_container', '=', "'container'", ',', 'presence', '=', 'False', ',', 'yang_name', '=', '"usb"', ',', 'rest_name', '=', '"usb"', ',', 'parent', '=', 'self', ',', 'choice', '=', '(', "u'protocol-type'", ',', "u'usb-protocol'", ')', ',', 'path_helper', '=', 'self', '.', '_path_helper', ',', 'extmethods', '=', 'self', '.', '_extmethods', ',', 'register_paths', '=', 'False', ',', 'extensions', '=', 'None', ',', 'namespace', '=', "'urn:brocade.com:mgmt:brocade-firmware'", ',', 'defining_module', '=', "'brocade-firmware'", ',', 'yang_type', '=', "'container'", ',', 'is_config', '=', 'True', ')', 'except', '(', 'TypeError', ',', 'ValueError', ')', ':', 'raise', 'ValueError', '(', '{', "'error-string'", ':', '"""usb must be of a type compatible with container"""', ',', "'defined-type'", ':', '"container"', ',', "'generated-type'", ':', '"""YANGDynClass(base=usb.usb, is_container=\'container\', presence=False, yang_name="usb", rest_name="usb", parent=self, choice=(u\'protocol-type\', u\'usb-protocol\'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions=None, namespace=\'urn:brocade.com:mgmt:brocade-firmware\', defining_module=\'brocade-firmware\', yang_type=\'container\', is_config=True)"""', ',', '}', ')', 'self', '.', '__usb', '=', 't', 'if', 'hasattr', '(', 'self', ',', "'_set'", ')', ':', 'self', '.', '_set', '(', ')']
Setter method for usb, mapped from YANG variable /brocade_firmware_rpc/firmware_download/input/usb (container) If this variable is read-only (config: false) in the source YANG file, then _set_usb is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_usb() directly.
['Setter', 'method', 'for', 'usb', 'mapped', 'from', 'YANG', 'variable', '/', 'brocade_firmware_rpc', '/', 'firmware_download', '/', 'input', '/', 'usb', '(', 'container', ')', 'If', 'this', 'variable', 'is', 'read', '-', 'only', '(', 'config', ':', 'false', ')', 'in', 'the', 'source', 'YANG', 'file', 'then', '_set_usb', 'is', 'considered', 'as', 'a', 'private', 'method', '.', 'Backends', 'looking', 'to', 'populate', 'this', 'variable', 'should', 'do', 'so', 'via', 'calling', 'thisObj', '.', '_set_usb', '()', 'directly', '.']
train
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/nos/v6_0_2f/brocade_firmware_rpc/firmware_download/input/__init__.py#L200-L221
4,141
srittau/python-asserts
asserts/__init__.py
assert_dict_equal
def assert_dict_equal( first, second, key_msg_fmt="{msg}", value_msg_fmt="{msg}" ): """Fail unless first dictionary equals second. The dictionaries are considered equal, if they both contain the same keys, and their respective values are also equal. >>> assert_dict_equal({"foo": 5}, {"foo": 5}) >>> assert_dict_equal({"foo": 5}, {}) Traceback (most recent call last): ... AssertionError: key 'foo' missing from right dict The following key_msg_fmt arguments are supported, if the keys do not match: * msg - the default error message * first - the first dict * second - the second dict * missing_keys - list of keys missing from right * extra_keys - list of keys missing from left The following value_msg_fmt arguments are supported, if a value does not match: * msg - the default error message * first - the first dict * second - the second dict * key - the key where the value does not match * first_value - the value in the first dict * second_value - the value in the second dict """ first_keys = set(first.keys()) second_keys = set(second.keys()) missing_keys = list(first_keys - second_keys) extra_keys = list(second_keys - first_keys) if missing_keys or extra_keys: if missing_keys: if len(missing_keys) == 1: msg = "key {!r} missing from right dict".format( missing_keys[0] ) else: keys = ", ".join(sorted(repr(k) for k in missing_keys)) msg = "keys {} missing from right dict".format(keys) else: if len(extra_keys) == 1: msg = "extra key {!r} in right dict".format(extra_keys[0]) else: keys = ", ".join(sorted(repr(k) for k in extra_keys)) msg = "extra keys {} in right dict".format(keys) if key_msg_fmt: msg = key_msg_fmt.format( msg=msg, first=first, second=second, missing_keys=missing_keys, extra_keys=extra_keys, ) raise AssertionError(msg) for key in first: first_value = first[key] second_value = second[key] msg = "key '{}' differs: {!r} != {!r}".format( key, first_value, second_value ) if value_msg_fmt: msg = value_msg_fmt.format( msg=msg, first=first, second=second, key=key, first_value=first_value, second_value=second_value, ) msg = msg.replace("{", "{{").replace("}", "}}") assert_equal(first_value, second_value, msg_fmt=msg)
python
def assert_dict_equal( first, second, key_msg_fmt="{msg}", value_msg_fmt="{msg}" ): """Fail unless first dictionary equals second. The dictionaries are considered equal, if they both contain the same keys, and their respective values are also equal. >>> assert_dict_equal({"foo": 5}, {"foo": 5}) >>> assert_dict_equal({"foo": 5}, {}) Traceback (most recent call last): ... AssertionError: key 'foo' missing from right dict The following key_msg_fmt arguments are supported, if the keys do not match: * msg - the default error message * first - the first dict * second - the second dict * missing_keys - list of keys missing from right * extra_keys - list of keys missing from left The following value_msg_fmt arguments are supported, if a value does not match: * msg - the default error message * first - the first dict * second - the second dict * key - the key where the value does not match * first_value - the value in the first dict * second_value - the value in the second dict """ first_keys = set(first.keys()) second_keys = set(second.keys()) missing_keys = list(first_keys - second_keys) extra_keys = list(second_keys - first_keys) if missing_keys or extra_keys: if missing_keys: if len(missing_keys) == 1: msg = "key {!r} missing from right dict".format( missing_keys[0] ) else: keys = ", ".join(sorted(repr(k) for k in missing_keys)) msg = "keys {} missing from right dict".format(keys) else: if len(extra_keys) == 1: msg = "extra key {!r} in right dict".format(extra_keys[0]) else: keys = ", ".join(sorted(repr(k) for k in extra_keys)) msg = "extra keys {} in right dict".format(keys) if key_msg_fmt: msg = key_msg_fmt.format( msg=msg, first=first, second=second, missing_keys=missing_keys, extra_keys=extra_keys, ) raise AssertionError(msg) for key in first: first_value = first[key] second_value = second[key] msg = "key '{}' differs: {!r} != {!r}".format( key, first_value, second_value ) if value_msg_fmt: msg = value_msg_fmt.format( msg=msg, first=first, second=second, key=key, first_value=first_value, second_value=second_value, ) msg = msg.replace("{", "{{").replace("}", "}}") assert_equal(first_value, second_value, msg_fmt=msg)
['def', 'assert_dict_equal', '(', 'first', ',', 'second', ',', 'key_msg_fmt', '=', '"{msg}"', ',', 'value_msg_fmt', '=', '"{msg}"', ')', ':', 'first_keys', '=', 'set', '(', 'first', '.', 'keys', '(', ')', ')', 'second_keys', '=', 'set', '(', 'second', '.', 'keys', '(', ')', ')', 'missing_keys', '=', 'list', '(', 'first_keys', '-', 'second_keys', ')', 'extra_keys', '=', 'list', '(', 'second_keys', '-', 'first_keys', ')', 'if', 'missing_keys', 'or', 'extra_keys', ':', 'if', 'missing_keys', ':', 'if', 'len', '(', 'missing_keys', ')', '==', '1', ':', 'msg', '=', '"key {!r} missing from right dict"', '.', 'format', '(', 'missing_keys', '[', '0', ']', ')', 'else', ':', 'keys', '=', '", "', '.', 'join', '(', 'sorted', '(', 'repr', '(', 'k', ')', 'for', 'k', 'in', 'missing_keys', ')', ')', 'msg', '=', '"keys {} missing from right dict"', '.', 'format', '(', 'keys', ')', 'else', ':', 'if', 'len', '(', 'extra_keys', ')', '==', '1', ':', 'msg', '=', '"extra key {!r} in right dict"', '.', 'format', '(', 'extra_keys', '[', '0', ']', ')', 'else', ':', 'keys', '=', '", "', '.', 'join', '(', 'sorted', '(', 'repr', '(', 'k', ')', 'for', 'k', 'in', 'extra_keys', ')', ')', 'msg', '=', '"extra keys {} in right dict"', '.', 'format', '(', 'keys', ')', 'if', 'key_msg_fmt', ':', 'msg', '=', 'key_msg_fmt', '.', 'format', '(', 'msg', '=', 'msg', ',', 'first', '=', 'first', ',', 'second', '=', 'second', ',', 'missing_keys', '=', 'missing_keys', ',', 'extra_keys', '=', 'extra_keys', ',', ')', 'raise', 'AssertionError', '(', 'msg', ')', 'for', 'key', 'in', 'first', ':', 'first_value', '=', 'first', '[', 'key', ']', 'second_value', '=', 'second', '[', 'key', ']', 'msg', '=', '"key \'{}\' differs: {!r} != {!r}"', '.', 'format', '(', 'key', ',', 'first_value', ',', 'second_value', ')', 'if', 'value_msg_fmt', ':', 'msg', '=', 'value_msg_fmt', '.', 'format', '(', 'msg', '=', 'msg', ',', 'first', '=', 'first', ',', 'second', '=', 'second', ',', 'key', '=', 'key', ',', 'first_value', '=', 'first_value', ',', 'second_value', '=', 'second_value', ',', ')', 'msg', '=', 'msg', '.', 'replace', '(', '"{"', ',', '"{{"', ')', '.', 'replace', '(', '"}"', ',', '"}}"', ')', 'assert_equal', '(', 'first_value', ',', 'second_value', ',', 'msg_fmt', '=', 'msg', ')']
Fail unless first dictionary equals second. The dictionaries are considered equal, if they both contain the same keys, and their respective values are also equal. >>> assert_dict_equal({"foo": 5}, {"foo": 5}) >>> assert_dict_equal({"foo": 5}, {}) Traceback (most recent call last): ... AssertionError: key 'foo' missing from right dict The following key_msg_fmt arguments are supported, if the keys do not match: * msg - the default error message * first - the first dict * second - the second dict * missing_keys - list of keys missing from right * extra_keys - list of keys missing from left The following value_msg_fmt arguments are supported, if a value does not match: * msg - the default error message * first - the first dict * second - the second dict * key - the key where the value does not match * first_value - the value in the first dict * second_value - the value in the second dict
['Fail', 'unless', 'first', 'dictionary', 'equals', 'second', '.']
train
https://github.com/srittau/python-asserts/blob/1d5c797031c68ee27552d1c94e7f918c3d3d0453/asserts/__init__.py#L305-L380
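A usage sketch of the key_msg_fmt hook, assuming assert_dict_equal from above is in scope; the format string may reference msg, first, second, missing_keys and extra_keys:

try:
    assert_dict_equal(
        {"foo": 5, "bar": 1},
        {"foo": 5},
        key_msg_fmt="{msg} (missing: {missing_keys})",
    )
except AssertionError as exc:
    print(exc)     # key 'bar' missing from right dict (missing: ['bar'])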
4,142
gwastro/pycbc
pycbc/inference/io/base_hdf.py
BaseInferenceFile.read_samples
def read_samples(self, parameters, array_class=None, **kwargs): """Reads samples for the given parameter(s). The ``parameters`` can be the name of any dataset in ``samples_group``, a virtual field or method of ``FieldArray`` (as long as the file contains the necessary fields to derive the virtual field or method), and/or any numpy function of these. The ``parameters`` are parsed to figure out what datasets are needed. Only those datasets will be loaded, and will be the base-level fields of the returned ``FieldArray``. The ``static_params`` are also added as attributes of the returned ``FieldArray``. Parameters ----------- fp : InferenceFile An open file handler to read the samples from. parameters : (list of) strings The parameter(s) to retrieve. array_class : FieldArray-like class, optional The type of array to return. The class must have ``from_kwargs`` and ``parse_parameters`` methods. If None, will return a ``FieldArray``. \**kwargs : All other keyword arguments are passed to ``read_raw_samples``. Returns ------- FieldArray : The samples as a ``FieldArray``. """ # get the type of array class to use if array_class is None: array_class = FieldArray # get the names of fields needed for the given parameters possible_fields = self[self.samples_group].keys() loadfields = array_class.parse_parameters(parameters, possible_fields) samples = self.read_raw_samples(loadfields, **kwargs) # convert to FieldArray samples = array_class.from_kwargs(**samples) # add the static params and attributes addatrs = (self.static_params.items() + self[self.samples_group].attrs.items()) for (p, val) in addatrs: setattr(samples, p, val) return samples
python
def read_samples(self, parameters, array_class=None, **kwargs): """Reads samples for the given parameter(s). The ``parameters`` can be the name of any dataset in ``samples_group``, a virtual field or method of ``FieldArray`` (as long as the file contains the necessary fields to derive the virtual field or method), and/or any numpy function of these. The ``parameters`` are parsed to figure out what datasets are needed. Only those datasets will be loaded, and will be the base-level fields of the returned ``FieldArray``. The ``static_params`` are also added as attributes of the returned ``FieldArray``. Parameters ----------- fp : InferenceFile An open file handler to read the samples from. parameters : (list of) strings The parameter(s) to retrieve. array_class : FieldArray-like class, optional The type of array to return. The class must have ``from_kwargs`` and ``parse_parameters`` methods. If None, will return a ``FieldArray``. \**kwargs : All other keyword arguments are passed to ``read_raw_samples``. Returns ------- FieldArray : The samples as a ``FieldArray``. """ # get the type of array class to use if array_class is None: array_class = FieldArray # get the names of fields needed for the given parameters possible_fields = self[self.samples_group].keys() loadfields = array_class.parse_parameters(parameters, possible_fields) samples = self.read_raw_samples(loadfields, **kwargs) # convert to FieldArray samples = array_class.from_kwargs(**samples) # add the static params and attributes addatrs = (self.static_params.items() + self[self.samples_group].attrs.items()) for (p, val) in addatrs: setattr(samples, p, val) return samples
['def', 'read_samples', '(', 'self', ',', 'parameters', ',', 'array_class', '=', 'None', ',', '*', '*', 'kwargs', ')', ':', '# get the type of array class to use', 'if', 'array_class', 'is', 'None', ':', 'array_class', '=', 'FieldArray', '# get the names of fields needed for the given parameters', 'possible_fields', '=', 'self', '[', 'self', '.', 'samples_group', ']', '.', 'keys', '(', ')', 'loadfields', '=', 'array_class', '.', 'parse_parameters', '(', 'parameters', ',', 'possible_fields', ')', 'samples', '=', 'self', '.', 'read_raw_samples', '(', 'loadfields', ',', '*', '*', 'kwargs', ')', '# convert to FieldArray', 'samples', '=', 'array_class', '.', 'from_kwargs', '(', '*', '*', 'samples', ')', '# add the static params and attributes', 'addatrs', '=', '(', 'self', '.', 'static_params', '.', 'items', '(', ')', '+', 'self', '[', 'self', '.', 'samples_group', ']', '.', 'attrs', '.', 'items', '(', ')', ')', 'for', '(', 'p', ',', 'val', ')', 'in', 'addatrs', ':', 'setattr', '(', 'samples', ',', 'p', ',', 'val', ')', 'return', 'samples']
Reads samples for the given parameter(s). The ``parameters`` can be the name of any dataset in ``samples_group``, a virtual field or method of ``FieldArray`` (as long as the file contains the necessary fields to derive the virtual field or method), and/or any numpy function of these. The ``parameters`` are parsed to figure out what datasets are needed. Only those datasets will be loaded, and will be the base-level fields of the returned ``FieldArray``. The ``static_params`` are also added as attributes of the returned ``FieldArray``. Parameters ----------- fp : InferenceFile An open file handler to read the samples from. parameters : (list of) strings The parameter(s) to retrieve. array_class : FieldArray-like class, optional The type of array to return. The class must have ``from_kwargs`` and ``parse_parameters`` methods. If None, will return a ``FieldArray``. \**kwargs : All other keyword arguments are passed to ``read_raw_samples``. Returns ------- FieldArray : The samples as a ``FieldArray``.
['Reads', 'samples', 'for', 'the', 'given', 'parameter', '(', 's', ')', '.']
train
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/inference/io/base_hdf.py#L141-L188
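Portability note: `self.static_params.items() + self[self.samples_group].attrs.items()` only works on Python 2; under Python 3 the same iteration can be had with itertools.chain (a sketch with made-up attribute values, not the library's code):

from itertools import chain

static_params = {"f_lower": 20.0}       # hypothetical values
group_attrs = {"nwalkers": 100}
for name, value in chain(static_params.items(), group_attrs.items()):
    print(name, value)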
4,143
danielfrg/word2vec
word2vec/wordclusters.py
WordClusters.get_cluster
def get_cluster(self, word): """ Returns the cluster number for a word in the vocabulary """ idx = self.ix(word) return self.clusters[idx]
python
def get_cluster(self, word): """ Returns the cluster number for a word in the vocabulary """ idx = self.ix(word) return self.clusters[idx]
['def', 'get_cluster', '(', 'self', ',', 'word', ')', ':', 'idx', '=', 'self', '.', 'ix', '(', 'word', ')', 'return', 'self', '.', 'clusters', '[', 'idx', ']']
Returns the cluster number for a word in the vocabulary
['Returns', 'the', 'cluster', 'number', 'for', 'a', 'word', 'in', 'the', 'vocabulary']
train
https://github.com/danielfrg/word2vec/blob/762200acec2941a030abed69e946838af35eb2ae/word2vec/wordclusters.py#L23-L28
4,144
adrn/gala
gala/dynamics/core.py
PhaseSpacePosition.angular_momentum
def angular_momentum(self): r""" Compute the angular momentum for the phase-space positions contained in this object:: .. math:: \boldsymbol{{L}} = \boldsymbol{{q}} \times \boldsymbol{{p}} See :ref:`shape-conventions` for more information about the shapes of input and output objects. Returns ------- L : :class:`~astropy.units.Quantity` Array of angular momentum vectors. Examples -------- >>> import numpy as np >>> import astropy.units as u >>> pos = np.array([1., 0, 0]) * u.au >>> vel = np.array([0, 2*np.pi, 0]) * u.au/u.yr >>> w = PhaseSpacePosition(pos, vel) >>> w.angular_momentum() # doctest: +FLOAT_CMP <Quantity [0. ,0. ,6.28318531] AU2 / yr> """ cart = self.represent_as(coord.CartesianRepresentation) return cart.pos.cross(cart.vel).xyz
python
def angular_momentum(self): r""" Compute the angular momentum for the phase-space positions contained in this object:: .. math:: \boldsymbol{{L}} = \boldsymbol{{q}} \times \boldsymbol{{p}} See :ref:`shape-conventions` for more information about the shapes of input and output objects. Returns ------- L : :class:`~astropy.units.Quantity` Array of angular momentum vectors. Examples -------- >>> import numpy as np >>> import astropy.units as u >>> pos = np.array([1., 0, 0]) * u.au >>> vel = np.array([0, 2*np.pi, 0]) * u.au/u.yr >>> w = PhaseSpacePosition(pos, vel) >>> w.angular_momentum() # doctest: +FLOAT_CMP <Quantity [0. ,0. ,6.28318531] AU2 / yr> """ cart = self.represent_as(coord.CartesianRepresentation) return cart.pos.cross(cart.vel).xyz
['def', 'angular_momentum', '(', 'self', ')', ':', 'cart', '=', 'self', '.', 'represent_as', '(', 'coord', '.', 'CartesianRepresentation', ')', 'return', 'cart', '.', 'pos', '.', 'cross', '(', 'cart', '.', 'vel', ')', '.', 'xyz']
r""" Compute the angular momentum for the phase-space positions contained in this object:: .. math:: \boldsymbol{{L}} = \boldsymbol{{q}} \times \boldsymbol{{p}} See :ref:`shape-conventions` for more information about the shapes of input and output objects. Returns ------- L : :class:`~astropy.units.Quantity` Array of angular momentum vectors. Examples -------- >>> import numpy as np >>> import astropy.units as u >>> pos = np.array([1., 0, 0]) * u.au >>> vel = np.array([0, 2*np.pi, 0]) * u.au/u.yr >>> w = PhaseSpacePosition(pos, vel) >>> w.angular_momentum() # doctest: +FLOAT_CMP <Quantity [0. ,0. ,6.28318531] AU2 / yr>
['r', 'Compute', 'the', 'angular', 'momentum', 'for', 'the', 'phase', '-', 'space', 'positions', 'contained', 'in', 'this', 'object', '::']
train
https://github.com/adrn/gala/blob/ea95575a0df1581bb4b0986aebd6eea8438ab7eb/gala/dynamics/core.py#L689-L718
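The same L = q x p cross product without the astropy machinery, reproducing the docstring's circular-orbit example (units dropped):

import numpy as np

pos = np.array([1.0, 0.0, 0.0])              # AU
vel = np.array([0.0, 2.0 * np.pi, 0.0])      # AU / yr
L = np.cross(pos, vel)
assert np.allclose(L, [0.0, 0.0, 2.0 * np.pi])   # ~6.28318531 AU^2 / yr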
4,145
projectatomic/atomic-reactor
docs/manpage/generate_manpage.py
ManPageFormatter.create_subcommand_synopsis
def create_subcommand_synopsis(self, parser): """ show usage with description for commands """ self.add_usage(parser.usage, parser._get_positional_actions(), None, prefix='') usage = self._format_usage(parser.usage, parser._get_positional_actions(), None, '') return self._bold(usage)
python
def create_subcommand_synopsis(self, parser): """ show usage with description for commands """ self.add_usage(parser.usage, parser._get_positional_actions(), None, prefix='') usage = self._format_usage(parser.usage, parser._get_positional_actions(), None, '') return self._bold(usage)
['def', 'create_subcommand_synopsis', '(', 'self', ',', 'parser', ')', ':', 'self', '.', 'add_usage', '(', 'parser', '.', 'usage', ',', 'parser', '.', '_get_positional_actions', '(', ')', ',', 'None', ',', 'prefix', '=', "''", ')', 'usage', '=', 'self', '.', '_format_usage', '(', 'parser', '.', 'usage', ',', 'parser', '.', '_get_positional_actions', '(', ')', ',', 'None', ',', "''", ')', 'return', 'self', '.', '_bold', '(', 'usage', ')']
show usage with description for commands
['show', 'usage', 'with', 'description', 'for', 'commands']
train
https://github.com/projectatomic/atomic-reactor/blob/fd31c01b964097210bf169960d051e5f04019a80/docs/manpage/generate_manpage.py#L89-L95
4,146
tanghaibao/jcvi
jcvi/formats/coords.py
blast
def blast(args): """ %prog blast <deltafile|coordsfile> Convert delta or coordsfile to BLAST tabular output. """ p = OptionParser(blast.__doc__) opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) deltafile, = args blastfile = deltafile.rsplit(".", 1)[0] + ".blast" if need_update(deltafile, blastfile): coords = Coords(deltafile) fw = open(blastfile, "w") for c in coords: print(c.blastline, file=fw)
python
def blast(args): """ %prog blast <deltafile|coordsfile> Convert delta or coordsfile to BLAST tabular output. """ p = OptionParser(blast.__doc__) opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) deltafile, = args blastfile = deltafile.rsplit(".", 1)[0] + ".blast" if need_update(deltafile, blastfile): coords = Coords(deltafile) fw = open(blastfile, "w") for c in coords: print(c.blastline, file=fw)
['def', 'blast', '(', 'args', ')', ':', 'p', '=', 'OptionParser', '(', 'blast', '.', '__doc__', ')', 'opts', ',', 'args', '=', 'p', '.', 'parse_args', '(', 'args', ')', 'if', 'len', '(', 'args', ')', '!=', '1', ':', 'sys', '.', 'exit', '(', 'not', 'p', '.', 'print_help', '(', ')', ')', 'deltafile', ',', '=', 'args', 'blastfile', '=', 'deltafile', '.', 'rsplit', '(', '"."', ',', '1', ')', '[', '0', ']', '+', '".blast"', 'if', 'need_update', '(', 'deltafile', ',', 'blastfile', ')', ':', 'coords', '=', 'Coords', '(', 'deltafile', ')', 'fw', '=', 'open', '(', 'blastfile', ',', '"w"', ')', 'for', 'c', 'in', 'coords', ':', 'print', '(', 'c', '.', 'blastline', ',', 'file', '=', 'fw', ')']
%prog blast <deltafile|coordsfile> Convert delta or coordsfile to BLAST tabular output.
['%prog', 'blast', '<deltafile|coordsfile', '>']
train
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/coords.py#L294-L313
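A file-handling note on the loop above: fw is never explicitly closed, so the last lines may sit in the buffer until interpreter exit; a with-block sketch (not the project's code) closes deterministically:

blast_lines = ["q1\ts1\t99.0", "q2\ts2\t97.5"]   # stand-ins for c.blastline values

with open("example.blast", "w") as fw:
    for line in blast_lines:
        print(line, file=fw)            # flushed and closed when the block exits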
4,147
spyder-ide/spyder
spyder/plugins/ipythonconsole/plugin.py
IPythonConsole.interpreter_versions
def interpreter_versions(self): """Python and IPython versions used by clients""" if CONF.get('main_interpreter', 'default'): from IPython.core import release versions = dict( python_version = sys.version.split("\n")[0].strip(), ipython_version = release.version ) else: import subprocess versions = {} pyexec = CONF.get('main_interpreter', 'executable') py_cmd = "%s -c 'import sys; print(sys.version.split(\"\\n\")[0])'" % \ pyexec ipy_cmd = "%s -c 'import IPython.core.release as r; print(r.version)'" \ % pyexec for cmd in [py_cmd, ipy_cmd]: try: proc = programs.run_shell_command(cmd) output, _err = proc.communicate() except subprocess.CalledProcessError: output = '' output = output.decode().split('\n')[0].strip() if 'IPython' in cmd: versions['ipython_version'] = output else: versions['python_version'] = output return versions
python
def interpreter_versions(self): """Python and IPython versions used by clients""" if CONF.get('main_interpreter', 'default'): from IPython.core import release versions = dict( python_version = sys.version.split("\n")[0].strip(), ipython_version = release.version ) else: import subprocess versions = {} pyexec = CONF.get('main_interpreter', 'executable') py_cmd = "%s -c 'import sys; print(sys.version.split(\"\\n\")[0])'" % \ pyexec ipy_cmd = "%s -c 'import IPython.core.release as r; print(r.version)'" \ % pyexec for cmd in [py_cmd, ipy_cmd]: try: proc = programs.run_shell_command(cmd) output, _err = proc.communicate() except subprocess.CalledProcessError: output = '' output = output.decode().split('\n')[0].strip() if 'IPython' in cmd: versions['ipython_version'] = output else: versions['python_version'] = output return versions
['def', 'interpreter_versions', '(', 'self', ')', ':', 'if', 'CONF', '.', 'get', '(', "'main_interpreter'", ',', "'default'", ')', ':', 'from', 'IPython', '.', 'core', 'import', 'release', 'versions', '=', 'dict', '(', 'python_version', '=', 'sys', '.', 'version', '.', 'split', '(', '"\\n"', ')', '[', '0', ']', '.', 'strip', '(', ')', ',', 'ipython_version', '=', 'release', '.', 'version', ')', 'else', ':', 'import', 'subprocess', 'versions', '=', '{', '}', 'pyexec', '=', 'CONF', '.', 'get', '(', "'main_interpreter'", ',', "'executable'", ')', 'py_cmd', '=', '"%s -c \'import sys; print(sys.version.split(\\"\\\\n\\")[0])\'"', '%', 'pyexec', 'ipy_cmd', '=', '"%s -c \'import IPython.core.release as r; print(r.version)\'"', '%', 'pyexec', 'for', 'cmd', 'in', '[', 'py_cmd', ',', 'ipy_cmd', ']', ':', 'try', ':', 'proc', '=', 'programs', '.', 'run_shell_command', '(', 'cmd', ')', 'output', ',', '_err', '=', 'proc', '.', 'communicate', '(', ')', 'except', 'subprocess', '.', 'CalledProcessError', ':', 'output', '=', "''", 'output', '=', 'output', '.', 'decode', '(', ')', '.', 'split', '(', "'\\n'", ')', '[', '0', ']', '.', 'strip', '(', ')', 'if', "'IPython'", 'in', 'cmd', ':', 'versions', '[', "'ipython_version'", ']', '=', 'output', 'else', ':', 'versions', '[', "'python_version'", ']', '=', 'output', 'return', 'versions']
Python and IPython versions used by clients
['Python', 'and', 'IPython', 'versions', 'used', 'by', 'clients']
train
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/ipythonconsole/plugin.py#L824-L852
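The external-interpreter probe reduces to this subprocess pattern (programs.run_shell_command is Spyder's wrapper; plain subprocess shown here for illustration):

import subprocess, sys

cmd = [sys.executable, "-c", "import sys; print(sys.version.split('\\n')[0])"]
output = subprocess.check_output(cmd).decode().split("\n")[0].strip()
print(output)                           # e.g. 3.11.4 (main, ...)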
4,148
JoeVirtual/KonFoo
konfoo/utils.py
d3flare_json
def d3flare_json(metadata, file=None, **options): """ Converts the *metadata* dictionary of a container or field into a ``flare.json`` formatted string or formatted stream written to the *file* The ``flare.json`` format is defined by the `d3.js <https://d3js.org/>`_ graphic library. The ``flare.json`` format looks like this: .. code-block:: JSON { "class": "class of the field or container", "name": "name of the field or container", "size": "bit size of the field", "value": "value of the field", "children": [] } :param dict metadata: metadata generated from a :class:`Structure`, :class:`Sequence`, :class:`Array` or any :class:`Field` instance. :param file file: file-like object. """ def convert(root): dct = OrderedDict() item_type = root.get('type') dct['class'] = root.get('class') dct['name'] = root.get('name') if item_type is ItemClass.Field.name: dct['size'] = root.get('size') dct['value'] = root.get('value') children = root.get('member') if children: # Any containable class with children dct['children'] = list() if item_type is ItemClass.Pointer.name: # Create pointer address field as child field = OrderedDict() field['class'] = dct['class'] field['name'] = '*' + dct['name'] field['size'] = root.get('size') field['value'] = root.get('value') dct['children'].append(field) for child in map(convert, children): # Recursive function call map(fnc, args). dct['children'].append(child) elif item_type is ItemClass.Pointer.name: # Null pointer (None pointer) dct['size'] = root.get('size') dct['value'] = root.get('value') return dct options['indent'] = options.get('indent', 2) if file: return json.dump(convert(metadata), file, **options) else: return json.dumps(convert(metadata), **options)
python
def d3flare_json(metadata, file=None, **options): """ Converts the *metadata* dictionary of a container or field into a ``flare.json`` formatted string or formatted stream written to the *file* The ``flare.json`` format is defined by the `d3.js <https://d3js.org/>`_ graphic library. The ``flare.json`` format looks like this: .. code-block:: JSON { "class": "class of the field or container", "name": "name of the field or container", "size": "bit size of the field", "value": "value of the field", "children": [] } :param dict metadata: metadata generated from a :class:`Structure`, :class:`Sequence`, :class:`Array` or any :class:`Field` instance. :param file file: file-like object. """ def convert(root): dct = OrderedDict() item_type = root.get('type') dct['class'] = root.get('class') dct['name'] = root.get('name') if item_type is ItemClass.Field.name: dct['size'] = root.get('size') dct['value'] = root.get('value') children = root.get('member') if children: # Any containable class with children dct['children'] = list() if item_type is ItemClass.Pointer.name: # Create pointer address field as child field = OrderedDict() field['class'] = dct['class'] field['name'] = '*' + dct['name'] field['size'] = root.get('size') field['value'] = root.get('value') dct['children'].append(field) for child in map(convert, children): # Recursive function call map(fnc, args). dct['children'].append(child) elif item_type is ItemClass.Pointer.name: # Null pointer (None pointer) dct['size'] = root.get('size') dct['value'] = root.get('value') return dct options['indent'] = options.get('indent', 2) if file: return json.dump(convert(metadata), file, **options) else: return json.dumps(convert(metadata), **options)
['def', 'd3flare_json', '(', 'metadata', ',', 'file', '=', 'None', ',', '*', '*', 'options', ')', ':', 'def', 'convert', '(', 'root', ')', ':', 'dct', '=', 'OrderedDict', '(', ')', 'item_type', '=', 'root', '.', 'get', '(', "'type'", ')', 'dct', '[', "'class'", ']', '=', 'root', '.', 'get', '(', "'class'", ')', 'dct', '[', "'name'", ']', '=', 'root', '.', 'get', '(', "'name'", ')', 'if', 'item_type', 'is', 'ItemClass', '.', 'Field', '.', 'name', ':', 'dct', '[', "'size'", ']', '=', 'root', '.', 'get', '(', "'size'", ')', 'dct', '[', "'value'", ']', '=', 'root', '.', 'get', '(', "'value'", ')', 'children', '=', 'root', '.', 'get', '(', "'member'", ')', 'if', 'children', ':', '# Any containable class with children', 'dct', '[', "'children'", ']', '=', 'list', '(', ')', 'if', 'item_type', 'is', 'ItemClass', '.', 'Pointer', '.', 'name', ':', '# Create pointer address field as child', 'field', '=', 'OrderedDict', '(', ')', 'field', '[', "'class'", ']', '=', 'dct', '[', "'class'", ']', 'field', '[', "'name'", ']', '=', "'*'", '+', 'dct', '[', "'name'", ']', 'field', '[', "'size'", ']', '=', 'root', '.', 'get', '(', "'size'", ')', 'field', '[', "'value'", ']', '=', 'root', '.', 'get', '(', "'value'", ')', 'dct', '[', "'children'", ']', '.', 'append', '(', 'field', ')', 'for', 'child', 'in', 'map', '(', 'convert', ',', 'children', ')', ':', '# Recursive function call map(fnc, args).', 'dct', '[', "'children'", ']', '.', 'append', '(', 'child', ')', 'elif', 'item_type', 'is', 'ItemClass', '.', 'Pointer', '.', 'name', ':', '# Null pointer (None pointer)', 'dct', '[', "'size'", ']', '=', 'root', '.', 'get', '(', "'size'", ')', 'dct', '[', "'value'", ']', '=', 'root', '.', 'get', '(', "'value'", ')', 'return', 'dct', 'options', '[', "'indent'", ']', '=', 'options', '.', 'get', '(', "'indent'", ',', '2', ')', 'if', 'file', ':', 'return', 'json', '.', 'dump', '(', 'convert', '(', 'metadata', ')', ',', 'file', ',', '*', '*', 'options', ')', 'else', ':', 'return', 'json', '.', 'dumps', '(', 'convert', '(', 'metadata', ')', ',', '*', '*', 'options', ')']
Converts the *metadata* dictionary of a container or field into a ``flare.json`` formatted string or formatted stream written to the *file* The ``flare.json`` format is defined by the `d3.js <https://d3js.org/>`_ graphic library. The ``flare.json`` format looks like this: .. code-block:: JSON { "class": "class of the field or container", "name": "name of the field or container", "size": "bit size of the field", "value": "value of the field", "children": [] } :param dict metadata: metadata generated from a :class:`Structure`, :class:`Sequence`, :class:`Array` or any :class:`Field` instance. :param file file: file-like object.
['Converts', 'the', '*', 'metadata', '*', 'dictionary', 'of', 'a', 'container', 'or', 'field', 'into', 'a', 'flare', '.', 'json', 'formatted', 'string', 'or', 'formatted', 'stream', 'written', 'to', 'the', '*', 'file', '*']
train
https://github.com/JoeVirtual/KonFoo/blob/0c62ef5c2bed4deaf908b34082e4de2544532fdc/konfoo/utils.py#L168-L228
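For a single field node, the conversion amounts to this shape (the metadata keys follow the root.get(...) calls above; the values here are made up):

metadata = {"type": "Field", "class": "Decimal8", "name": "size", "size": 8, "value": 2}
# d3flare_json(metadata) would emit, with the default indent of 2:
# {
#   "class": "Decimal8",
#   "name": "size",
#   "size": 8,
#   "value": 2
# }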
4,149
santoshphilip/eppy
eppy/geometry/height_surface.py
height
def height(poly): """Average height of the polygon: the mean z-coordinate of its vertices.""" num = len(poly) hgt = 0.0 for i in range(num): hgt += (poly[i][2]) return hgt/num
python
def height(poly): """Average height of the polygon: the mean z-coordinate of its vertices.""" num = len(poly) hgt = 0.0 for i in range(num): hgt += (poly[i][2]) return hgt/num
['def', 'height', '(', 'poly', ')', ':', 'num', '=', 'len', '(', 'poly', ')', 'hgt', '=', '0.0', 'for', 'i', 'in', 'range', '(', 'num', ')', ':', 'hgt', '+=', '(', 'poly', '[', 'i', ']', '[', '2', ']', ')', 'return', 'hgt', '/', 'num']
Average height of the polygon: the mean z-coordinate of its vertices.
['Average', 'height', 'of', 'the', 'polygon', ':', 'the', 'mean', 'z', '-', 'coordinate', 'of', 'its', 'vertices', '.']
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/geometry/height_surface.py#L40-L46
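A quick check of the mean-z computation on a unit square lifted to z heights of 0 and 2:

poly = [(0, 0, 0.0), (1, 0, 2.0), (1, 1, 2.0), (0, 1, 0.0)]
assert sum(p[2] for p in poly) / len(poly) == 1.0   # average of 0, 2, 2, 0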
4,150
RudolfCardinal/pythonlib
cardinal_pythonlib/exceptions.py
recover_info_from_exception
def recover_info_from_exception(err: Exception) -> Dict: """ Retrieves the information added to an exception by :func:`add_info_to_exception`. """ if len(err.args) < 1: return {} info = err.args[-1] if not isinstance(info, dict): return {} return info
python
def recover_info_from_exception(err: Exception) -> Dict: """ Retrieves the information added to an exception by :func:`add_info_to_exception`. """ if len(err.args) < 1: return {} info = err.args[-1] if not isinstance(info, dict): return {} return info
['def', 'recover_info_from_exception', '(', 'err', ':', 'Exception', ')', '->', 'Dict', ':', 'if', 'len', '(', 'err', '.', 'args', ')', '<', '1', ':', 'return', '{', '}', 'info', '=', 'err', '.', 'args', '[', '-', '1', ']', 'if', 'not', 'isinstance', '(', 'info', ',', 'dict', ')', ':', 'return', '{', '}', 'return', 'info']
Retrieves the information added to an exception by :func:`add_info_to_exception`.
['Retrieves', 'the', 'information', 'added', 'to', 'an', 'exception', 'by', ':', 'func', ':', 'add_info_to_exception', '.']
train
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/exceptions.py#L58-L68
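Round-tripping with the counterpart named in the docstring; add_info_to_exception is sketched here as a minimal stand-in that appends the dict to err.args:

def add_info_to_exception(err, info):            # minimal stand-in
    err.args = err.args + (info,)

try:
    raise ValueError("bad input")
except ValueError as err:
    add_info_to_exception(err, {"field": "age"})
    assert recover_info_from_exception(err) == {"field": "age"}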
4,151
spulec/moto
moto/core/models.py
BaseBackend.url_paths
def url_paths(self): """ A dictionary of the paths of the urls to be mocked with this service and the handlers that should be called in their place """ unformatted_paths = self._url_module.url_paths paths = {} for unformatted_path, handler in unformatted_paths.items(): path = unformatted_path.format("") paths[path] = handler return paths
python
def url_paths(self): """ A dictionary of the paths of the urls to be mocked with this service and the handlers that should be called in their place """ unformatted_paths = self._url_module.url_paths paths = {} for unformatted_path, handler in unformatted_paths.items(): path = unformatted_path.format("") paths[path] = handler return paths
['def', 'url_paths', '(', 'self', ')', ':', 'unformatted_paths', '=', 'self', '.', '_url_module', '.', 'url_paths', 'paths', '=', '{', '}', 'for', 'unformatted_path', ',', 'handler', 'in', 'unformatted_paths', '.', 'items', '(', ')', ':', 'path', '=', 'unformatted_path', '.', 'format', '(', '""', ')', 'paths', '[', 'path', ']', '=', 'handler', 'return', 'paths']
A dictionary of the paths of the urls to be mocked with this service and the handlers that should be called in their place
['A', 'dictionary', 'of', 'the', 'paths', 'of', 'the', 'urls', 'to', 'be', 'mocked', 'with', 'this', 'service', 'and', 'the', 'handlers', 'that', 'should', 'be', 'called', 'in', 'their', 'place']
train
https://github.com/spulec/moto/blob/4a286c4bc288933bb023396e2784a6fdbb966bc9/moto/core/models.py#L501-L513
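The `unformatted_path.format("")` step simply drops a `{0}`-style host placeholder from each pattern; a sketch with a made-up moto-style pattern:

unformatted_paths = {"{0}/2015-04-13/functions/?$": "handler"}
paths = {path.format(""): h for path, h in unformatted_paths.items()}
assert paths == {"/2015-04-13/functions/?$": "handler"}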
4,152
eventbrite/pysoa
pysoa/common/transport/local.py
LocalClientTransport.send_request_message
def send_request_message(self, request_id, meta, body, _=None): """ Receives a request from the client and handles and dispatches it in-thread. `message_expiry_in_seconds` is not supported. Messages do not expire, as the server handles the request immediately in the same thread before this method returns. This method blocks until the server has completed handling the request. """ self._current_request = (request_id, meta, body) try: self.server.handle_next_request() finally: self._current_request = None
python
def send_request_message(self, request_id, meta, body, _=None): """ Receives a request from the client and handles and dispatches it in-thread. `message_expiry_in_seconds` is not supported. Messages do not expire, as the server handles the request immediately in the same thread before this method returns. This method blocks until the server has completed handling the request. """ self._current_request = (request_id, meta, body) try: self.server.handle_next_request() finally: self._current_request = None
['def', 'send_request_message', '(', 'self', ',', 'request_id', ',', 'meta', ',', 'body', ',', '_', '=', 'None', ')', ':', 'self', '.', '_current_request', '=', '(', 'request_id', ',', 'meta', ',', 'body', ')', 'try', ':', 'self', '.', 'server', '.', 'handle_next_request', '(', ')', 'finally', ':', 'self', '.', '_current_request', '=', 'None']
Receives a request from the client and handles and dispatches it in-thread. `message_expiry_in_seconds` is not supported. Messages do not expire, as the server handles the request immediately in the same thread before this method returns. This method blocks until the server has completed handling the request.
['Receives', 'a', 'request', 'from', 'the', 'client', 'and', 'handles', 'and', 'dispatches', 'it', 'in', '-', 'thread', '.', 'message_expiry_in_seconds', 'is', 'not', 'supported', '.', 'Messages', 'do', 'not', 'expire', 'as', 'the', 'server', 'handles', 'the', 'request', 'immediately', 'in', 'the', 'same', 'thread', 'before', 'this', 'method', 'returns', '.', 'This', 'method', 'blocks', 'until', 'the', 'server', 'has', 'completed', 'handling', 'the', 'request', '.']
train
https://github.com/eventbrite/pysoa/blob/9c052cae2397d13de3df8ae2c790846a70b53f18/pysoa/common/transport/local.py#L78-L88
4,153
monarch-initiative/dipper
dipper/models/Genotype.py
Genotype.addPartsToVSLC
def addPartsToVSLC( self, vslc_id, allele1_id, allele2_id, zygosity_id=None, allele1_rel=None, allele2_rel=None): """ Here we add the parts to the VSLC. While alleles (reference or variant loci) are traditionally added, you can add any node (such as sequence_alterations for unlocated variations) to a vslc if they are known to be paired. However, if a sequence_alteration's loci is unknown, it probably should be added directly to the GVC. :param vslc_id: :param allele1_id: :param allele2_id: :param zygosity_id: :param allele1_rel: :param allele2_rel: :return: """ # vslc has parts allele1/allele2 if allele1_id is not None: self.addParts(allele1_id, vslc_id, allele1_rel) if allele2_id is not None and allele2_id.strip() != '': self.addParts(allele2_id, vslc_id, allele2_rel) # figure out zygosity if it's not supplied if zygosity_id is None: if allele1_id == allele2_id: zygosity_id = self.globaltt['homozygous'] else: zygosity_id = self.globaltt['heterozygous'] if zygosity_id is not None: self.graph.addTriple(vslc_id, self.globaltt['has_zygosity'], zygosity_id) return
python
def addPartsToVSLC( self, vslc_id, allele1_id, allele2_id, zygosity_id=None, allele1_rel=None, allele2_rel=None): """ Here we add the parts to the VSLC. While traditionally alleles (reference or variant loci) are traditionally added, you can add any node (such as sequence_alterations for unlocated variations) to a vslc if they are known to be paired. However, if a sequence_alteration's loci is unknown, it probably should be added directly to the GVC. :param vslc_id: :param allele1_id: :param allele2_id: :param zygosity_id: :param allele1_rel: :param allele2_rel: :return: """ # vslc has parts allele1/allele2 if allele1_id is not None: self.addParts(allele1_id, vslc_id, allele1_rel) if allele2_id is not None and allele2_id.strip() != '': self.addParts(allele2_id, vslc_id, allele2_rel) # figure out zygosity if it's not supplied if zygosity_id is None: if allele1_id == allele2_id: zygosity_id = self.globaltt['homozygous'] else: zygosity_id = self.globaltt['heterozygous'] if zygosity_id is not None: self.graph.addTriple(vslc_id, self.globaltt['has_zygosity'], zygosity_id) return
['def', 'addPartsToVSLC', '(', 'self', ',', 'vslc_id', ',', 'allele1_id', ',', 'allele2_id', ',', 'zygosity_id', '=', 'None', ',', 'allele1_rel', '=', 'None', ',', 'allele2_rel', '=', 'None', ')', ':', '# vslc has parts allele1/allele2', 'if', 'allele1_id', 'is', 'not', 'None', ':', 'self', '.', 'addParts', '(', 'allele1_id', ',', 'vslc_id', ',', 'allele1_rel', ')', 'if', 'allele2_id', 'is', 'not', 'None', 'and', 'allele2_id', '.', 'strip', '(', ')', '!=', "''", ':', 'self', '.', 'addParts', '(', 'allele2_id', ',', 'vslc_id', ',', 'allele2_rel', ')', "# figure out zygosity if it's not supplied", 'if', 'zygosity_id', 'is', 'None', ':', 'if', 'allele1_id', '==', 'allele2_id', ':', 'zygosity_id', '=', 'self', '.', 'globaltt', '[', "'homozygous'", ']', 'else', ':', 'zygosity_id', '=', 'self', '.', 'globaltt', '[', "'heterozygous'", ']', 'if', 'zygosity_id', 'is', 'not', 'None', ':', 'self', '.', 'graph', '.', 'addTriple', '(', 'vslc_id', ',', 'self', '.', 'globaltt', '[', "'has_zygosity'", ']', ',', 'zygosity_id', ')', 'return']
Here we add the parts to the VSLC. While traditionally alleles (reference or variant loci) are traditionally added, you can add any node (such as sequence_alterations for unlocated variations) to a vslc if they are known to be paired. However, if a sequence_alteration's loci is unknown, it probably should be added directly to the GVC. :param vslc_id: :param allele1_id: :param allele2_id: :param zygosity_id: :param allele1_rel: :param allele2_rel: :return:
['Here', 'we', 'add', 'the', 'parts', 'to', 'the', 'VSLC', '.', 'While', 'traditionally', 'alleles', '(', 'reference', 'or', 'variant', 'loci', ')', 'are', 'traditionally', 'added', 'you', 'can', 'add', 'any', 'node', '(', 'such', 'as', 'sequence_alterations', 'for', 'unlocated', 'variations', ')', 'to', 'a', 'vslc', 'if', 'they', 'are', 'known', 'to', 'be', 'paired', '.', 'However', 'if', 'a', 'sequence_alteration', 's', 'loci', 'is', 'unknown', 'it', 'probably', 'should', 'be', 'added', 'directly', 'to', 'the', 'GVC', '.', ':', 'param', 'vslc_id', ':', ':', 'param', 'allele1_id', ':', ':', 'param', 'allele2_id', ':', ':', 'param', 'zygosity_id', ':', ':', 'param', 'allele1_rel', ':', ':', 'param', 'allele2_rel', ':', ':', 'return', ':']
train
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/models/Genotype.py#L205-L241
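The zygosity-defaulting rule in the middle of addPartsToVSLC is self-contained enough to illustrate on its own. A hedged sketch with a stand-in lookup table (the IDs are illustrative, not pulled from dipper's real globaltt):

globaltt = {'homozygous': 'GENO:homozygous', 'heterozygous': 'GENO:heterozygous'}  # stand-in table

def infer_zygosity(allele1_id, allele2_id, zygosity_id=None):
    # If the caller did not supply a zygosity, infer it from allele identity.
    if zygosity_id is None:
        key = 'homozygous' if allele1_id == allele2_id else 'heterozygous'
        zygosity_id = globaltt[key]
    return zygosity_id

print(infer_zygosity('a1', 'a1'))  # GENO:homozygous
print(infer_zygosity('a1', 'a2'))  # GENO:heterozygous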
4,154
apache/spark
python/pyspark/sql/dataframe.py
_to_corrected_pandas_type
def _to_corrected_pandas_type(dt): """ When converting Spark SQL records to Pandas DataFrame, the inferred data type may be wrong. This method gets the corrected data type for Pandas if that type may be inferred uncorrectly. """ import numpy as np if type(dt) == ByteType: return np.int8 elif type(dt) == ShortType: return np.int16 elif type(dt) == IntegerType: return np.int32 elif type(dt) == FloatType: return np.float32 else: return None
python
def _to_corrected_pandas_type(dt): """ When converting Spark SQL records to Pandas DataFrame, the inferred data type may be wrong. This method gets the corrected data type for Pandas if that type may be inferred uncorrectly. """ import numpy as np if type(dt) == ByteType: return np.int8 elif type(dt) == ShortType: return np.int16 elif type(dt) == IntegerType: return np.int32 elif type(dt) == FloatType: return np.float32 else: return None
['def', '_to_corrected_pandas_type', '(', 'dt', ')', ':', 'import', 'numpy', 'as', 'np', 'if', 'type', '(', 'dt', ')', '==', 'ByteType', ':', 'return', 'np', '.', 'int8', 'elif', 'type', '(', 'dt', ')', '==', 'ShortType', ':', 'return', 'np', '.', 'int16', 'elif', 'type', '(', 'dt', ')', '==', 'IntegerType', ':', 'return', 'np', '.', 'int32', 'elif', 'type', '(', 'dt', ')', '==', 'FloatType', ':', 'return', 'np', '.', 'float32', 'else', ':', 'return', 'None']
When converting Spark SQL records to Pandas DataFrame, the inferred data type may be wrong. This method gets the corrected data type for Pandas if that type may be inferred uncorrectly.
['When', 'converting', 'Spark', 'SQL', 'records', 'to', 'Pandas', 'DataFrame', 'the', 'inferred', 'data', 'type', 'may', 'be', 'wrong', '.', 'This', 'method', 'gets', 'the', 'corrected', 'data', 'type', 'for', 'Pandas', 'if', 'that', 'type', 'may', 'be', 'inferred', 'uncorrectly', '.']
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/dataframe.py#L2239-L2254
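The point of the mapping is that the platform-default integer dtype is wider than what the narrow Spark SQL type guarantees, so pinning the dtype avoids silent widening when building the pandas DataFrame. A numpy-only illustration (no pyspark required):

import numpy as np

values = [1, 2, 3]                             # e.g. a Spark IntegerType column
print(np.array(values).dtype)                  # int64 on most platforms (the "wrong" inference)
print(np.array(values, dtype=np.int32).dtype)  # int32, matching IntegerType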
4,155
newville/asteval
asteval/asteval.py
Interpreter.on_return
def on_return(self, node): # ('value',) """Return statement: look for None, return special sentinal.""" self.retval = self.run(node.value) if self.retval is None: self.retval = ReturnedNone return
python
def on_return(self, node): # ('value',) """Return statement: look for None, return special sentinal.""" self.retval = self.run(node.value) if self.retval is None: self.retval = ReturnedNone return
['def', 'on_return', '(', 'self', ',', 'node', ')', ':', "# ('value',)", 'self', '.', 'retval', '=', 'self', '.', 'run', '(', 'node', '.', 'value', ')', 'if', 'self', '.', 'retval', 'is', 'None', ':', 'self', '.', 'retval', '=', 'ReturnedNone', 'return']
Return statement: look for None, return special sentinal.
['Return', 'statement', ':', 'look', 'for', 'None', 'return', 'special', 'sentinal', '.']
train
https://github.com/newville/asteval/blob/bb7d3a95079f96ead75ea55662014bbcc82f9b28/asteval/asteval.py#L367-L372
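ReturnedNone is a sentinel: retval being None means "no return statement has run yet", while the sentinel means "an explicit return None was executed". The pattern in isolation (the sentinel here is a plain stand-in object, not asteval's actual singleton):

ReturnedNone = object()  # stand-in sentinel

retval = None            # no `return` statement has run yet
# ... interpreter executes `return None` ...
retval = ReturnedNone
print(retval is ReturnedNone)  # True: distinguishable from a plain None result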
4,156
ewels/MultiQC
multiqc/modules/qualimap/QM_BamQC.py
parse_coverage
def parse_coverage(self, f): """ Parse the contents of the Qualimap BamQC Coverage Histogram file """ # Get the sample name from the parent parent directory # Typical path: <sample name>/raw_data_qualimapReport/coverage_histogram.txt s_name = self.get_s_name(f) d = dict() for l in f['f']: if l.startswith('#'): continue coverage, count = l.split(None, 1) coverage = int(round(float(coverage))) count = float(count) d[coverage] = count if len(d) == 0: log.debug("Couldn't parse contents of coverage histogram file {}".format(f['fn'])) return None # Find median without importing anything to do it for us num_counts = sum(d.values()) cum_counts = 0 median_coverage = None for thiscov, thiscount in d.items(): cum_counts += thiscount if cum_counts >= num_counts/2: median_coverage = thiscov break self.general_stats_data[s_name]['median_coverage'] = median_coverage # Save results if s_name in self.qualimap_bamqc_coverage_hist: log.debug("Duplicate coverage histogram sample name found! Overwriting: {}".format(s_name)) self.qualimap_bamqc_coverage_hist[s_name] = d self.add_data_source(f, s_name=s_name, section='coverage_histogram')
python
def parse_coverage(self, f): """ Parse the contents of the Qualimap BamQC Coverage Histogram file """ # Get the sample name from the parent parent directory # Typical path: <sample name>/raw_data_qualimapReport/coverage_histogram.txt s_name = self.get_s_name(f) d = dict() for l in f['f']: if l.startswith('#'): continue coverage, count = l.split(None, 1) coverage = int(round(float(coverage))) count = float(count) d[coverage] = count if len(d) == 0: log.debug("Couldn't parse contents of coverage histogram file {}".format(f['fn'])) return None # Find median without importing anything to do it for us num_counts = sum(d.values()) cum_counts = 0 median_coverage = None for thiscov, thiscount in d.items(): cum_counts += thiscount if cum_counts >= num_counts/2: median_coverage = thiscov break self.general_stats_data[s_name]['median_coverage'] = median_coverage # Save results if s_name in self.qualimap_bamqc_coverage_hist: log.debug("Duplicate coverage histogram sample name found! Overwriting: {}".format(s_name)) self.qualimap_bamqc_coverage_hist[s_name] = d self.add_data_source(f, s_name=s_name, section='coverage_histogram')
['def', 'parse_coverage', '(', 'self', ',', 'f', ')', ':', '# Get the sample name from the parent parent directory', '# Typical path: <sample name>/raw_data_qualimapReport/coverage_histogram.txt', 's_name', '=', 'self', '.', 'get_s_name', '(', 'f', ')', 'd', '=', 'dict', '(', ')', 'for', 'l', 'in', 'f', '[', "'f'", ']', ':', 'if', 'l', '.', 'startswith', '(', "'#'", ')', ':', 'continue', 'coverage', ',', 'count', '=', 'l', '.', 'split', '(', 'None', ',', '1', ')', 'coverage', '=', 'int', '(', 'round', '(', 'float', '(', 'coverage', ')', ')', ')', 'count', '=', 'float', '(', 'count', ')', 'd', '[', 'coverage', ']', '=', 'count', 'if', 'len', '(', 'd', ')', '==', '0', ':', 'log', '.', 'debug', '(', '"Couldn\'t parse contents of coverage histogram file {}"', '.', 'format', '(', 'f', '[', "'fn'", ']', ')', ')', 'return', 'None', '# Find median without importing anything to do it for us', 'num_counts', '=', 'sum', '(', 'd', '.', 'values', '(', ')', ')', 'cum_counts', '=', '0', 'median_coverage', '=', 'None', 'for', 'thiscov', ',', 'thiscount', 'in', 'd', '.', 'items', '(', ')', ':', 'cum_counts', '+=', 'thiscount', 'if', 'cum_counts', '>=', 'num_counts', '/', '2', ':', 'median_coverage', '=', 'thiscov', 'break', 'self', '.', 'general_stats_data', '[', 's_name', ']', '[', "'median_coverage'", ']', '=', 'median_coverage', '# Save results', 'if', 's_name', 'in', 'self', '.', 'qualimap_bamqc_coverage_hist', ':', 'log', '.', 'debug', '(', '"Duplicate coverage histogram sample name found! Overwriting: {}"', '.', 'format', '(', 's_name', ')', ')', 'self', '.', 'qualimap_bamqc_coverage_hist', '[', 's_name', ']', '=', 'd', 'self', '.', 'add_data_source', '(', 'f', ',', 's_name', '=', 's_name', ',', 'section', '=', "'coverage_histogram'", ')']
Parse the contents of the Qualimap BamQC Coverage Histogram file
['Parse', 'the', 'contents', 'of', 'the', 'Qualimap', 'BamQC', 'Coverage', 'Histogram', 'file']
train
https://github.com/ewels/MultiQC/blob/2037d6322b2554146a74efbf869156ad20d4c4ec/multiqc/modules/qualimap/QM_BamQC.py#L122-L156
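The median computation walks the histogram until the cumulative count crosses half the total, avoiding a numpy or statistics import. The same walk, standalone, with an explicit sort (the original relies on the parsed dict already being in ascending coverage order):

d = {1: 10.0, 2: 30.0, 3: 40.0, 4: 20.0}  # coverage -> read count (made-up data)

num_counts = sum(d.values())
cum_counts = 0
median_coverage = None
for cov in sorted(d):
    cum_counts += d[cov]
    if cum_counts >= num_counts / 2:
        median_coverage = cov
        break

print(median_coverage)  # 3 (cumulative 80 >= 50)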
4,157
apache/spark
python/pyspark/rdd.py
RDD.mapPartitions
def mapPartitions(self, f, preservesPartitioning=False): """ Return a new RDD by applying a function to each partition of this RDD. >>> rdd = sc.parallelize([1, 2, 3, 4], 2) >>> def f(iterator): yield sum(iterator) >>> rdd.mapPartitions(f).collect() [3, 7] """ def func(s, iterator): return f(iterator) return self.mapPartitionsWithIndex(func, preservesPartitioning)
python
def mapPartitions(self, f, preservesPartitioning=False): """ Return a new RDD by applying a function to each partition of this RDD. >>> rdd = sc.parallelize([1, 2, 3, 4], 2) >>> def f(iterator): yield sum(iterator) >>> rdd.mapPartitions(f).collect() [3, 7] """ def func(s, iterator): return f(iterator) return self.mapPartitionsWithIndex(func, preservesPartitioning)
['def', 'mapPartitions', '(', 'self', ',', 'f', ',', 'preservesPartitioning', '=', 'False', ')', ':', 'def', 'func', '(', 's', ',', 'iterator', ')', ':', 'return', 'f', '(', 'iterator', ')', 'return', 'self', '.', 'mapPartitionsWithIndex', '(', 'func', ',', 'preservesPartitioning', ')']
Return a new RDD by applying a function to each partition of this RDD. >>> rdd = sc.parallelize([1, 2, 3, 4], 2) >>> def f(iterator): yield sum(iterator) >>> rdd.mapPartitions(f).collect() [3, 7]
['Return', 'a', 'new', 'RDD', 'by', 'applying', 'a', 'function', 'to', 'each', 'partition', 'of', 'this', 'RDD', '.']
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/rdd.py#L344-L355
4,158
langloisjp/pysvclog
servicelog.py
UDPLogger.send
def send(self, jsonstr): """ Send jsonstr to the UDP collector >>> logger = UDPLogger() >>> logger.send('{"key": "value"}') """ udp_sock = socket(AF_INET, SOCK_DGRAM) udp_sock.sendto(jsonstr.encode('utf-8'), self.addr)
python
def send(self, jsonstr): """ Send jsonstr to the UDP collector >>> logger = UDPLogger() >>> logger.send('{"key": "value"}') """ udp_sock = socket(AF_INET, SOCK_DGRAM) udp_sock.sendto(jsonstr.encode('utf-8'), self.addr)
['def', 'send', '(', 'self', ',', 'jsonstr', ')', ':', 'udp_sock', '=', 'socket', '(', 'AF_INET', ',', 'SOCK_DGRAM', ')', 'udp_sock', '.', 'sendto', '(', 'jsonstr', '.', 'encode', '(', "'utf-8'", ')', ',', 'self', '.', 'addr', ')']
Send jsonstr to the UDP collector >>> logger = UDPLogger() >>> logger.send('{"key": "value"}')
['Send', 'jsonstr', 'to', 'the', 'UDP', 'collector']
train
https://github.com/langloisjp/pysvclog/blob/ab429bb12e13dca63ffce082e633d8879b6e3854/servicelog.py#L66-L74
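Because the socket is datagram-based, the send is fire-and-forget: no connection, no delivery guarantee, and nothing blocks if no collector is listening. A self-contained version (127.0.0.1:9999 is an assumed collector address):

from socket import socket, AF_INET, SOCK_DGRAM

addr = ('127.0.0.1', 9999)  # assumed collector address
udp_sock = socket(AF_INET, SOCK_DGRAM)
udp_sock.sendto('{"key": "value"}'.encode('utf-8'), addr)
udp_sock.close()  # the original never closes the per-call socket; closing is tidier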
4,159
fabaff/python-glances-api
glances_api/__init__.py
Glances.get_data
async def get_data(self): """Retrieve the data.""" url = '{}/{}'.format(self.url, 'all') try: with async_timeout.timeout(5, loop=self._loop): if self.password is None: response = await self._session.get(url) else: auth = aiohttp.BasicAuth(self.username, self.password) response = await self._session.get(url, auth=auth) _LOGGER.debug("Response from Glances API: %s", response.status) print(response.status) print(response.text) self.data = await response.json() _LOGGER.debug(self.data) except (asyncio.TimeoutError, aiohttp.ClientError): _LOGGER.error("Can not load data from Glances API") raise exceptions.GlancesApiConnectionError()
python
async def get_data(self): """Retrieve the data.""" url = '{}/{}'.format(self.url, 'all') try: with async_timeout.timeout(5, loop=self._loop): if self.password is None: response = await self._session.get(url) else: auth = aiohttp.BasicAuth(self.username, self.password) response = await self._session.get(url, auth=auth) _LOGGER.debug("Response from Glances API: %s", response.status) print(response.status) print(response.text) self.data = await response.json() _LOGGER.debug(self.data) except (asyncio.TimeoutError, aiohttp.ClientError): _LOGGER.error("Can not load data from Glances API") raise exceptions.GlancesApiConnectionError()
['async', 'def', 'get_data', '(', 'self', ')', ':', 'url', '=', "'{}/{}'", '.', 'format', '(', 'self', '.', 'url', ',', "'all'", ')', 'try', ':', 'with', 'async_timeout', '.', 'timeout', '(', '5', ',', 'loop', '=', 'self', '.', '_loop', ')', ':', 'if', 'self', '.', 'password', 'is', 'None', ':', 'response', '=', 'await', 'self', '.', '_session', '.', 'get', '(', 'url', ')', 'else', ':', 'auth', '=', 'aiohttp', '.', 'BasicAuth', '(', 'self', '.', 'username', ',', 'self', '.', 'password', ')', 'response', '=', 'await', 'self', '.', '_session', '.', 'get', '(', 'url', ',', 'auth', '=', 'auth', ')', '_LOGGER', '.', 'debug', '(', '"Response from Glances API: %s"', ',', 'response', '.', 'status', ')', 'print', '(', 'response', '.', 'status', ')', 'print', '(', 'response', '.', 'text', ')', 'self', '.', 'data', '=', 'await', 'response', '.', 'json', '(', ')', '_LOGGER', '.', 'debug', '(', 'self', '.', 'data', ')', 'except', '(', 'asyncio', '.', 'TimeoutError', ',', 'aiohttp', '.', 'ClientError', ')', ':', '_LOGGER', '.', 'error', '(', '"Can not load data from Glances API"', ')', 'raise', 'exceptions', '.', 'GlancesApiConnectionError', '(', ')']
Retrieve the data.
['Retrieve', 'the', 'data', '.']
train
https://github.com/fabaff/python-glances-api/blob/7ed8a688617d0d0b1c8d5b107559fc4afcdbaaac/glances_api/__init__.py#L31-L50
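Note the two stray print() calls in get_data, which look like debug leftovers, and the loop= argument to async_timeout, which newer versions of that library have dropped. A minimal sketch of the same fetch using only current aiohttp APIs (the URL is a placeholder, not taken from the original):

import asyncio
import aiohttp

async def fetch_all(url, username=None, password=None):
    # Optional basic auth, five-second total timeout, JSON body back.
    auth = aiohttp.BasicAuth(username, password) if password else None
    timeout = aiohttp.ClientTimeout(total=5)
    async with aiohttp.ClientSession(timeout=timeout) as session:
        async with session.get(url, auth=auth) as response:
            return await response.json()

# asyncio.run(fetch_all('http://localhost:61208/api/3/all'))  # placeholder URL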
4,160
ramses-tech/ramses
ramses/utils.py
get_route_name
def get_route_name(resource_uri): """ Get route name from RAML resource URI. :param resource_uri: String representing RAML resource URI. :returns string: String with route name, which is :resource_uri: stripped of non-word characters. """ resource_uri = resource_uri.strip('/') resource_uri = re.sub('\W', '', resource_uri) return resource_uri
python
def get_route_name(resource_uri): """ Get route name from RAML resource URI. :param resource_uri: String representing RAML resource URI. :returns string: String with route name, which is :resource_uri: stripped of non-word characters. """ resource_uri = resource_uri.strip('/') resource_uri = re.sub('\W', '', resource_uri) return resource_uri
['def', 'get_route_name', '(', 'resource_uri', ')', ':', 'resource_uri', '=', 'resource_uri', '.', 'strip', '(', "'/'", ')', 'resource_uri', '=', 're', '.', 'sub', '(', "'\\W'", ',', "''", ',', 'resource_uri', ')', 'return', 'resource_uri']
Get route name from RAML resource URI. :param resource_uri: String representing RAML resource URI. :returns string: String with route name, which is :resource_uri: stripped of non-word characters.
['Get', 'route', 'name', 'from', 'RAML', 'resource', 'URI', '.']
train
https://github.com/ramses-tech/ramses/blob/ea2e1e896325b7256cdf5902309e05fd98e0c14c/ramses/utils.py#L345-L354
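The function is just strip-then-substitute. A standalone version using a raw string for the regex (the original's bare '\W' triggers an invalid-escape DeprecationWarning on modern Python):

import re

def get_route_name(resource_uri):
    # Drop surrounding slashes, then every non-word character.
    return re.sub(r'\W', '', resource_uri.strip('/'))

print(get_route_name('/users/{id}/profile'))  # usersidprofile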
4,161
vimalloc/flask-jwt-simple
flask_jwt_simple/jwt_manager.py
JWTManager._set_default_configuration_options
def _set_default_configuration_options(app): """ Sets the default configuration options used by this extension """ # Options for JWTs when the TOKEN_LOCATION is headers app.config.setdefault('JWT_HEADER_NAME', 'Authorization') app.config.setdefault('JWT_HEADER_TYPE', 'Bearer') # How long an a token created with 'create_jwt' will last before # it expires (when using the default jwt_data_callback function). app.config.setdefault('JWT_EXPIRES', datetime.timedelta(hours=1)) # What algorithm to use to sign the token. See here for a list of options: # https://github.com/jpadilla/pyjwt/blob/master/jwt/api_jwt.py app.config.setdefault('JWT_ALGORITHM', 'HS256') # Key that acts as the identity for the JWT app.config.setdefault('JWT_IDENTITY_CLAIM', 'sub') # Expected value of the audience claim app.config.setdefault('JWT_DECODE_AUDIENCE', None) # Secret key to sign JWTs with. Only used if a symmetric algorithm is # used (such as the HS* algorithms). app.config.setdefault('JWT_SECRET_KEY', None) # Keys to sign JWTs with when use when using an asymmetric # (public/private key) algorithms, such as RS* or EC* app.config.setdefault('JWT_PRIVATE_KEY', None) app.config.setdefault('JWT_PUBLIC_KEY', None)
python
def _set_default_configuration_options(app): """ Sets the default configuration options used by this extension """ # Options for JWTs when the TOKEN_LOCATION is headers app.config.setdefault('JWT_HEADER_NAME', 'Authorization') app.config.setdefault('JWT_HEADER_TYPE', 'Bearer') # How long an a token created with 'create_jwt' will last before # it expires (when using the default jwt_data_callback function). app.config.setdefault('JWT_EXPIRES', datetime.timedelta(hours=1)) # What algorithm to use to sign the token. See here for a list of options: # https://github.com/jpadilla/pyjwt/blob/master/jwt/api_jwt.py app.config.setdefault('JWT_ALGORITHM', 'HS256') # Key that acts as the identity for the JWT app.config.setdefault('JWT_IDENTITY_CLAIM', 'sub') # Expected value of the audience claim app.config.setdefault('JWT_DECODE_AUDIENCE', None) # Secret key to sign JWTs with. Only used if a symmetric algorithm is # used (such as the HS* algorithms). app.config.setdefault('JWT_SECRET_KEY', None) # Keys to sign JWTs with when use when using an asymmetric # (public/private key) algorithms, such as RS* or EC* app.config.setdefault('JWT_PRIVATE_KEY', None) app.config.setdefault('JWT_PUBLIC_KEY', None)
['def', '_set_default_configuration_options', '(', 'app', ')', ':', '# Options for JWTs when the TOKEN_LOCATION is headers', 'app', '.', 'config', '.', 'setdefault', '(', "'JWT_HEADER_NAME'", ',', "'Authorization'", ')', 'app', '.', 'config', '.', 'setdefault', '(', "'JWT_HEADER_TYPE'", ',', "'Bearer'", ')', "# How long an a token created with 'create_jwt' will last before", '# it expires (when using the default jwt_data_callback function).', 'app', '.', 'config', '.', 'setdefault', '(', "'JWT_EXPIRES'", ',', 'datetime', '.', 'timedelta', '(', 'hours', '=', '1', ')', ')', '# What algorithm to use to sign the token. See here for a list of options:', '# https://github.com/jpadilla/pyjwt/blob/master/jwt/api_jwt.py', 'app', '.', 'config', '.', 'setdefault', '(', "'JWT_ALGORITHM'", ',', "'HS256'", ')', '# Key that acts as the identity for the JWT', 'app', '.', 'config', '.', 'setdefault', '(', "'JWT_IDENTITY_CLAIM'", ',', "'sub'", ')', '# Expected value of the audience claim', 'app', '.', 'config', '.', 'setdefault', '(', "'JWT_DECODE_AUDIENCE'", ',', 'None', ')', '# Secret key to sign JWTs with. Only used if a symmetric algorithm is', '# used (such as the HS* algorithms).', 'app', '.', 'config', '.', 'setdefault', '(', "'JWT_SECRET_KEY'", ',', 'None', ')', '# Keys to sign JWTs with when use when using an asymmetric', '# (public/private key) algorithms, such as RS* or EC*', 'app', '.', 'config', '.', 'setdefault', '(', "'JWT_PRIVATE_KEY'", ',', 'None', ')', 'app', '.', 'config', '.', 'setdefault', '(', "'JWT_PUBLIC_KEY'", ',', 'None', ')']
Sets the default configuration options used by this extension
['Sets', 'the', 'default', 'configuration', 'options', 'used', 'by', 'this', 'extension']
train
https://github.com/vimalloc/flask-jwt-simple/blob/ed930340cfcff5a6ddc49248d4682e87204dd3be/flask_jwt_simple/jwt_manager.py#L80-L109
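setdefault is what makes these extension defaults non-destructive: a value the application has already configured is never overwritten. The behaviour with a plain dict standing in for app.config:

config = {'JWT_ALGORITHM': 'RS256'}              # set by the application

config.setdefault('JWT_ALGORITHM', 'HS256')      # no effect: key already present
config.setdefault('JWT_HEADER_TYPE', 'Bearer')   # applied: key was missing

print(config)  # {'JWT_ALGORITHM': 'RS256', 'JWT_HEADER_TYPE': 'Bearer'}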
4,162
datalib/libextract
libextract/core.py
parse_html
def parse_html(fileobj, encoding): """ Given a file object *fileobj*, get an ElementTree instance. The *encoding* is assumed to be utf8. """ parser = HTMLParser(encoding=encoding, remove_blank_text=True) return parse(fileobj, parser)
python
def parse_html(fileobj, encoding): """ Given a file object *fileobj*, get an ElementTree instance. The *encoding* is assumed to be utf8. """ parser = HTMLParser(encoding=encoding, remove_blank_text=True) return parse(fileobj, parser)
['def', 'parse_html', '(', 'fileobj', ',', 'encoding', ')', ':', 'parser', '=', 'HTMLParser', '(', 'encoding', '=', 'encoding', ',', 'remove_blank_text', '=', 'True', ')', 'return', 'parse', '(', 'fileobj', ',', 'parser', ')']
Given a file object *fileobj*, get an ElementTree instance. The *encoding* is assumed to be utf8.
['Given', 'a', 'file', 'object', '*', 'fileobj', '*', 'get', 'an', 'ElementTree', 'instance', '.', 'The', '*', 'encoding', '*', 'is', 'assumed', 'to', 'be', 'utf8', '.']
train
https://github.com/datalib/libextract/blob/9cf9d55c7f8cd622eab0a50f009385f0a39b1200/libextract/core.py#L20-L26
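HTMLParser and parse are presumably lxml's; a hedged usage sketch using lxml.etree (the real module's imports may differ, and the HTML snippet is made up):

from io import BytesIO
from lxml.etree import HTMLParser, parse

parser = HTMLParser(encoding='utf-8', remove_blank_text=True)
tree = parse(BytesIO(b'<html><body><p>hi</p></body></html>'), parser)
print(tree.getroot().findtext('.//p'))  # hi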
4,163
econ-ark/HARK
HARK/ConsumptionSaving/ConsIndShockModel.py
ConsIndShockSolver.solve
def solve(self): ''' Solves the single period consumption-saving problem using the method of endogenous gridpoints. Solution includes a consumption function cFunc (using cubic or linear splines), a marginal value function vPfunc, a min- imum acceptable level of normalized market resources mNrmMin, normalized human wealth hNrm, and bounding MPCs MPCmin and MPCmax. It might also have a value function vFunc and marginal marginal value function vPPfunc. Parameters ---------- none Returns ------- solution : ConsumerSolution The solution to the single period consumption-saving problem. ''' # Make arrays of end-of-period assets and end-of-period marginal value aNrm = self.prepareToCalcEndOfPrdvP() EndOfPrdvP = self.calcEndOfPrdvP() # Construct a basic solution for this period if self.CubicBool: solution = self.makeBasicSolution(EndOfPrdvP,aNrm,interpolator=self.makeCubiccFunc) else: solution = self.makeBasicSolution(EndOfPrdvP,aNrm,interpolator=self.makeLinearcFunc) solution = self.addMPCandHumanWealth(solution) # add a few things solution = self.addSSmNrm(solution) # find steady state m # Add the value function if requested, as well as the marginal marginal # value function if cubic splines were used (to prepare for next period) if self.vFuncBool: solution = self.addvFunc(solution,EndOfPrdvP) if self.CubicBool: solution = self.addvPPfunc(solution) return solution
python
def solve(self): ''' Solves the single period consumption-saving problem using the method of endogenous gridpoints. Solution includes a consumption function cFunc (using cubic or linear splines), a marginal value function vPfunc, a min- imum acceptable level of normalized market resources mNrmMin, normalized human wealth hNrm, and bounding MPCs MPCmin and MPCmax. It might also have a value function vFunc and marginal marginal value function vPPfunc. Parameters ---------- none Returns ------- solution : ConsumerSolution The solution to the single period consumption-saving problem. ''' # Make arrays of end-of-period assets and end-of-period marginal value aNrm = self.prepareToCalcEndOfPrdvP() EndOfPrdvP = self.calcEndOfPrdvP() # Construct a basic solution for this period if self.CubicBool: solution = self.makeBasicSolution(EndOfPrdvP,aNrm,interpolator=self.makeCubiccFunc) else: solution = self.makeBasicSolution(EndOfPrdvP,aNrm,interpolator=self.makeLinearcFunc) solution = self.addMPCandHumanWealth(solution) # add a few things solution = self.addSSmNrm(solution) # find steady state m # Add the value function if requested, as well as the marginal marginal # value function if cubic splines were used (to prepare for next period) if self.vFuncBool: solution = self.addvFunc(solution,EndOfPrdvP) if self.CubicBool: solution = self.addvPPfunc(solution) return solution
['def', 'solve', '(', 'self', ')', ':', '# Make arrays of end-of-period assets and end-of-period marginal value', 'aNrm', '=', 'self', '.', 'prepareToCalcEndOfPrdvP', '(', ')', 'EndOfPrdvP', '=', 'self', '.', 'calcEndOfPrdvP', '(', ')', '# Construct a basic solution for this period', 'if', 'self', '.', 'CubicBool', ':', 'solution', '=', 'self', '.', 'makeBasicSolution', '(', 'EndOfPrdvP', ',', 'aNrm', ',', 'interpolator', '=', 'self', '.', 'makeCubiccFunc', ')', 'else', ':', 'solution', '=', 'self', '.', 'makeBasicSolution', '(', 'EndOfPrdvP', ',', 'aNrm', ',', 'interpolator', '=', 'self', '.', 'makeLinearcFunc', ')', 'solution', '=', 'self', '.', 'addMPCandHumanWealth', '(', 'solution', ')', '# add a few things', 'solution', '=', 'self', '.', 'addSSmNrm', '(', 'solution', ')', '# find steady state m', '# Add the value function if requested, as well as the marginal marginal', '# value function if cubic splines were used (to prepare for next period)', 'if', 'self', '.', 'vFuncBool', ':', 'solution', '=', 'self', '.', 'addvFunc', '(', 'solution', ',', 'EndOfPrdvP', ')', 'if', 'self', '.', 'CubicBool', ':', 'solution', '=', 'self', '.', 'addvPPfunc', '(', 'solution', ')', 'return', 'solution']
Solves the single period consumption-saving problem using the method of endogenous gridpoints. Solution includes a consumption function cFunc (using cubic or linear splines), a marginal value function vPfunc, a min- imum acceptable level of normalized market resources mNrmMin, normalized human wealth hNrm, and bounding MPCs MPCmin and MPCmax. It might also have a value function vFunc and marginal marginal value function vPPfunc. Parameters ---------- none Returns ------- solution : ConsumerSolution The solution to the single period consumption-saving problem.
['Solves', 'the', 'single', 'period', 'consumption', '-', 'saving', 'problem', 'using', 'the', 'method', 'of', 'endogenous', 'gridpoints', '.', 'Solution', 'includes', 'a', 'consumption', 'function', 'cFunc', '(', 'using', 'cubic', 'or', 'linear', 'splines', ')', 'a', 'marginal', 'value', 'function', 'vPfunc', 'a', 'min', '-', 'imum', 'acceptable', 'level', 'of', 'normalized', 'market', 'resources', 'mNrmMin', 'normalized', 'human', 'wealth', 'hNrm', 'and', 'bounding', 'MPCs', 'MPCmin', 'and', 'MPCmax', '.', 'It', 'might', 'also', 'have', 'a', 'value', 'function', 'vFunc', 'and', 'marginal', 'marginal', 'value', 'function', 'vPPfunc', '.']
train
https://github.com/econ-ark/HARK/blob/3d184153a189e618a87c9540df1cd12044039cc5/HARK/ConsumptionSaving/ConsIndShockModel.py#L1144-L1180
4,164
gwww/elkm1
elkm1_lib/elements.py
Elements.get_descriptions
def get_descriptions(self, description_type): """ Gets the descriptions for specified type. When complete the callback is called with a list of descriptions """ (desc_type, max_units) = description_type results = [None] * max_units self.elk._descriptions_in_progress[desc_type] = (max_units, results, self._got_desc) self.elk.send(sd_encode(desc_type=desc_type, unit=0))
python
def get_descriptions(self, description_type): """ Gets the descriptions for specified type. When complete the callback is called with a list of descriptions """ (desc_type, max_units) = description_type results = [None] * max_units self.elk._descriptions_in_progress[desc_type] = (max_units, results, self._got_desc) self.elk.send(sd_encode(desc_type=desc_type, unit=0))
['def', 'get_descriptions', '(', 'self', ',', 'description_type', ')', ':', '(', 'desc_type', ',', 'max_units', ')', '=', 'description_type', 'results', '=', '[', 'None', ']', '*', 'max_units', 'self', '.', 'elk', '.', '_descriptions_in_progress', '[', 'desc_type', ']', '=', '(', 'max_units', ',', 'results', ',', 'self', '.', '_got_desc', ')', 'self', '.', 'elk', '.', 'send', '(', 'sd_encode', '(', 'desc_type', '=', 'desc_type', ',', 'unit', '=', '0', ')', ')']
Gets the descriptions for specified type. When complete the callback is called with a list of descriptions
['Gets', 'the', 'descriptions', 'for', 'specified', 'type', '.', 'When', 'complete', 'the', 'callback', 'is', 'called', 'with', 'a', 'list', 'of', 'descriptions']
train
https://github.com/gwww/elkm1/blob/078d0de30840c3fab46f1f8534d98df557931e91/elkm1_lib/elements.py#L90-L100
4,165
CivicSpleen/ambry
ambry/orm/partition.py
Partition.analysis
def analysis(self): """Return an AnalysisPartition proxy, which wraps this partition to provide acess to dataframes, shapely shapes and other analysis services""" if isinstance(self, PartitionProxy): return AnalysisPartition(self._obj) else: return AnalysisPartition(self)
python
def analysis(self): """Return an AnalysisPartition proxy, which wraps this partition to provide acess to dataframes, shapely shapes and other analysis services""" if isinstance(self, PartitionProxy): return AnalysisPartition(self._obj) else: return AnalysisPartition(self)
['def', 'analysis', '(', 'self', ')', ':', 'if', 'isinstance', '(', 'self', ',', 'PartitionProxy', ')', ':', 'return', 'AnalysisPartition', '(', 'self', '.', '_obj', ')', 'else', ':', 'return', 'AnalysisPartition', '(', 'self', ')']
Return an AnalysisPartition proxy, which wraps this partition to provide acess to dataframes, shapely shapes and other analysis services
['Return', 'an', 'AnalysisPartition', 'proxy', 'which', 'wraps', 'this', 'partition', 'to', 'provide', 'acess', 'to', 'dataframes', 'shapely', 'shapes', 'and', 'other', 'analysis', 'services']
train
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/orm/partition.py#L790-L796
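The isinstance check exists so proxies never stack: wrapping an existing proxy would otherwise produce a proxy-of-a-proxy. The unwrapping idiom with stand-in classes:

class Partition:
    pass

class PartitionProxy:
    def __init__(self, obj):
        self._obj = obj

class AnalysisPartition(PartitionProxy):
    pass

def analysis(p):
    # Unwrap an existing proxy so the new proxy always wraps the raw object.
    return AnalysisPartition(p._obj if isinstance(p, PartitionProxy) else p)

raw = Partition()
a1 = analysis(raw)
a2 = analysis(a1)
print(a1._obj is raw, a2._obj is raw)  # True True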
4,166
aloetesting/aloe_django
aloe_django/__init__.py
django_url
def django_url(step, url=None): """ The URL for a page from the test server. :param step: A Gherkin step :param url: If specified, the relative URL to append. """ base_url = step.test.live_server_url if url: return urljoin(base_url, url) else: return base_url
python
def django_url(step, url=None): """ The URL for a page from the test server. :param step: A Gherkin step :param url: If specified, the relative URL to append. """ base_url = step.test.live_server_url if url: return urljoin(base_url, url) else: return base_url
['def', 'django_url', '(', 'step', ',', 'url', '=', 'None', ')', ':', 'base_url', '=', 'step', '.', 'test', '.', 'live_server_url', 'if', 'url', ':', 'return', 'urljoin', '(', 'base_url', ',', 'url', ')', 'else', ':', 'return', 'base_url']
The URL for a page from the test server. :param step: A Gherkin step :param url: If specified, the relative URL to append.
['The', 'URL', 'for', 'a', 'page', 'from', 'the', 'test', 'server', '.']
train
https://github.com/aloetesting/aloe_django/blob/672eac97c97644bfe334e70696a6dc5ddf4ced02/aloe_django/__init__.py#L38-L51
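The helper is a thin wrapper over urljoin, with the falsy-url branch returning the bare server root. The underlying behaviour (the base URL is an assumed live-server address):

from urllib.parse import urljoin

base_url = 'http://localhost:8081'  # assumed live_server_url

print(urljoin(base_url, '/accounts/login/'))  # http://localhost:8081/accounts/login/
print(base_url)                               # what the falsy-url branch returns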
4,167
econ-ark/HARK
HARK/utilities.py
combineIndepDstns
def combineIndepDstns(*distributions): ''' Given n lists (or tuples) whose elements represent n independent, discrete probability spaces (probabilities and values), construct a joint pmf over all combinations of these independent points. Can take multivariate discrete distributions as inputs. Parameters ---------- distributions : [np.array] Arbitrary number of distributions (pmfs). Each pmf is a list or tuple. For each pmf, the first vector is probabilities and all subsequent vectors are values. For each pmf, this should be true: len(X_pmf[0]) == len(X_pmf[j]) for j in range(1,len(distributions)) Returns ------- List of arrays, consisting of: P_out: np.array Probability associated with each point in X_out. X_out: np.array (as many as in *distributions) Discrete points for the joint discrete probability mass function. Written by Nathan Palmer Latest update: 5 July August 2017 by Matthew N White ''' # Very quick and incomplete parameter check: for dist in distributions: assert len(dist[0]) == len(dist[-1]), "len(dist[0]) != len(dist[-1])" # Get information on the distributions dist_lengths = () dist_dims = () for dist in distributions: dist_lengths += (len(dist[0]),) dist_dims += (len(dist)-1,) number_of_distributions = len(distributions) # Initialize lists we will use X_out = [] P_temp = [] # Now loop through the distributions, tiling and flattening as necessary. for dd,dist in enumerate(distributions): # The shape we want before we tile dist_newshape = (1,) * dd + (len(dist[0]),) + \ (1,) * (number_of_distributions - dd) # The tiling we want to do dist_tiles = dist_lengths[:dd] + (1,) + dist_lengths[dd+1:] # Now we are ready to tile. # We don't use the np.meshgrid commands, because they do not # easily support non-symmetric grids. # First deal with probabilities Pmesh = np.tile(dist[0].reshape(dist_newshape),dist_tiles) # Tiling flatP = Pmesh.ravel() # Flatten the tiled arrays P_temp += [flatP,] #Add the flattened arrays to the output lists # Then loop through each value variable for n in range(1,dist_dims[dd]+1): Xmesh = np.tile(dist[n].reshape(dist_newshape),dist_tiles) flatX = Xmesh.ravel() X_out += [flatX,] # We're done getting the flattened X_out arrays we wanted. # However, we have a bunch of flattened P_temp arrays, and just want one # probability array. So get the probability array, P_out, here. P_out = np.prod(np.array(P_temp),axis=0) assert np.isclose(np.sum(P_out),1),'Probabilities do not sum to 1!' return [P_out,] + X_out
python
def combineIndepDstns(*distributions): ''' Given n lists (or tuples) whose elements represent n independent, discrete probability spaces (probabilities and values), construct a joint pmf over all combinations of these independent points. Can take multivariate discrete distributions as inputs. Parameters ---------- distributions : [np.array] Arbitrary number of distributions (pmfs). Each pmf is a list or tuple. For each pmf, the first vector is probabilities and all subsequent vectors are values. For each pmf, this should be true: len(X_pmf[0]) == len(X_pmf[j]) for j in range(1,len(distributions)) Returns ------- List of arrays, consisting of: P_out: np.array Probability associated with each point in X_out. X_out: np.array (as many as in *distributions) Discrete points for the joint discrete probability mass function. Written by Nathan Palmer Latest update: 5 July August 2017 by Matthew N White ''' # Very quick and incomplete parameter check: for dist in distributions: assert len(dist[0]) == len(dist[-1]), "len(dist[0]) != len(dist[-1])" # Get information on the distributions dist_lengths = () dist_dims = () for dist in distributions: dist_lengths += (len(dist[0]),) dist_dims += (len(dist)-1,) number_of_distributions = len(distributions) # Initialize lists we will use X_out = [] P_temp = [] # Now loop through the distributions, tiling and flattening as necessary. for dd,dist in enumerate(distributions): # The shape we want before we tile dist_newshape = (1,) * dd + (len(dist[0]),) + \ (1,) * (number_of_distributions - dd) # The tiling we want to do dist_tiles = dist_lengths[:dd] + (1,) + dist_lengths[dd+1:] # Now we are ready to tile. # We don't use the np.meshgrid commands, because they do not # easily support non-symmetric grids. # First deal with probabilities Pmesh = np.tile(dist[0].reshape(dist_newshape),dist_tiles) # Tiling flatP = Pmesh.ravel() # Flatten the tiled arrays P_temp += [flatP,] #Add the flattened arrays to the output lists # Then loop through each value variable for n in range(1,dist_dims[dd]+1): Xmesh = np.tile(dist[n].reshape(dist_newshape),dist_tiles) flatX = Xmesh.ravel() X_out += [flatX,] # We're done getting the flattened X_out arrays we wanted. # However, we have a bunch of flattened P_temp arrays, and just want one # probability array. So get the probability array, P_out, here. P_out = np.prod(np.array(P_temp),axis=0) assert np.isclose(np.sum(P_out),1),'Probabilities do not sum to 1!' return [P_out,] + X_out
['def', 'combineIndepDstns', '(', '*', 'distributions', ')', ':', '# Very quick and incomplete parameter check:', 'for', 'dist', 'in', 'distributions', ':', 'assert', 'len', '(', 'dist', '[', '0', ']', ')', '==', 'len', '(', 'dist', '[', '-', '1', ']', ')', ',', '"len(dist[0]) != len(dist[-1])"', '# Get information on the distributions', 'dist_lengths', '=', '(', ')', 'dist_dims', '=', '(', ')', 'for', 'dist', 'in', 'distributions', ':', 'dist_lengths', '+=', '(', 'len', '(', 'dist', '[', '0', ']', ')', ',', ')', 'dist_dims', '+=', '(', 'len', '(', 'dist', ')', '-', '1', ',', ')', 'number_of_distributions', '=', 'len', '(', 'distributions', ')', '# Initialize lists we will use', 'X_out', '=', '[', ']', 'P_temp', '=', '[', ']', '# Now loop through the distributions, tiling and flattening as necessary.', 'for', 'dd', ',', 'dist', 'in', 'enumerate', '(', 'distributions', ')', ':', '# The shape we want before we tile', 'dist_newshape', '=', '(', '1', ',', ')', '*', 'dd', '+', '(', 'len', '(', 'dist', '[', '0', ']', ')', ',', ')', '+', '(', '1', ',', ')', '*', '(', 'number_of_distributions', '-', 'dd', ')', '# The tiling we want to do', 'dist_tiles', '=', 'dist_lengths', '[', ':', 'dd', ']', '+', '(', '1', ',', ')', '+', 'dist_lengths', '[', 'dd', '+', '1', ':', ']', '# Now we are ready to tile.', "# We don't use the np.meshgrid commands, because they do not", '# easily support non-symmetric grids.', '# First deal with probabilities', 'Pmesh', '=', 'np', '.', 'tile', '(', 'dist', '[', '0', ']', '.', 'reshape', '(', 'dist_newshape', ')', ',', 'dist_tiles', ')', '# Tiling', 'flatP', '=', 'Pmesh', '.', 'ravel', '(', ')', '# Flatten the tiled arrays', 'P_temp', '+=', '[', 'flatP', ',', ']', '#Add the flattened arrays to the output lists', '# Then loop through each value variable', 'for', 'n', 'in', 'range', '(', '1', ',', 'dist_dims', '[', 'dd', ']', '+', '1', ')', ':', 'Xmesh', '=', 'np', '.', 'tile', '(', 'dist', '[', 'n', ']', '.', 'reshape', '(', 'dist_newshape', ')', ',', 'dist_tiles', ')', 'flatX', '=', 'Xmesh', '.', 'ravel', '(', ')', 'X_out', '+=', '[', 'flatX', ',', ']', "# We're done getting the flattened X_out arrays we wanted.", '# However, we have a bunch of flattened P_temp arrays, and just want one', '# probability array. So get the probability array, P_out, here.', 'P_out', '=', 'np', '.', 'prod', '(', 'np', '.', 'array', '(', 'P_temp', ')', ',', 'axis', '=', '0', ')', 'assert', 'np', '.', 'isclose', '(', 'np', '.', 'sum', '(', 'P_out', ')', ',', '1', ')', ',', "'Probabilities do not sum to 1!'", 'return', '[', 'P_out', ',', ']', '+', 'X_out']
Given n lists (or tuples) whose elements represent n independent, discrete probability spaces (probabilities and values), construct a joint pmf over all combinations of these independent points. Can take multivariate discrete distributions as inputs. Parameters ---------- distributions : [np.array] Arbitrary number of distributions (pmfs). Each pmf is a list or tuple. For each pmf, the first vector is probabilities and all subsequent vectors are values. For each pmf, this should be true: len(X_pmf[0]) == len(X_pmf[j]) for j in range(1,len(distributions)) Returns ------- List of arrays, consisting of: P_out: np.array Probability associated with each point in X_out. X_out: np.array (as many as in *distributions) Discrete points for the joint discrete probability mass function. Written by Nathan Palmer Latest update: 5 July August 2017 by Matthew N White
['Given', 'n', 'lists', '(', 'or', 'tuples', ')', 'whose', 'elements', 'represent', 'n', 'independent', 'discrete', 'probability', 'spaces', '(', 'probabilities', 'and', 'values', ')', 'construct', 'a', 'joint', 'pmf', 'over', 'all', 'combinations', 'of', 'these', 'independent', 'points', '.', 'Can', 'take', 'multivariate', 'discrete', 'distributions', 'as', 'inputs', '.']
train
https://github.com/econ-ark/HARK/blob/3d184153a189e618a87c9540df1cd12044039cc5/HARK/utilities.py#L832-L907
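For the two-distribution case the tiling reduces to an outer product of the probability vectors plus repeat/tile on the value grids; the general code does the same across n dimensions. A numpy-only illustration in the docstring's [probabilities, values] form:

import numpy as np

X = [np.array([0.5, 0.5]), np.array([1.0, 2.0])]      # pmf of X
Y = [np.array([0.25, 0.75]), np.array([10.0, 20.0])]  # pmf of Y

P_out = np.outer(X[0], Y[0]).ravel()   # joint probabilities
X_out = np.repeat(X[1], len(Y[1]))     # X value at each joint point
Y_out = np.tile(Y[1], len(X[1]))       # Y value at each joint point

print(P_out)          # [0.125 0.375 0.125 0.375]
print(X_out, Y_out)   # [1. 1. 2. 2.] [10. 20. 10. 20.]
assert np.isclose(P_out.sum(), 1)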
4,168
CiscoUcs/UcsPythonSDK
src/UcsSdk/UcsHandle_Edit.py
UcsHandle.SendUcsFirmware
def SendUcsFirmware(self, path=None, dumpXml=False): """ Uploads a specific CCO Image on UCS. - path specifies the path of the image to be uploaded. """ from UcsBase import WriteUcsWarning, UcsUtils, ManagedObject, WriteObject, UcsUtils, UcsValidationException, \ UcsException from Ucs import ConfigConfig from Mos import FirmwareDownloader if (self._transactionInProgress): raise UcsValidationException( "UCS transaction in progress. Cannot execute SendUcsFirmware. Complete or Undo UCS transaction.") # raise Exception("UCS transaction in progress. Cannot execute SendUcsFirmware. Complete or Undo UCS transaction.") if not path: raise UcsValidationException("path parameter is not provided.") # raise Exception("Please provide path") if not os.path.exists(path): raise UcsValidationException("Image not found <%s>" % (path)) # raise Exception("Image not found <%s>" %(path)) dn = None filePath = path localFile = os.path.basename(filePath) # Exit if image already exist on UCSM topSystem = ManagedObject(NamingId.TOP_SYSTEM) firmwareCatalogue = ManagedObject(NamingId.FIRMWARE_CATALOGUE) firmwareDistributable = ManagedObject(NamingId.FIRMWARE_DISTRIBUTABLE) firmwareDistributable.Name = localFile dn = UcsUtils.MakeDn([topSystem.MakeRn(), firmwareCatalogue.MakeRn(), firmwareDistributable.MakeRn()]) crDn = self.ConfigResolveDn(dn, inHierarchical=YesOrNo.FALSE, dumpXml=dumpXml) if (crDn.OutConfig.GetChildCount() > 0): raise UcsValidationException("Image file <%s> already exist on FI." % (filePath)) # raise Exception("Image file <%s> already exist on FI." %(filePath)) # Create object of type <firmwareDownloader> firmwareDownloader = ManagedObject(NamingId.FIRMWARE_DOWNLOADER) firmwareDownloader.FileName = localFile dn = UcsUtils.MakeDn([topSystem.MakeRn(), firmwareCatalogue.MakeRn(), firmwareDownloader.MakeRn()]) firmwareDownloader.Dn = dn firmwareDownloader.Status = Status.CREATED firmwareDownloader.FileName = localFile firmwareDownloader.Server = FirmwareDownloader.CONST_PROTOCOL_LOCAL firmwareDownloader.Protocol = FirmwareDownloader.CONST_PROTOCOL_LOCAL inConfig = ConfigConfig() inConfig.AddChild(firmwareDownloader) uri = "%s/operations/file-%s/image.txt" % (self.Uri(), localFile) progress = Progress() stream = file_with_callback(filePath, 'rb', progress.update, filePath) request = urllib2.Request(uri) request.add_header('Cookie', 'ucsm-cookie=%s' % (self._cookie)) request.add_data(stream) response = urllib2.urlopen(request).read() if not response: raise UcsValidationException("Unable to upload properly.") # WriteUcsWarning("Unable to upload properly.") ccm = self.ConfigConfMo(dn=dn, inConfig=inConfig, inHierarchical=YesOrNo.FALSE, dumpXml=dumpXml) if (ccm.errorCode != 0): raise UcsException(ccm.errorCode, ccm.errorDescr) return ccm.OutConfig.GetChild()
python
def SendUcsFirmware(self, path=None, dumpXml=False): """ Uploads a specific CCO Image on UCS. - path specifies the path of the image to be uploaded. """ from UcsBase import WriteUcsWarning, UcsUtils, ManagedObject, WriteObject, UcsUtils, UcsValidationException, \ UcsException from Ucs import ConfigConfig from Mos import FirmwareDownloader if (self._transactionInProgress): raise UcsValidationException( "UCS transaction in progress. Cannot execute SendUcsFirmware. Complete or Undo UCS transaction.") # raise Exception("UCS transaction in progress. Cannot execute SendUcsFirmware. Complete or Undo UCS transaction.") if not path: raise UcsValidationException("path parameter is not provided.") # raise Exception("Please provide path") if not os.path.exists(path): raise UcsValidationException("Image not found <%s>" % (path)) # raise Exception("Image not found <%s>" %(path)) dn = None filePath = path localFile = os.path.basename(filePath) # Exit if image already exist on UCSM topSystem = ManagedObject(NamingId.TOP_SYSTEM) firmwareCatalogue = ManagedObject(NamingId.FIRMWARE_CATALOGUE) firmwareDistributable = ManagedObject(NamingId.FIRMWARE_DISTRIBUTABLE) firmwareDistributable.Name = localFile dn = UcsUtils.MakeDn([topSystem.MakeRn(), firmwareCatalogue.MakeRn(), firmwareDistributable.MakeRn()]) crDn = self.ConfigResolveDn(dn, inHierarchical=YesOrNo.FALSE, dumpXml=dumpXml) if (crDn.OutConfig.GetChildCount() > 0): raise UcsValidationException("Image file <%s> already exist on FI." % (filePath)) # raise Exception("Image file <%s> already exist on FI." %(filePath)) # Create object of type <firmwareDownloader> firmwareDownloader = ManagedObject(NamingId.FIRMWARE_DOWNLOADER) firmwareDownloader.FileName = localFile dn = UcsUtils.MakeDn([topSystem.MakeRn(), firmwareCatalogue.MakeRn(), firmwareDownloader.MakeRn()]) firmwareDownloader.Dn = dn firmwareDownloader.Status = Status.CREATED firmwareDownloader.FileName = localFile firmwareDownloader.Server = FirmwareDownloader.CONST_PROTOCOL_LOCAL firmwareDownloader.Protocol = FirmwareDownloader.CONST_PROTOCOL_LOCAL inConfig = ConfigConfig() inConfig.AddChild(firmwareDownloader) uri = "%s/operations/file-%s/image.txt" % (self.Uri(), localFile) progress = Progress() stream = file_with_callback(filePath, 'rb', progress.update, filePath) request = urllib2.Request(uri) request.add_header('Cookie', 'ucsm-cookie=%s' % (self._cookie)) request.add_data(stream) response = urllib2.urlopen(request).read() if not response: raise UcsValidationException("Unable to upload properly.") # WriteUcsWarning("Unable to upload properly.") ccm = self.ConfigConfMo(dn=dn, inConfig=inConfig, inHierarchical=YesOrNo.FALSE, dumpXml=dumpXml) if (ccm.errorCode != 0): raise UcsException(ccm.errorCode, ccm.errorDescr) return ccm.OutConfig.GetChild()
['def', 'SendUcsFirmware', '(', 'self', ',', 'path', '=', 'None', ',', 'dumpXml', '=', 'False', ')', ':', 'from', 'UcsBase', 'import', 'WriteUcsWarning', ',', 'UcsUtils', ',', 'ManagedObject', ',', 'WriteObject', ',', 'UcsUtils', ',', 'UcsValidationException', ',', 'UcsException', 'from', 'Ucs', 'import', 'ConfigConfig', 'from', 'Mos', 'import', 'FirmwareDownloader', 'if', '(', 'self', '.', '_transactionInProgress', ')', ':', 'raise', 'UcsValidationException', '(', '"UCS transaction in progress. Cannot execute SendUcsFirmware. Complete or Undo UCS transaction."', ')', '# raise Exception("UCS transaction in progress. Cannot execute SendUcsFirmware. Complete or Undo UCS transaction.")', 'if', 'not', 'path', ':', 'raise', 'UcsValidationException', '(', '"path parameter is not provided."', ')', '# raise Exception("Please provide path")', 'if', 'not', 'os', '.', 'path', '.', 'exists', '(', 'path', ')', ':', 'raise', 'UcsValidationException', '(', '"Image not found <%s>"', '%', '(', 'path', ')', ')', '# raise Exception("Image not found <%s>" %(path))', 'dn', '=', 'None', 'filePath', '=', 'path', 'localFile', '=', 'os', '.', 'path', '.', 'basename', '(', 'filePath', ')', '# Exit if image already exist on UCSM', 'topSystem', '=', 'ManagedObject', '(', 'NamingId', '.', 'TOP_SYSTEM', ')', 'firmwareCatalogue', '=', 'ManagedObject', '(', 'NamingId', '.', 'FIRMWARE_CATALOGUE', ')', 'firmwareDistributable', '=', 'ManagedObject', '(', 'NamingId', '.', 'FIRMWARE_DISTRIBUTABLE', ')', 'firmwareDistributable', '.', 'Name', '=', 'localFile', 'dn', '=', 'UcsUtils', '.', 'MakeDn', '(', '[', 'topSystem', '.', 'MakeRn', '(', ')', ',', 'firmwareCatalogue', '.', 'MakeRn', '(', ')', ',', 'firmwareDistributable', '.', 'MakeRn', '(', ')', ']', ')', 'crDn', '=', 'self', '.', 'ConfigResolveDn', '(', 'dn', ',', 'inHierarchical', '=', 'YesOrNo', '.', 'FALSE', ',', 'dumpXml', '=', 'dumpXml', ')', 'if', '(', 'crDn', '.', 'OutConfig', '.', 'GetChildCount', '(', ')', '>', '0', ')', ':', 'raise', 'UcsValidationException', '(', '"Image file <%s> already exist on FI."', '%', '(', 'filePath', ')', ')', '# raise Exception("Image file <%s> already exist on FI." %(filePath))', '# Create object of type <firmwareDownloader>', 'firmwareDownloader', '=', 'ManagedObject', '(', 'NamingId', '.', 'FIRMWARE_DOWNLOADER', ')', 'firmwareDownloader', '.', 'FileName', '=', 'localFile', 'dn', '=', 'UcsUtils', '.', 'MakeDn', '(', '[', 'topSystem', '.', 'MakeRn', '(', ')', ',', 'firmwareCatalogue', '.', 'MakeRn', '(', ')', ',', 'firmwareDownloader', '.', 'MakeRn', '(', ')', ']', ')', 'firmwareDownloader', '.', 'Dn', '=', 'dn', 'firmwareDownloader', '.', 'Status', '=', 'Status', '.', 'CREATED', 'firmwareDownloader', '.', 'FileName', '=', 'localFile', 'firmwareDownloader', '.', 'Server', '=', 'FirmwareDownloader', '.', 'CONST_PROTOCOL_LOCAL', 'firmwareDownloader', '.', 'Protocol', '=', 'FirmwareDownloader', '.', 'CONST_PROTOCOL_LOCAL', 'inConfig', '=', 'ConfigConfig', '(', ')', 'inConfig', '.', 'AddChild', '(', 'firmwareDownloader', ')', 'uri', '=', '"%s/operations/file-%s/image.txt"', '%', '(', 'self', '.', 'Uri', '(', ')', ',', 'localFile', ')', 'progress', '=', 'Progress', '(', ')', 'stream', '=', 'file_with_callback', '(', 'filePath', ',', "'rb'", ',', 'progress', '.', 'update', ',', 'filePath', ')', 'request', '=', 'urllib2', '.', 'Request', '(', 'uri', ')', 'request', '.', 'add_header', '(', "'Cookie'", ',', "'ucsm-cookie=%s'", '%', '(', 'self', '.', '_cookie', ')', ')', 'request', '.', 'add_data', '(', 'stream', ')', 'response', '=', 'urllib2', '.', 'urlopen', '(', 'request', ')', '.', 'read', '(', ')', 'if', 'not', 'response', ':', 'raise', 'UcsValidationException', '(', '"Unable to upload properly."', ')', '# WriteUcsWarning("Unable to upload properly.")', 'ccm', '=', 'self', '.', 'ConfigConfMo', '(', 'dn', '=', 'dn', ',', 'inConfig', '=', 'inConfig', ',', 'inHierarchical', '=', 'YesOrNo', '.', 'FALSE', ',', 'dumpXml', '=', 'dumpXml', ')', 'if', '(', 'ccm', '.', 'errorCode', '!=', '0', ')', ':', 'raise', 'UcsException', '(', 'ccm', '.', 'errorCode', ',', 'ccm', '.', 'errorDescr', ')', 'return', 'ccm', '.', 'OutConfig', '.', 'GetChild', '(', ')']
Uploads a specific CCO Image on UCS. - path specifies the path of the image to be uploaded.
['Uploads', 'a', 'specific', 'CCO', 'Image', 'on', 'UCS', '.', '-', 'path', 'specifies', 'the', 'path', 'of', 'the', 'image', 'to', 'be', 'uploaded', '.']
train
https://github.com/CiscoUcs/UcsPythonSDK/blob/bf6b07d6abeacb922c92b198352eda4eb9e4629b/src/UcsSdk/UcsHandle_Edit.py#L1303-L1375
4,169
rpkilby/SurveyGizmo
surveygizmo/api/base.py
Resource.page
def page(self, value): """ Set the page which will be returned. :param value: 'page' parameter value for the rest api call :type value: str Take a look at https://apihelp.surveygizmo.com/help/surveyresponse-sub-object """ instance = copy(self) instance._filters.append({ 'page': value }) return instance
python
def page(self, value): """ Set the page which will be returned. :param value: 'page' parameter value for the rest api call :type value: str Take a look at https://apihelp.surveygizmo.com/help/surveyresponse-sub-object """ instance = copy(self) instance._filters.append({ 'page': value }) return instance
['def', 'page', '(', 'self', ',', 'value', ')', ':', 'instance', '=', 'copy', '(', 'self', ')', 'instance', '.', '_filters', '.', 'append', '(', '{', "'page'", ':', 'value', '}', ')', 'return', 'instance']
Set the page which will be returned. :param value: 'page' parameter value for the rest api call :type value: str Take a look at https://apihelp.surveygizmo.com/help/surveyresponse-sub-object
['Set', 'the', 'page', 'which', 'will', 'be', 'returned', '.', ':', 'param', 'value', ':', 'page', 'parameter', 'value', 'for', 'the', 'rest', 'api', 'call', ':', 'type', 'value', ':', 'str']
train
https://github.com/rpkilby/SurveyGizmo/blob/a097091dc7dcfb58f70242fb1becabc98df049a5/surveygizmo/api/base.py#L86-L100
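page returns a modified copy so filter chains do not mutate the original query object. One wrinkle: copy() is shallow, so the copied instance shares the _filters list with the original, and append therefore mutates both. A sketch that keeps the builder style but rebuilds the list to avoid the aliasing (a stand-in class, not the real SurveyGizmo resource):

from copy import copy

class Query:
    def __init__(self):
        self._filters = []

    def page(self, value):
        instance = copy(self)
        instance._filters = self._filters + [{'page': value}]  # new list, no aliasing
        return instance

q1 = Query()
q2 = q1.page(2)
print(q1._filters, q2._filters)  # [] [{'page': 2}]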
4,170
edibledinos/pwnypack
pwnypack/shellcode/base.py
BaseEnvironment.reg_add
def reg_add(self, reg, value): """ Add a value to a register. The value can be another :class:`Register`, an :class:`Offset`, a :class:`Buffer`, an integer or ``None``. Arguments: reg(pwnypack.shellcode.types.Register): The register to add the value to. value: The value to add to the register. Returns: list: A list of mnemonics that will add ``value`` to ``reg``. """ if value is None: return [] elif isinstance(value, Register): return self.reg_add_reg(reg, value) elif isinstance(value, (Buffer, six.integer_types)): if isinstance(reg, Buffer): value = sum(len(v) for v in six.iterkeys(self.data)) + value.offset if not value: return [] reg_width = self.REGISTER_WIDTH[reg] if value < -2 ** (reg_width-1): raise ValueError('%d does not fit %s' % (value, reg)) elif value >= 2 ** reg_width: raise ValueError('%d does not fit %s' % (value, reg)) if value > 0: return self.reg_add_imm(reg, value) else: return self.reg_sub_imm(reg, -value) else: raise ValueError('Invalid argument type "%s"' % repr(value))
python
def reg_add(self, reg, value): """ Add a value to a register. The value can be another :class:`Register`, an :class:`Offset`, a :class:`Buffer`, an integer or ``None``. Arguments: reg(pwnypack.shellcode.types.Register): The register to add the value to. value: The value to add to the register. Returns: list: A list of mnemonics that will add ``value`` to ``reg``. """ if value is None: return [] elif isinstance(value, Register): return self.reg_add_reg(reg, value) elif isinstance(value, (Buffer, six.integer_types)): if isinstance(reg, Buffer): value = sum(len(v) for v in six.iterkeys(self.data)) + value.offset if not value: return [] reg_width = self.REGISTER_WIDTH[reg] if value < -2 ** (reg_width-1): raise ValueError('%d does not fit %s' % (value, reg)) elif value >= 2 ** reg_width: raise ValueError('%d does not fit %s' % (value, reg)) if value > 0: return self.reg_add_imm(reg, value) else: return self.reg_sub_imm(reg, -value) else: raise ValueError('Invalid argument type "%s"' % repr(value))
['def', 'reg_add', '(', 'self', ',', 'reg', ',', 'value', ')', ':', 'if', 'value', 'is', 'None', ':', 'return', '[', ']', 'elif', 'isinstance', '(', 'value', ',', 'Register', ')', ':', 'return', 'self', '.', 'reg_add_reg', '(', 'reg', ',', 'value', ')', 'elif', 'isinstance', '(', 'value', ',', '(', 'Buffer', ',', 'six', '.', 'integer_types', ')', ')', ':', 'if', 'isinstance', '(', 'reg', ',', 'Buffer', ')', ':', 'value', '=', 'sum', '(', 'len', '(', 'v', ')', 'for', 'v', 'in', 'six', '.', 'iterkeys', '(', 'self', '.', 'data', ')', ')', '+', 'value', '.', 'offset', 'if', 'not', 'value', ':', 'return', '[', ']', 'reg_width', '=', 'self', '.', 'REGISTER_WIDTH', '[', 'reg', ']', 'if', 'value', '<', '-', '2', '**', '(', 'reg_width', '-', '1', ')', ':', 'raise', 'ValueError', '(', "'%d does not fit %s'", '%', '(', 'value', ',', 'reg', ')', ')', 'elif', 'value', '>=', '2', '**', 'reg_width', ':', 'raise', 'ValueError', '(', "'%d does not fit %s'", '%', '(', 'value', ',', 'reg', ')', ')', 'if', 'value', '>', '0', ':', 'return', 'self', '.', 'reg_add_imm', '(', 'reg', ',', 'value', ')', 'else', ':', 'return', 'self', '.', 'reg_sub_imm', '(', 'reg', ',', '-', 'value', ')', 'else', ':', 'raise', 'ValueError', '(', '\'Invalid argument type "%s"\'', '%', 'repr', '(', 'value', ')', ')']
Add a value to a register. The value can be another :class:`Register`, an :class:`Offset`, a :class:`Buffer`, an integer or ``None``. Arguments: reg(pwnypack.shellcode.types.Register): The register to add the value to. value: The value to add to the register. Returns: list: A list of mnemonics that will add ``value`` to ``reg``.
['Add', 'a', 'value', 'to', 'a', 'register', '.', 'The', 'value', 'can', 'be', 'another', ':', 'class', ':', 'Register', 'an', ':', 'class', ':', 'Offset', 'a', ':', 'class', ':', 'Buffer', 'an', 'integer', 'or', 'None', '.']
train
https://github.com/edibledinos/pwnypack/blob/e0a5a8e6ef3f4f1f7e1b91ee379711f4a49cb0e6/pwnypack/shellcode/base.py#L228-L267
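A minimal standalone sketch of the range check reg_add applies before choosing an add or subtract mnemonic; the helper name fits_register is illustrative and not part of pwnypack:

def fits_register(value, reg_width):
    # reg_add accepts immediates from -2**(reg_width-1) (the most
    # negative signed value) up to, but not including, 2**reg_width.
    return -2 ** (reg_width - 1) <= value < 2 ** reg_width

assert fits_register(255, 8)       # largest unsigned 8-bit immediate
assert fits_register(-128, 8)      # most negative signed 8-bit immediate
assert not fits_register(256, 8)   # out of range: reg_add raises ValueError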
4,171
PyGithub/PyGithub
github/Label.py
Label.edit
def edit(self, name, color, description=github.GithubObject.NotSet): """ :calls: `PATCH /repos/:owner/:repo/labels/:name <http://developer.github.com/v3/issues/labels>`_ :param name: string :param color: string :param description: string :rtype: None """ assert isinstance(name, (str, unicode)), name assert isinstance(color, (str, unicode)), color assert description is github.GithubObject.NotSet or isinstance(description, (str, unicode)), description post_parameters = { "name": name, "color": color, } if description is not github.GithubObject.NotSet: post_parameters["description"] = description headers, data = self._requester.requestJsonAndCheck( "PATCH", self.url, input=post_parameters, headers={'Accept': Consts.mediaTypeLabelDescriptionSearchPreview} ) self._useAttributes(data)
python
def edit(self, name, color, description=github.GithubObject.NotSet): """ :calls: `PATCH /repos/:owner/:repo/labels/:name <http://developer.github.com/v3/issues/labels>`_ :param name: string :param color: string :param description: string :rtype: None """ assert isinstance(name, (str, unicode)), name assert isinstance(color, (str, unicode)), color assert description is github.GithubObject.NotSet or isinstance(description, (str, unicode)), description post_parameters = { "name": name, "color": color, } if description is not github.GithubObject.NotSet: post_parameters["description"] = description headers, data = self._requester.requestJsonAndCheck( "PATCH", self.url, input=post_parameters, headers={'Accept': Consts.mediaTypeLabelDescriptionSearchPreview} ) self._useAttributes(data)
['def', 'edit', '(', 'self', ',', 'name', ',', 'color', ',', 'description', '=', 'github', '.', 'GithubObject', '.', 'NotSet', ')', ':', 'assert', 'isinstance', '(', 'name', ',', '(', 'str', ',', 'unicode', ')', ')', ',', 'name', 'assert', 'isinstance', '(', 'color', ',', '(', 'str', ',', 'unicode', ')', ')', ',', 'color', 'assert', 'description', 'is', 'github', '.', 'GithubObject', '.', 'NotSet', 'or', 'isinstance', '(', 'description', ',', '(', 'str', ',', 'unicode', ')', ')', ',', 'description', 'post_parameters', '=', '{', '"name"', ':', 'name', ',', '"color"', ':', 'color', ',', '}', 'if', 'description', 'is', 'not', 'github', '.', 'GithubObject', '.', 'NotSet', ':', 'post_parameters', '[', '"description"', ']', '=', 'description', 'headers', ',', 'data', '=', 'self', '.', '_requester', '.', 'requestJsonAndCheck', '(', '"PATCH"', ',', 'self', '.', 'url', ',', 'input', '=', 'post_parameters', ',', 'headers', '=', '{', "'Accept'", ':', 'Consts', '.', 'mediaTypeLabelDescriptionSearchPreview', '}', ')', 'self', '.', '_useAttributes', '(', 'data', ')']
:calls: `PATCH /repos/:owner/:repo/labels/:name <http://developer.github.com/v3/issues/labels>`_ :param name: string :param color: string :param description: string :rtype: None
[':', 'calls', ':', 'PATCH', '/', 'repos', '/', ':', 'owner', '/', ':', 'repo', '/', 'labels', '/', ':', 'name', '<http', ':', '//', 'developer', '.', 'github', '.', 'com', '/', 'v3', '/', 'issues', '/', 'labels', '>', '_', ':', 'param', 'name', ':', 'string', ':', 'param', 'color', ':', 'string', ':', 'param', 'description', ':', 'string', ':', 'rtype', ':', 'None']
train
https://github.com/PyGithub/PyGithub/blob/f716df86bbe7dc276c6596699fa9712b61ef974c/github/Label.py#L91-L114
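The description argument above defaults to a sentinel rather than None, so an explicit empty string is still sent to the API. A self-contained sketch of that pattern, with NotSet and the toy edit function standing in for the PyGithub objects:

NotSet = object()  # unique sentinel: distinguishes "omitted" from None or ""

def edit(name, color, description=NotSet):
    post_parameters = {"name": name, "color": color}
    if description is not NotSet:
        post_parameters["description"] = description
    return post_parameters

print(edit("bug", "d73a4a"))                    # description key absent
print(edit("bug", "d73a4a", description=""))    # explicit empty string kept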
4,172
cbclab/MOT
mot/mcmc_diagnostics.py
minimum_multivariate_ess
def minimum_multivariate_ess(nmr_params, alpha=0.05, epsilon=0.05): r"""Calculate the minimum multivariate Effective Sample Size you will need to obtain the desired precision. This implements the inequality from Vats et al. (2016): .. math:: \widehat{ESS} \geq \frac{2^{2/p}\pi}{(p\Gamma(p/2))^{2/p}} \frac{\chi^{2}_{1-\alpha,p}}{\epsilon^{2}} Where :math:`p` is the number of free parameters. Args: nmr_params (int): the number of free parameters in the model alpha (float): the level of confidence of the confidence region. For example, an alpha of 0.05 means that we want to be in a 95% confidence region. epsilon (float): the level of precision in our multivariate ESS estimate. An epsilon of 0.05 means that we expect that the Monte Carlo error is 5% of the uncertainty in the target distribution. Returns: float: the minimum multivariate Effective Sample Size that one should aim for in MCMC sample to obtain the desired confidence region with the desired precision. References: Vats D, Flegal J, Jones G (2016). Multivariate Output Analysis for Markov Chain Monte Carlo. arXiv:1512.07713v2 [math.ST] """ tmp = 2.0 / nmr_params log_min_ess = tmp * np.log(2) + np.log(np.pi) - tmp * (np.log(nmr_params) + gammaln(nmr_params / 2)) \ + np.log(chi2.ppf(1 - alpha, nmr_params)) - 2 * np.log(epsilon) return int(round(np.exp(log_min_ess)))
python
def minimum_multivariate_ess(nmr_params, alpha=0.05, epsilon=0.05): r"""Calculate the minimum multivariate Effective Sample Size you will need to obtain the desired precision. This implements the inequality from Vats et al. (2016): .. math:: \widehat{ESS} \geq \frac{2^{2/p}\pi}{(p\Gamma(p/2))^{2/p}} \frac{\chi^{2}_{1-\alpha,p}}{\epsilon^{2}} Where :math:`p` is the number of free parameters. Args: nmr_params (int): the number of free parameters in the model alpha (float): the level of confidence of the confidence region. For example, an alpha of 0.05 means that we want to be in a 95% confidence region. epsilon (float): the level of precision in our multivariate ESS estimate. An epsilon of 0.05 means that we expect that the Monte Carlo error is 5% of the uncertainty in the target distribution. Returns: float: the minimum multivariate Effective Sample Size that one should aim for in MCMC sample to obtain the desired confidence region with the desired precision. References: Vats D, Flegal J, Jones G (2016). Multivariate Output Analysis for Markov Chain Monte Carlo. arXiv:1512.07713v2 [math.ST] """ tmp = 2.0 / nmr_params log_min_ess = tmp * np.log(2) + np.log(np.pi) - tmp * (np.log(nmr_params) + gammaln(nmr_params / 2)) \ + np.log(chi2.ppf(1 - alpha, nmr_params)) - 2 * np.log(epsilon) return int(round(np.exp(log_min_ess)))
['def', 'minimum_multivariate_ess', '(', 'nmr_params', ',', 'alpha', '=', '0.05', ',', 'epsilon', '=', '0.05', ')', ':', 'tmp', '=', '2.0', '/', 'nmr_params', 'log_min_ess', '=', 'tmp', '*', 'np', '.', 'log', '(', '2', ')', '+', 'np', '.', 'log', '(', 'np', '.', 'pi', ')', '-', 'tmp', '*', '(', 'np', '.', 'log', '(', 'nmr_params', ')', '+', 'gammaln', '(', 'nmr_params', '/', '2', ')', ')', '+', 'np', '.', 'log', '(', 'chi2', '.', 'ppf', '(', '1', '-', 'alpha', ',', 'nmr_params', ')', ')', '-', '2', '*', 'np', '.', 'log', '(', 'epsilon', ')', 'return', 'int', '(', 'round', '(', 'np', '.', 'exp', '(', 'log_min_ess', ')', ')', ')']
r"""Calculate the minimum multivariate Effective Sample Size you will need to obtain the desired precision. This implements the inequality from Vats et al. (2016): .. math:: \widehat{ESS} \geq \frac{2^{2/p}\pi}{(p\Gamma(p/2))^{2/p}} \frac{\chi^{2}_{1-\alpha,p}}{\epsilon^{2}} Where :math:`p` is the number of free parameters. Args: nmr_params (int): the number of free parameters in the model alpha (float): the level of confidence of the confidence region. For example, an alpha of 0.05 means that we want to be in a 95% confidence region. epsilon (float): the level of precision in our multivariate ESS estimate. An epsilon of 0.05 means that we expect that the Monte Carlo error is 5% of the uncertainty in the target distribution. Returns: float: the minimum multivariate Effective Sample Size that one should aim for in MCMC sample to obtain the desired confidence region with the desired precision. References: Vats D, Flegal J, Jones G (2016). Multivariate Output Analysis for Markov Chain Monte Carlo. arXiv:1512.07713v2 [math.ST]
['r', 'Calculate', 'the', 'minimum', 'multivariate', 'Effective', 'Sample', 'Size', 'you', 'will', 'need', 'to', 'obtain', 'the', 'desired', 'precision', '.']
train
https://github.com/cbclab/MOT/blob/fb3243b65025705842e82704705c00902f9a35af/mot/mcmc_diagnostics.py#L258-L288
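A standalone check of the Vats et al. bound, computed in log space the same way as above; it assumes numpy and scipy are available:

import numpy as np
from scipy.special import gammaln
from scipy.stats import chi2

def min_ess(p, alpha=0.05, epsilon=0.05):
    # log of 2**(2/p) * pi / (p * Gamma(p/2))**(2/p)
    #        * chi2.ppf(1 - alpha, p) / epsilon**2
    tmp = 2.0 / p
    log_min_ess = (tmp * np.log(2) + np.log(np.pi)
                   - tmp * (np.log(p) + gammaln(p / 2))
                   + np.log(chi2.ppf(1 - alpha, p)) - 2 * np.log(epsilon))
    return int(round(np.exp(log_min_ess)))

print(min_ess(2))  # ~7529: pi * chi2.ppf(0.95, 2) / 0.05**2 when p == 2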
4,173
latchset/custodia
src/custodia/log.py
getLogger
def getLogger(name): """Create logger with custom exception() method """ def exception(self, msg, *args, **kwargs): extra = kwargs.setdefault('extra', {}) extra['exc_fullstack'] = self.isEnabledFor(logging.DEBUG) kwargs['exc_info'] = True self.log(logging.ERROR, msg, *args, **kwargs) logger = logging.getLogger(name) logger.exception = six.create_bound_method(exception, logger) return logger
python
def getLogger(name): """Create logger with custom exception() method """ def exception(self, msg, *args, **kwargs): extra = kwargs.setdefault('extra', {}) extra['exc_fullstack'] = self.isEnabledFor(logging.DEBUG) kwargs['exc_info'] = True self.log(logging.ERROR, msg, *args, **kwargs) logger = logging.getLogger(name) logger.exception = six.create_bound_method(exception, logger) return logger
['def', 'getLogger', '(', 'name', ')', ':', 'def', 'exception', '(', 'self', ',', 'msg', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'extra', '=', 'kwargs', '.', 'setdefault', '(', "'extra'", ',', '{', '}', ')', 'extra', '[', "'exc_fullstack'", ']', '=', 'self', '.', 'isEnabledFor', '(', 'logging', '.', 'DEBUG', ')', 'kwargs', '[', "'exc_info'", ']', '=', 'True', 'self', '.', 'log', '(', 'logging', '.', 'ERROR', ',', 'msg', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', 'logger', '=', 'logging', '.', 'getLogger', '(', 'name', ')', 'logger', '.', 'exception', '=', 'six', '.', 'create_bound_method', '(', 'exception', ',', 'logger', ')', 'return', 'logger']
Create logger with custom exception() method
['Create', 'logger', 'with', 'custom', 'exception', '()', 'method']
train
https://github.com/latchset/custodia/blob/5ad4cd7a2f40babc6b8b5d16215b7e27ca993b6d/src/custodia/log.py#L68-L79
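On Python 3, six.create_bound_method resolves to types.MethodType, so the same binding trick works without six. A self-contained sketch:

import logging
import types

def exception(self, msg, *args, **kwargs):
    # Record whether full stack traces are wanted, then log at ERROR
    # with the current exception attached, mirroring the method above.
    extra = kwargs.setdefault('extra', {})
    extra['exc_fullstack'] = self.isEnabledFor(logging.DEBUG)
    kwargs['exc_info'] = True
    self.log(logging.ERROR, msg, *args, **kwargs)

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger('demo')
logger.exception = types.MethodType(exception, logger)

try:
    1 / 0
except ZeroDivisionError:
    logger.exception('division failed')  # ERROR line plus traceback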
4,174
timothydmorton/VESPA
vespa/stars/utils.py
semimajor
def semimajor(P,mstar=1): """Returns semimajor axis in AU given P in days, mstar in solar masses. """ return ((P*DAY/2/np.pi)**2*G*mstar*MSUN)**(1./3)/AU
python
def semimajor(P,mstar=1): """Returns semimajor axis in AU given P in days, mstar in solar masses. """ return ((P*DAY/2/np.pi)**2*G*mstar*MSUN)**(1./3)/AU
['def', 'semimajor', '(', 'P', ',', 'mstar', '=', '1', ')', ':', 'return', '(', '(', 'P', '*', 'DAY', '/', '2', '/', 'np', '.', 'pi', ')', '**', '2', '*', 'G', '*', 'mstar', '*', 'MSUN', ')', '**', '(', '1.', '/', '3', ')', '/', 'AU']
Returns semimajor axis in AU given P in days, mstar in solar masses.
['Returns', 'semimajor', 'axis', 'in', 'AU', 'given', 'P', 'in', 'days', 'mstar', 'in', 'solar', 'masses', '.']
train
https://github.com/timothydmorton/VESPA/blob/0446b54d48009f3655cfd1a3957ceea21d3adcaa/vespa/stars/utils.py#L159-L162
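A standalone check of the Kepler's-third-law formula above, with SI values standing in for vespa's DAY, G, MSUN and AU constants:

import numpy as np

DAY = 86400.0       # seconds per day
G = 6.674e-11       # m^3 kg^-1 s^-2
MSUN = 1.989e30     # kg
AU = 1.496e11       # m

def semimajor(P, mstar=1):
    return ((P * DAY / 2 / np.pi) ** 2 * G * mstar * MSUN) ** (1. / 3) / AU

print(semimajor(365.25))  # ~1.0 AU for an Earth-like orbit around 1 Msun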
4,175
orbingol/NURBS-Python
geomdl/voxelize.py
save_voxel_grid
def save_voxel_grid(voxel_grid, file_name): """ Saves binary voxel grid as a binary file. The binary file is structured in little-endian unsigned int format. :param voxel_grid: binary voxel grid :type voxel_grid: list, tuple :param file_name: file name to save :type file_name: str """ try: with open(file_name, 'wb') as fp: for voxel in voxel_grid: fp.write(struct.pack("<I", voxel)) except IOError as e: print("An error occurred: {}".format(e.args[-1])) raise e except Exception: raise
python
def save_voxel_grid(voxel_grid, file_name): """ Saves binary voxel grid as a binary file. The binary file is structured in little-endian unsigned int format. :param voxel_grid: binary voxel grid :type voxel_grid: list, tuple :param file_name: file name to save :type file_name: str """ try: with open(file_name, 'wb') as fp: for voxel in voxel_grid: fp.write(struct.pack("<I", voxel)) except IOError as e: print("An error occurred: {}".format(e.args[-1])) raise e except Exception: raise
['def', 'save_voxel_grid', '(', 'voxel_grid', ',', 'file_name', ')', ':', 'try', ':', 'with', 'open', '(', 'file_name', ',', "'wb'", ')', 'as', 'fp', ':', 'for', 'voxel', 'in', 'voxel_grid', ':', 'fp', '.', 'write', '(', 'struct', '.', 'pack', '(', '"<I"', ',', 'voxel', ')', ')', 'except', 'IOError', 'as', 'e', ':', 'print', '(', '"An error occurred: {}"', '.', 'format', '(', 'e', '.', 'args', '[', '-', '1', ']', ')', ')', 'raise', 'e', 'except', 'Exception', ':', 'raise']
Saves binary voxel grid as a binary file. The binary file is structured in little-endian unsigned int format. :param voxel_grid: binary voxel grid :type voxel_grid: list, tuple :param file_name: file name to save :type file_name: str
['Saves', 'binary', 'voxel', 'grid', 'as', 'a', 'binary', 'file', '.']
train
https://github.com/orbingol/NURBS-Python/blob/b1c6a8b51cf143ff58761438e93ba6baef470627/geomdl/voxelize.py#L89-L107
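A matching reader for files written by save_voxel_grid: each voxel is a 4-byte little-endian unsigned int ("<I"), so the grid can be recovered by unpacking in 4-byte steps. The name load_voxel_grid is illustrative, not part of geomdl:

import struct

def load_voxel_grid(file_name):
    voxel_grid = []
    with open(file_name, 'rb') as fp:
        chunk = fp.read(4)
        while chunk:
            # each 4-byte chunk is one little-endian unsigned int
            voxel_grid.append(struct.unpack("<I", chunk)[0])
            chunk = fp.read(4)
    return voxel_grid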
4,176
bcbio/bcbio-nextgen
bcbio/variation/vcfanno.py
find_annotations
def find_annotations(data, retriever=None): """Find annotation configuration files for vcfanno, using pre-installed inputs. Creates absolute paths for user specified inputs and finds locally installed defaults. Default annotations: - gemini for variant pipelines - somatic for variant tumor pipelines - rnaedit for RNA-seq variant calling """ conf_files = dd.get_vcfanno(data) if not isinstance(conf_files, (list, tuple)): conf_files = [conf_files] for c in _default_conf_files(data, retriever): if c not in conf_files: conf_files.append(c) conf_checkers = {"gemini": annotate_gemini, "somatic": _annotate_somatic} out = [] annodir = os.path.normpath(os.path.join(os.path.dirname(dd.get_ref_file(data)), os.pardir, "config", "vcfanno")) if not retriever: annodir = os.path.abspath(annodir) for conf_file in conf_files: if objectstore.is_remote(conf_file) or (os.path.exists(conf_file) and os.path.isfile(conf_file)): conffn = conf_file elif not retriever: conffn = os.path.join(annodir, conf_file + ".conf") else: conffn = conf_file + ".conf" luafn = "%s.lua" % utils.splitext_plus(conffn)[0] if retriever: conffn, luafn = [(x if objectstore.is_remote(x) else None) for x in retriever.add_remotes([conffn, luafn], data["config"])] if not conffn: pass elif conf_file in conf_checkers and not conf_checkers[conf_file](data, retriever): logger.warn("Skipping vcfanno configuration: %s. Not all input files found." % conf_file) elif not objectstore.file_exists_or_remote(conffn): build = dd.get_genome_build(data) CONF_NOT_FOUND = ( "The vcfanno configuration {conffn} was not found for {build}, skipping.") logger.warn(CONF_NOT_FOUND.format(**locals())) else: out.append(conffn) if luafn and objectstore.file_exists_or_remote(luafn): out.append(luafn) return out
python
def find_annotations(data, retriever=None): """Find annotation configuration files for vcfanno, using pre-installed inputs. Creates absolute paths for user specified inputs and finds locally installed defaults. Default annotations: - gemini for variant pipelines - somatic for variant tumor pipelines - rnaedit for RNA-seq variant calling """ conf_files = dd.get_vcfanno(data) if not isinstance(conf_files, (list, tuple)): conf_files = [conf_files] for c in _default_conf_files(data, retriever): if c not in conf_files: conf_files.append(c) conf_checkers = {"gemini": annotate_gemini, "somatic": _annotate_somatic} out = [] annodir = os.path.normpath(os.path.join(os.path.dirname(dd.get_ref_file(data)), os.pardir, "config", "vcfanno")) if not retriever: annodir = os.path.abspath(annodir) for conf_file in conf_files: if objectstore.is_remote(conf_file) or (os.path.exists(conf_file) and os.path.isfile(conf_file)): conffn = conf_file elif not retriever: conffn = os.path.join(annodir, conf_file + ".conf") else: conffn = conf_file + ".conf" luafn = "%s.lua" % utils.splitext_plus(conffn)[0] if retriever: conffn, luafn = [(x if objectstore.is_remote(x) else None) for x in retriever.add_remotes([conffn, luafn], data["config"])] if not conffn: pass elif conf_file in conf_checkers and not conf_checkers[conf_file](data, retriever): logger.warn("Skipping vcfanno configuration: %s. Not all input files found." % conf_file) elif not objectstore.file_exists_or_remote(conffn): build = dd.get_genome_build(data) CONF_NOT_FOUND = ( "The vcfanno configuration {conffn} was not found for {build}, skipping.") logger.warn(CONF_NOT_FOUND.format(**locals())) else: out.append(conffn) if luafn and objectstore.file_exists_or_remote(luafn): out.append(luafn) return out
['def', 'find_annotations', '(', 'data', ',', 'retriever', '=', 'None', ')', ':', 'conf_files', '=', 'dd', '.', 'get_vcfanno', '(', 'data', ')', 'if', 'not', 'isinstance', '(', 'conf_files', ',', '(', 'list', ',', 'tuple', ')', ')', ':', 'conf_files', '=', '[', 'conf_files', ']', 'for', 'c', 'in', '_default_conf_files', '(', 'data', ',', 'retriever', ')', ':', 'if', 'c', 'not', 'in', 'conf_files', ':', 'conf_files', '.', 'append', '(', 'c', ')', 'conf_checkers', '=', '{', '"gemini"', ':', 'annotate_gemini', ',', '"somatic"', ':', '_annotate_somatic', '}', 'out', '=', '[', ']', 'annodir', '=', 'os', '.', 'path', '.', 'normpath', '(', 'os', '.', 'path', '.', 'join', '(', 'os', '.', 'path', '.', 'dirname', '(', 'dd', '.', 'get_ref_file', '(', 'data', ')', ')', ',', 'os', '.', 'pardir', ',', '"config"', ',', '"vcfanno"', ')', ')', 'if', 'not', 'retriever', ':', 'annodir', '=', 'os', '.', 'path', '.', 'abspath', '(', 'annodir', ')', 'for', 'conf_file', 'in', 'conf_files', ':', 'if', 'objectstore', '.', 'is_remote', '(', 'conf_file', ')', 'or', '(', 'os', '.', 'path', '.', 'exists', '(', 'conf_file', ')', 'and', 'os', '.', 'path', '.', 'isfile', '(', 'conf_file', ')', ')', ':', 'conffn', '=', 'conf_file', 'elif', 'not', 'retriever', ':', 'conffn', '=', 'os', '.', 'path', '.', 'join', '(', 'annodir', ',', 'conf_file', '+', '".conf"', ')', 'else', ':', 'conffn', '=', 'conf_file', '+', '".conf"', 'luafn', '=', '"%s.lua"', '%', 'utils', '.', 'splitext_plus', '(', 'conffn', ')', '[', '0', ']', 'if', 'retriever', ':', 'conffn', ',', 'luafn', '=', '[', '(', 'x', 'if', 'objectstore', '.', 'is_remote', '(', 'x', ')', 'else', 'None', ')', 'for', 'x', 'in', 'retriever', '.', 'add_remotes', '(', '[', 'conffn', ',', 'luafn', ']', ',', 'data', '[', '"config"', ']', ')', ']', 'if', 'not', 'conffn', ':', 'pass', 'elif', 'conf_file', 'in', 'conf_checkers', 'and', 'not', 'conf_checkers', '[', 'conf_file', ']', '(', 'data', ',', 'retriever', ')', ':', 'logger', '.', 'warn', '(', '"Skipping vcfanno configuration: %s. Not all input files found."', '%', 'conf_file', ')', 'elif', 'not', 'objectstore', '.', 'file_exists_or_remote', '(', 'conffn', ')', ':', 'build', '=', 'dd', '.', 'get_genome_build', '(', 'data', ')', 'CONF_NOT_FOUND', '=', '(', '"The vcfanno configuration {conffn} was not found for {build}, skipping."', ')', 'logger', '.', 'warn', '(', 'CONF_NOT_FOUND', '.', 'format', '(', '*', '*', 'locals', '(', ')', ')', ')', 'else', ':', 'out', '.', 'append', '(', 'conffn', ')', 'if', 'luafn', 'and', 'objectstore', '.', 'file_exists_or_remote', '(', 'luafn', ')', ':', 'out', '.', 'append', '(', 'luafn', ')', 'return', 'out']
Find annotation configuration files for vcfanno, using pre-installed inputs. Creates absolute paths for user specified inputs and finds locally installed defaults. Default annotations: - gemini for variant pipelines - somatic for variant tumor pipelines - rnaedit for RNA-seq variant calling
['Find', 'annotation', 'configuration', 'files', 'for', 'vcfanno', 'using', 'pre', '-', 'installed', 'inputs', '.']
train
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/vcfanno.py#L91-L137
4,177
vsoch/pokemon
pokemon/skills.py
get_ascii
def get_ascii(pid=None, name=None, pokemons=None, return_pokemons=False, message=None): '''get_ascii will return ascii art for a pokemon based on a name or pid. :param pid: the pokemon ID to return :param name: the pokemon name to return :param return_pokemons: return catches (default False) :param message: add a message to the ascii ''' pokemon = get_pokemon(name=name,pid=pid,pokemons=pokemons) printme = message if len(pokemon) > 0: for pid,data in pokemon.items(): if message == None: printme = data["name"].capitalize() print("%s\n\n%s" % (data['ascii'],printme)) if return_pokemons == True: return pokemon
python
def get_ascii(pid=None, name=None, pokemons=None, return_pokemons=False, message=None): '''get_ascii will return ascii art for a pokemon based on a name or pid. :param pid: the pokemon ID to return :param name: the pokemon name to return :param return_pokemons: return catches (default False) :param message: add a message to the ascii ''' pokemon = get_pokemon(name=name,pid=pid,pokemons=pokemons) printme = message if len(pokemon) > 0: for pid,data in pokemon.items(): if message == None: printme = data["name"].capitalize() print("%s\n\n%s" % (data['ascii'],printme)) if return_pokemons == True: return pokemon
['def', 'get_ascii', '(', 'pid', '=', 'None', ',', 'name', '=', 'None', ',', 'pokemons', '=', 'None', ',', 'return_pokemons', '=', 'False', ',', 'message', '=', 'None', ')', ':', 'pokemon', '=', 'get_pokemon', '(', 'name', '=', 'name', ',', 'pid', '=', 'pid', ',', 'pokemons', '=', 'pokemons', ')', 'printme', '=', 'message', 'if', 'len', '(', 'pokemon', ')', '>', '0', ':', 'for', 'pid', ',', 'data', 'in', 'pokemon', '.', 'items', '(', ')', ':', 'if', 'message', '==', 'None', ':', 'printme', '=', 'data', '[', '"name"', ']', '.', 'capitalize', '(', ')', 'print', '(', '"%s\\n\\n%s"', '%', '(', 'data', '[', "'ascii'", ']', ',', 'printme', ')', ')', 'if', 'return_pokemons', '==', 'True', ':', 'return', 'pokemon']
get_ascii will return ascii art for a pokemon based on a name or pid. :param pid: the pokemon ID to return :param name: the pokemon name to return :param return_pokemons: return catches (default False) :param message: add a message to the ascii
['get_ascii', 'will', 'return', 'ascii', 'art', 'for', 'a', 'pokemon', 'based', 'on', 'a', 'name', 'or', 'pid', '.', ':', 'param', 'pid', ':', 'the', 'pokemon', 'ID', 'to', 'return', ':', 'param', 'name', ':', 'the', 'pokemon', 'name', 'to', 'return', ':', 'param', 'return_pokemons', ':', 'return', 'catches', '(', 'default', 'False', ')', ':', 'param', 'message', ':', 'add', 'a', 'message', 'to', 'the', 'ascii']
train
https://github.com/vsoch/pokemon/blob/c9cd8c5d64897617867d38d45183476ea64a0620/pokemon/skills.py#L27-L43
4,178
honzamach/pynspect
pynspect/traversers.py
BaseFilteringTreeTraverser.decorate_function
def decorate_function(self, name, decorator): """ Decorate function with given name with given decorator. :param str name: Name of the function. :param callable decorator: Decorator callback. """ self.functions[name] = decorator(self.functions[name])
python
def decorate_function(self, name, decorator): """ Decorate function with given name with given decorator. :param str name: Name of the function. :param callable decorator: Decorator callback. """ self.functions[name] = decorator(self.functions[name])
['def', 'decorate_function', '(', 'self', ',', 'name', ',', 'decorator', ')', ':', 'self', '.', 'functions', '[', 'name', ']', '=', 'decorator', '(', 'self', '.', 'functions', '[', 'name', ']', ')']
Decorate function with given name with given decorator. :param str name: Name of the function. :param callable decorator: Decorator callback.
['Decorate', 'function', 'with', 'given', 'name', 'with', 'given', 'decorator', '.']
train
https://github.com/honzamach/pynspect/blob/0582dcc1f7aafe50e25a21c792ea1b3367ea5881/pynspect/traversers.py#L753-L760
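The method amounts to swapping a registry entry for a wrapped version of itself. A toy registry showing the same move; the dict and the log_calls decorator are illustrative, not pynspect's real function table:

def log_calls(func):
    def wrapper(*args, **kwargs):
        print('calling', func.__name__, args, kwargs)
        return func(*args, **kwargs)
    return wrapper

functions = {'size': len}
functions['size'] = log_calls(functions['size'])  # decorate in place
print(functions['size']([1, 2, 3]))               # logs the call, prints 3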
4,179
ic-labs/django-icekit
icekit_events/models.py
get_occurrence_times_for_event
def get_occurrence_times_for_event(event): """ Return a tuple with two sets containing the (start, end) *naive* datetimes of an Event's Occurrences, or the original start datetime if an Occurrence's start was modified by a user. """ occurrences_starts = set() occurrences_ends = set() for o in event.occurrence_list: occurrences_starts.add( coerce_naive(o.original_start or o.start) ) occurrences_ends.add( coerce_naive(o.original_end or o.end) ) return occurrences_starts, occurrences_ends
python
def get_occurrence_times_for_event(event): """ Return a tuple with two sets containing the (start, end) *naive* datetimes of an Event's Occurrences, or the original start datetime if an Occurrence's start was modified by a user. """ occurrences_starts = set() occurrences_ends = set() for o in event.occurrence_list: occurrences_starts.add( coerce_naive(o.original_start or o.start) ) occurrences_ends.add( coerce_naive(o.original_end or o.end) ) return occurrences_starts, occurrences_ends
['def', 'get_occurrence_times_for_event', '(', 'event', ')', ':', 'occurrences_starts', '=', 'set', '(', ')', 'occurrences_ends', '=', 'set', '(', ')', 'for', 'o', 'in', 'event', '.', 'occurrence_list', ':', 'occurrences_starts', '.', 'add', '(', 'coerce_naive', '(', 'o', '.', 'original_start', 'or', 'o', '.', 'start', ')', ')', 'occurrences_ends', '.', 'add', '(', 'coerce_naive', '(', 'o', '.', 'original_end', 'or', 'o', '.', 'end', ')', ')', 'return', 'occurrences_starts', ',', 'occurrences_ends']
Return a tuple with two sets containing the (start, end) *naive* datetimes of an Event's Occurrences, or the original start datetime if an Occurrence's start was modified by a user.
['Return', 'a', 'tuple', 'with', 'two', 'sets', 'containing', 'the', '(', 'start', 'end', ')', '*', 'naive', '*', 'datetimes', 'of', 'an', 'Event', 's', 'Occurrences', 'or', 'the', 'original', 'start', 'datetime', 'if', 'an', 'Occurrence', 's', 'start', 'was', 'modified', 'by', 'a', 'user', '.']
train
https://github.com/ic-labs/django-icekit/blob/c507ea5b1864303732c53ad7c5800571fca5fa94/icekit_events/models.py#L933-L948
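The original_start or o.start fallback prefers a user-modified time when one was recorded, and coerce_naive drops timezone information before the sets are built. A toy version under those assumptions; the namedtuple stands in for an icekit Occurrence:

from collections import namedtuple
from datetime import datetime, timezone

Occurrence = namedtuple('Occurrence', 'start end original_start original_end')

def coerce_naive(dt):
    return dt.replace(tzinfo=None)  # strip tzinfo so the sets compare naively

occ = Occurrence(
    start=datetime(2024, 1, 1, 9, 0, tzinfo=timezone.utc),
    end=datetime(2024, 1, 1, 10, 0, tzinfo=timezone.utc),
    original_start=None,   # never modified by a user, so start wins
    original_end=None,
)
starts = {coerce_naive(occ.original_start or occ.start)}
print(starts)  # {datetime.datetime(2024, 1, 1, 9, 0)}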
4,180
F5Networks/f5-common-python
f5/utils/responses/handlers.py
Stats._get_nest_stats
def _get_nest_stats(self): """Helper method to deal with nestedStats as json format changed in v12.x """ for x in self.rdict: check = urlparse(x) if check.scheme: nested_dict = self.rdict[x]['nestedStats'] tmp_dict = nested_dict['entries'] return self._key_dot_replace(tmp_dict) return self._key_dot_replace(self.rdict)
python
def _get_nest_stats(self): """Helper method to deal with nestedStats as json format changed in v12.x """ for x in self.rdict: check = urlparse(x) if check.scheme: nested_dict = self.rdict[x]['nestedStats'] tmp_dict = nested_dict['entries'] return self._key_dot_replace(tmp_dict) return self._key_dot_replace(self.rdict)
['def', '_get_nest_stats', '(', 'self', ')', ':', 'for', 'x', 'in', 'self', '.', 'rdict', ':', 'check', '=', 'urlparse', '(', 'x', ')', 'if', 'check', '.', 'scheme', ':', 'nested_dict', '=', 'self', '.', 'rdict', '[', 'x', ']', '[', "'nestedStats'", ']', 'tmp_dict', '=', 'nested_dict', '[', "'entries'", ']', 'return', 'self', '.', '_key_dot_replace', '(', 'tmp_dict', ')', 'return', 'self', '.', '_key_dot_replace', '(', 'self', '.', 'rdict', ')']
Helper method to deal with nestedStats as json format changed in v12.x
['Helper', 'method', 'to', 'deal', 'with', 'nestedStats']
train
https://github.com/F5Networks/f5-common-python/blob/7e67d5acd757a60e3d5f8c88c534bd72208f5494/f5/utils/responses/handlers.py#L52-L64
4,181
rigetti/pyquil
pyquil/api/_benchmark.py
BenchmarkConnection.generate_rb_sequence
def generate_rb_sequence(self, depth, gateset, seed=None, interleaver=None): """ Construct a randomized benchmarking experiment on the given qubits, decomposing into gateset. If interleaver is not provided, the returned sequence will have the form C_1 C_2 ... C_(depth-1) C_inv , where each C is a Clifford element drawn from gateset, C_{< depth} are randomly selected, and C_inv is selected so that the entire sequence composes to the identity. If an interleaver G (which must be a Clifford, and which will be decomposed into the native gateset) is provided, then the sequence instead takes the form C_1 G C_2 G ... C_(depth-1) G C_inv . The JSON response is a list of lists of indices, or Nones. In the former case, they are the index of the gate in the gateset. :param int depth: The number of Clifford gates to include in the randomized benchmarking experiment. This is different than the number of gates in the resulting experiment. :param list gateset: A list of pyquil gates to decompose the Clifford elements into. These must generate the clifford group on the qubits of interest. e.g. for one qubit [RZ(np.pi/2), RX(np.pi/2)]. :param seed: A positive integer used to seed the PRNG. :param interleaver: A Program object that encodes a Clifford element. :return: A list of pyquil programs. Each pyquil program is a circuit that represents an element of the Clifford group. When these programs are composed, the resulting Program will be the randomized benchmarking experiment of the desired depth. e.g. if the return programs are called cliffords then `sum(cliffords, Program())` will give the randomized benchmarking experiment, which will compose to the identity program. """ # Support QubitPlaceholders: we temporarily index to arbitrary integers. # `generate_rb_sequence` handles mapping back to the original gateset gates. gateset_as_program = address_qubits(sum(gateset, Program())) qubits = len(gateset_as_program.get_qubits()) gateset_for_api = gateset_as_program.out().splitlines() if interleaver: assert(isinstance(interleaver, Program)) interleaver = interleaver.out() depth = int(depth) # needs to be jsonable, no np.int64 please! payload = RandomizedBenchmarkingRequest(depth=depth, qubits=qubits, gateset=gateset_for_api, seed=seed, interleaver=interleaver) response = self.client.call('generate_rb_sequence', payload) # type: RandomizedBenchmarkingResponse programs = [] for clifford in response.sequence: clifford_program = Program() # Like below, we reversed the order because the API currently hands back the Clifford # decomposition right-to-left. for index in reversed(clifford): clifford_program.inst(gateset[index]) programs.append(clifford_program) # The programs are returned in "textbook style" right-to-left order. To compose them into # the correct pyquil program, we reverse the order. return list(reversed(programs))
python
def generate_rb_sequence(self, depth, gateset, seed=None, interleaver=None): """ Construct a randomized benchmarking experiment on the given qubits, decomposing into gateset. If interleaver is not provided, the returned sequence will have the form C_1 C_2 ... C_(depth-1) C_inv , where each C is a Clifford element drawn from gateset, C_{< depth} are randomly selected, and C_inv is selected so that the entire sequence composes to the identity. If an interleaver G (which must be a Clifford, and which will be decomposed into the native gateset) is provided, then the sequence instead takes the form C_1 G C_2 G ... C_(depth-1) G C_inv . The JSON response is a list of lists of indices, or Nones. In the former case, they are the index of the gate in the gateset. :param int depth: The number of Clifford gates to include in the randomized benchmarking experiment. This is different than the number of gates in the resulting experiment. :param list gateset: A list of pyquil gates to decompose the Clifford elements into. These must generate the clifford group on the qubits of interest. e.g. for one qubit [RZ(np.pi/2), RX(np.pi/2)]. :param seed: A positive integer used to seed the PRNG. :param interleaver: A Program object that encodes a Clifford element. :return: A list of pyquil programs. Each pyquil program is a circuit that represents an element of the Clifford group. When these programs are composed, the resulting Program will be the randomized benchmarking experiment of the desired depth. e.g. if the return programs are called cliffords then `sum(cliffords, Program())` will give the randomized benchmarking experiment, which will compose to the identity program. """ # Support QubitPlaceholders: we temporarily index to arbitrary integers. # `generate_rb_sequence` handles mapping back to the original gateset gates. gateset_as_program = address_qubits(sum(gateset, Program())) qubits = len(gateset_as_program.get_qubits()) gateset_for_api = gateset_as_program.out().splitlines() if interleaver: assert(isinstance(interleaver, Program)) interleaver = interleaver.out() depth = int(depth) # needs to be jsonable, no np.int64 please! payload = RandomizedBenchmarkingRequest(depth=depth, qubits=qubits, gateset=gateset_for_api, seed=seed, interleaver=interleaver) response = self.client.call('generate_rb_sequence', payload) # type: RandomizedBenchmarkingResponse programs = [] for clifford in response.sequence: clifford_program = Program() # Like below, we reversed the order because the API currently hands back the Clifford # decomposition right-to-left. for index in reversed(clifford): clifford_program.inst(gateset[index]) programs.append(clifford_program) # The programs are returned in "textbook style" right-to-left order. To compose them into # the correct pyquil program, we reverse the order. return list(reversed(programs))
['def', 'generate_rb_sequence', '(', 'self', ',', 'depth', ',', 'gateset', ',', 'seed', '=', 'None', ',', 'interleaver', '=', 'None', ')', ':', '# Support QubitPlaceholders: we temporarily index to arbitrary integers.', '# `generate_rb_sequence` handles mapping back to the original gateset gates.', 'gateset_as_program', '=', 'address_qubits', '(', 'sum', '(', 'gateset', ',', 'Program', '(', ')', ')', ')', 'qubits', '=', 'len', '(', 'gateset_as_program', '.', 'get_qubits', '(', ')', ')', 'gateset_for_api', '=', 'gateset_as_program', '.', 'out', '(', ')', '.', 'splitlines', '(', ')', 'if', 'interleaver', ':', 'assert', '(', 'isinstance', '(', 'interleaver', ',', 'Program', ')', ')', 'interleaver', '=', 'interleaver', '.', 'out', '(', ')', 'depth', '=', 'int', '(', 'depth', ')', '# needs to be jsonable, no np.int64 please!', 'payload', '=', 'RandomizedBenchmarkingRequest', '(', 'depth', '=', 'depth', ',', 'qubits', '=', 'qubits', ',', 'gateset', '=', 'gateset_for_api', ',', 'seed', '=', 'seed', ',', 'interleaver', '=', 'interleaver', ')', 'response', '=', 'self', '.', 'client', '.', 'call', '(', "'generate_rb_sequence'", ',', 'payload', ')', '# type: RandomizedBenchmarkingResponse', 'programs', '=', '[', ']', 'for', 'clifford', 'in', 'response', '.', 'sequence', ':', 'clifford_program', '=', 'Program', '(', ')', '# Like below, we reversed the order because the API currently hands back the Clifford', '# decomposition right-to-left.', 'for', 'index', 'in', 'reversed', '(', 'clifford', ')', ':', 'clifford_program', '.', 'inst', '(', 'gateset', '[', 'index', ']', ')', 'programs', '.', 'append', '(', 'clifford_program', ')', '# The programs are returned in "textbook style" right-to-left order. To compose them into', '# the correct pyquil program, we reverse the order.', 'return', 'list', '(', 'reversed', '(', 'programs', ')', ')']
Construct a randomized benchmarking experiment on the given qubits, decomposing into gateset. If interleaver is not provided, the returned sequence will have the form C_1 C_2 ... C_(depth-1) C_inv , where each C is a Clifford element drawn from gateset, C_{< depth} are randomly selected, and C_inv is selected so that the entire sequence composes to the identity. If an interleaver G (which must be a Clifford, and which will be decomposed into the native gateset) is provided, then the sequence instead takes the form C_1 G C_2 G ... C_(depth-1) G C_inv . The JSON response is a list of lists of indices, or Nones. In the former case, they are the index of the gate in the gateset. :param int depth: The number of Clifford gates to include in the randomized benchmarking experiment. This is different than the number of gates in the resulting experiment. :param list gateset: A list of pyquil gates to decompose the Clifford elements into. These must generate the clifford group on the qubits of interest. e.g. for one qubit [RZ(np.pi/2), RX(np.pi/2)]. :param seed: A positive integer used to seed the PRNG. :param interleaver: A Program object that encodes a Clifford element. :return: A list of pyquil programs. Each pyquil program is a circuit that represents an element of the Clifford group. When these programs are composed, the resulting Program will be the randomized benchmarking experiment of the desired depth. e.g. if the return programs are called cliffords then `sum(cliffords, Program())` will give the randomized benchmarking experiment, which will compose to the identity program.
['Construct', 'a', 'randomized', 'benchmarking', 'experiment', 'on', 'the', 'given', 'qubits', 'decomposing', 'into', 'gateset', '.', 'If', 'interleaver', 'is', 'not', 'provided', 'the', 'returned', 'sequence', 'will', 'have', 'the', 'form']
train
https://github.com/rigetti/pyquil/blob/ec98e453084b0037d69d8c3245f6822a5422593d/pyquil/api/_benchmark.py#L82-L141
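The double reversal at the end of the method is easy to miss: each Clifford's index list and the overall sequence both come back right-to-left. A standalone toy of that fix-up with a made-up gateset and response; no server is contacted:

gateset = ['RZ(pi/2)', 'RX(pi/2)']
sequence = [[0, 1], [1, 0, 0]]   # toy right-to-left API response

programs = []
for clifford in sequence:
    # reverse each decomposition from textbook order to program order
    programs.append([gateset[i] for i in reversed(clifford)])
programs = list(reversed(programs))  # reverse the sequence itself

print(programs)  # gate lists now in left-to-right composition order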
4,182
dls-controls/pymalcolm
malcolm/core/views.py
Attribute.put_value
def put_value(self, value, timeout=None): """Put a value to the Attribute and wait for completion""" self._context.put(self._data.path + ["value"], value, timeout=timeout)
python
def put_value(self, value, timeout=None): """Put a value to the Attribute and wait for completion""" self._context.put(self._data.path + ["value"], value, timeout=timeout)
['def', 'put_value', '(', 'self', ',', 'value', ',', 'timeout', '=', 'None', ')', ':', 'self', '.', '_context', '.', 'put', '(', 'self', '.', '_data', '.', 'path', '+', '[', '"value"', ']', ',', 'value', ',', 'timeout', '=', 'timeout', ')']
Put a value to the Attribute and wait for completion
['Put', 'a', 'value', 'to', 'the', 'Attribute', 'and', 'wait', 'for', 'completion']
train
https://github.com/dls-controls/pymalcolm/blob/80ea667e4da26365a6cebc0249f52fdc744bd983/malcolm/core/views.py#L78-L80
4,183
evhub/coconut
coconut/compiler/matching.py
Matcher.duplicate
def duplicate(self): """Duplicates the matcher to others.""" other = Matcher(self.loc, self.check_var, self.checkdefs, self.names, self.var_index) other.insert_check(0, "not " + self.check_var) self.others.append(other) return other
python
def duplicate(self): """Duplicates the matcher to others.""" other = Matcher(self.loc, self.check_var, self.checkdefs, self.names, self.var_index) other.insert_check(0, "not " + self.check_var) self.others.append(other) return other
['def', 'duplicate', '(', 'self', ')', ':', 'other', '=', 'Matcher', '(', 'self', '.', 'loc', ',', 'self', '.', 'check_var', ',', 'self', '.', 'checkdefs', ',', 'self', '.', 'names', ',', 'self', '.', 'var_index', ')', 'other', '.', 'insert_check', '(', '0', ',', '"not "', '+', 'self', '.', 'check_var', ')', 'self', '.', 'others', '.', 'append', '(', 'other', ')', 'return', 'other']
Duplicates the matcher to others.
['Duplicates', 'the', 'matcher', 'to', 'others', '.']
train
https://github.com/evhub/coconut/blob/ff97177344e7604e89a0a98a977a87ed2a56fc6d/coconut/compiler/matching.py#L120-L125
4,184
BD2KGenomics/toil-scripts
src/toil_scripts/bwa_alignment/bwa_alignment.py
main
def main(): """ Computational Genomics Lab, Genomics Institute, UC Santa Cruz Toil BWA pipeline Alignment of fastq reads via BWA-kit General usage: 1. Type "toil-bwa generate" to create an editable manifest and config in the current working directory. 2. Parameterize the pipeline by editing the config. 3. Fill in the manifest with information pertaining to your samples. 4. Type "toil-bwa run [jobStore]" to execute the pipeline. Please read the README.md located in the source directory or at: https://github.com/BD2KGenomics/toil-scripts/tree/master/src/toil_scripts/bwa_alignment Structure of the BWA pipeline (per sample) 0 --> 1 0 = Download sample 1 = Run BWA-kit =================================================================== :Dependencies: cURL: apt-get install curl Toil: pip install toil Docker: wget -qO- https://get.docker.com/ | sh Optional: S3AM: pip install --s3am (requires ~/.boto config file) Boto: pip install boto """ # Define Parser object and add to Toil parser = argparse.ArgumentParser(description=main.__doc__, formatter_class=argparse.RawTextHelpFormatter) subparsers = parser.add_subparsers(dest='command') # Generate subparsers subparsers.add_parser('generate-config', help='Generates an editable config in the current working directory.') subparsers.add_parser('generate-manifest', help='Generates an editable manifest in the current working directory.') subparsers.add_parser('generate', help='Generates a config and manifest in the current working directory.') # Run subparser parser_run = subparsers.add_parser('run', help='Runs the BWA alignment pipeline') group = parser_run.add_mutually_exclusive_group() parser_run.add_argument('--config', default='config-toil-bwa.yaml', type=str, help='Path to the (filled in) config file, generated with "generate-config".') group.add_argument('--manifest', default='manifest-toil-bwa.tsv', type=str, help='Path to the (filled in) manifest file, generated with "generate-manifest". ' '\nDefault value: "%(default)s".') group.add_argument('--sample', nargs='+', action=required_length(2, 3), help='Space delimited sample UUID and fastq files in the format: uuid url1 [url2].') # Print docstring help if no arguments provided if len(sys.argv) == 1: parser.print_help() sys.exit(1) Job.Runner.addToilOptions(parser_run) args = parser.parse_args() # Parse subparsers related to generation of config and manifest cwd = os.getcwd() if args.command == 'generate-config' or args.command == 'generate': generate_file(os.path.join(cwd, 'config-toil-bwa.yaml'), generate_config) if args.command == 'generate-manifest' or args.command == 'generate': generate_file(os.path.join(cwd, 'manifest-toil-bwa.tsv'), generate_manifest) # Pipeline execution elif args.command == 'run': require(os.path.exists(args.config), '{} not found. Please run generate-config'.format(args.config)) if not args.sample: args.sample = None require(os.path.exists(args.manifest), '{} not found and no sample provided. 
' 'Please run "generate-manifest"'.format(args.manifest)) # Parse config parsed_config = {x.replace('-', '_'): y for x, y in yaml.load(open(args.config).read()).iteritems()} config = argparse.Namespace(**parsed_config) config.maxCores = int(args.maxCores) if args.maxCores else sys.maxint samples = [args.sample[0], args.sample[1:]] if args.sample else parse_manifest(args.manifest) # Sanity checks require(config.ref, 'Missing URL for reference file: {}'.format(config.ref)) require(config.output_dir, 'No output location specified: {}'.format(config.output_dir)) # Launch Pipeline Job.Runner.startToil(Job.wrapJobFn(download_reference_files, config, samples), args)
python
def main(): """ Computational Genomics Lab, Genomics Institute, UC Santa Cruz Toil BWA pipeline Alignment of fastq reads via BWA-kit General usage: 1. Type "toil-bwa generate" to create an editable manifest and config in the current working directory. 2. Parameterize the pipeline by editing the config. 3. Fill in the manifest with information pertaining to your samples. 4. Type "toil-bwa run [jobStore]" to execute the pipeline. Please read the README.md located in the source directory or at: https://github.com/BD2KGenomics/toil-scripts/tree/master/src/toil_scripts/bwa_alignment Structure of the BWA pipeline (per sample) 0 --> 1 0 = Download sample 1 = Run BWA-kit =================================================================== :Dependencies: cURL: apt-get install curl Toil: pip install toil Docker: wget -qO- https://get.docker.com/ | sh Optional: S3AM: pip install --s3am (requires ~/.boto config file) Boto: pip install boto """ # Define Parser object and add to Toil parser = argparse.ArgumentParser(description=main.__doc__, formatter_class=argparse.RawTextHelpFormatter) subparsers = parser.add_subparsers(dest='command') # Generate subparsers subparsers.add_parser('generate-config', help='Generates an editable config in the current working directory.') subparsers.add_parser('generate-manifest', help='Generates an editable manifest in the current working directory.') subparsers.add_parser('generate', help='Generates a config and manifest in the current working directory.') # Run subparser parser_run = subparsers.add_parser('run', help='Runs the BWA alignment pipeline') group = parser_run.add_mutually_exclusive_group() parser_run.add_argument('--config', default='config-toil-bwa.yaml', type=str, help='Path to the (filled in) config file, generated with "generate-config".') group.add_argument('--manifest', default='manifest-toil-bwa.tsv', type=str, help='Path to the (filled in) manifest file, generated with "generate-manifest". ' '\nDefault value: "%(default)s".') group.add_argument('--sample', nargs='+', action=required_length(2, 3), help='Space delimited sample UUID and fastq files in the format: uuid url1 [url2].') # Print docstring help if no arguments provided if len(sys.argv) == 1: parser.print_help() sys.exit(1) Job.Runner.addToilOptions(parser_run) args = parser.parse_args() # Parse subparsers related to generation of config and manifest cwd = os.getcwd() if args.command == 'generate-config' or args.command == 'generate': generate_file(os.path.join(cwd, 'config-toil-bwa.yaml'), generate_config) if args.command == 'generate-manifest' or args.command == 'generate': generate_file(os.path.join(cwd, 'manifest-toil-bwa.tsv'), generate_manifest) # Pipeline execution elif args.command == 'run': require(os.path.exists(args.config), '{} not found. Please run generate-config'.format(args.config)) if not args.sample: args.sample = None require(os.path.exists(args.manifest), '{} not found and no sample provided. 
' 'Please run "generate-manifest"'.format(args.manifest)) # Parse config parsed_config = {x.replace('-', '_'): y for x, y in yaml.load(open(args.config).read()).iteritems()} config = argparse.Namespace(**parsed_config) config.maxCores = int(args.maxCores) if args.maxCores else sys.maxint samples = [args.sample[0], args.sample[1:]] if args.sample else parse_manifest(args.manifest) # Sanity checks require(config.ref, 'Missing URL for reference file: {}'.format(config.ref)) require(config.output_dir, 'No output location specified: {}'.format(config.output_dir)) # Launch Pipeline Job.Runner.startToil(Job.wrapJobFn(download_reference_files, config, samples), args)
['def', 'main', '(', ')', ':', '# Define Parser object and add to Toil', 'parser', '=', 'argparse', '.', 'ArgumentParser', '(', 'description', '=', 'main', '.', '__doc__', ',', 'formatter_class', '=', 'argparse', '.', 'RawTextHelpFormatter', ')', 'subparsers', '=', 'parser', '.', 'add_subparsers', '(', 'dest', '=', "'command'", ')', '# Generate subparsers', 'subparsers', '.', 'add_parser', '(', "'generate-config'", ',', 'help', '=', "'Generates an editable config in the current working directory.'", ')', 'subparsers', '.', 'add_parser', '(', "'generate-manifest'", ',', 'help', '=', "'Generates an editable manifest in the current working directory.'", ')', 'subparsers', '.', 'add_parser', '(', "'generate'", ',', 'help', '=', "'Generates a config and manifest in the current working directory.'", ')', '# Run subparser', 'parser_run', '=', 'subparsers', '.', 'add_parser', '(', "'run'", ',', 'help', '=', "'Runs the BWA alignment pipeline'", ')', 'group', '=', 'parser_run', '.', 'add_mutually_exclusive_group', '(', ')', 'parser_run', '.', 'add_argument', '(', "'--config'", ',', 'default', '=', "'config-toil-bwa.yaml'", ',', 'type', '=', 'str', ',', 'help', '=', '\'Path to the (filled in) config file, generated with "generate-config".\'', ')', 'group', '.', 'add_argument', '(', "'--manifest'", ',', 'default', '=', "'manifest-toil-bwa.tsv'", ',', 'type', '=', 'str', ',', 'help', '=', '\'Path to the (filled in) manifest file, generated with "generate-manifest". \'', '\'\\nDefault value: "%(default)s".\'', ')', 'group', '.', 'add_argument', '(', "'--sample'", ',', 'nargs', '=', "'+'", ',', 'action', '=', 'required_length', '(', '2', ',', '3', ')', ',', 'help', '=', "'Space delimited sample UUID and fastq files in the format: uuid url1 [url2].'", ')', '# Print docstring help if no arguments provided', 'if', 'len', '(', 'sys', '.', 'argv', ')', '==', '1', ':', 'parser', '.', 'print_help', '(', ')', 'sys', '.', 'exit', '(', '1', ')', 'Job', '.', 'Runner', '.', 'addToilOptions', '(', 'parser_run', ')', 'args', '=', 'parser', '.', 'parse_args', '(', ')', '# Parse subparsers related to generation of config and manifest', 'cwd', '=', 'os', '.', 'getcwd', '(', ')', 'if', 'args', '.', 'command', '==', "'generate-config'", 'or', 'args', '.', 'command', '==', "'generate'", ':', 'generate_file', '(', 'os', '.', 'path', '.', 'join', '(', 'cwd', ',', "'config-toil-bwa.yaml'", ')', ',', 'generate_config', ')', 'if', 'args', '.', 'command', '==', "'generate-manifest'", 'or', 'args', '.', 'command', '==', "'generate'", ':', 'generate_file', '(', 'os', '.', 'path', '.', 'join', '(', 'cwd', ',', "'manifest-toil-bwa.tsv'", ')', ',', 'generate_manifest', ')', '# Pipeline execution', 'elif', 'args', '.', 'command', '==', "'run'", ':', 'require', '(', 'os', '.', 'path', '.', 'exists', '(', 'args', '.', 'config', ')', ',', "'{} not found. Please run generate-config'", '.', 'format', '(', 'args', '.', 'config', ')', ')', 'if', 'not', 'args', '.', 'sample', ':', 'args', '.', 'sample', '=', 'None', 'require', '(', 'os', '.', 'path', '.', 'exists', '(', 'args', '.', 'manifest', ')', ',', "'{} not found and no sample provided. '", '\'Please run "generate-manifest"\'', '.', 'format', '(', 'args', '.', 'manifest', ')', ')', '# Parse config', 'parsed_config', '=', '{', 'x', '.', 'replace', '(', "'-'", ',', "'_'", ')', ':', 'y', 'for', 'x', ',', 'y', 'in', 'yaml', '.', 'load', '(', 'open', '(', 'args', '.', 'config', ')', '.', 'read', '(', ')', ')', '.', 'iteritems', '(', ')', '}', 'config', '=', 'argparse', '.', 'Namespace', '(', '*', '*', 'parsed_config', ')', 'config', '.', 'maxCores', '=', 'int', '(', 'args', '.', 'maxCores', ')', 'if', 'args', '.', 'maxCores', 'else', 'sys', '.', 'maxint', 'samples', '=', '[', 'args', '.', 'sample', '[', '0', ']', ',', 'args', '.', 'sample', '[', '1', ':', ']', ']', 'if', 'args', '.', 'sample', 'else', 'parse_manifest', '(', 'args', '.', 'manifest', ')', '# Sanity checks', 'require', '(', 'config', '.', 'ref', ',', "'Missing URL for reference file: {}'", '.', 'format', '(', 'config', '.', 'ref', ')', ')', 'require', '(', 'config', '.', 'output_dir', ',', "'No output location specified: {}'", '.', 'format', '(', 'config', '.', 'output_dir', ')', ')', '# Launch Pipeline', 'Job', '.', 'Runner', '.', 'startToil', '(', 'Job', '.', 'wrapJobFn', '(', 'download_reference_files', ',', 'config', ',', 'samples', ')', ',', 'args', ')']
Computational Genomics Lab, Genomics Institute, UC Santa Cruz Toil BWA pipeline Alignment of fastq reads via BWA-kit General usage: 1. Type "toil-bwa generate" to create an editable manifest and config in the current working directory. 2. Parameterize the pipeline by editing the config. 3. Fill in the manifest with information pertaining to your samples. 4. Type "toil-bwa run [jobStore]" to execute the pipeline. Please read the README.md located in the source directory or at: https://github.com/BD2KGenomics/toil-scripts/tree/master/src/toil_scripts/bwa_alignment Structure of the BWA pipeline (per sample) 0 --> 1 0 = Download sample 1 = Run BWA-kit =================================================================== :Dependencies: cURL: apt-get install curl Toil: pip install toil Docker: wget -qO- https://get.docker.com/ | sh Optional: S3AM: pip install --s3am (requires ~/.boto config file) Boto: pip install boto
['Computational', 'Genomics', 'Lab', 'Genomics', 'Institute', 'UC', 'Santa', 'Cruz', 'Toil', 'BWA', 'pipeline']
train
https://github.com/BD2KGenomics/toil-scripts/blob/f878d863defcdccaabb7fe06f991451b7a198fb7/src/toil_scripts/bwa_alignment/bwa_alignment.py#L215-L292
4,185
Alignak-monitoring/alignak
alignak/external_command.py
ExternalCommandManager.launch_host_event_handler
def launch_host_event_handler(self, host): """Launch event handler for a service Format of the line that triggers function call:: LAUNCH_HOST_EVENT_HANDLER;<host_name> :param host: host to execute the event handler :type host: alignak.objects.host.Host :return: None """ host.get_event_handlers(self.hosts, self.daemon.macromodulations, self.daemon.timeperiods, ext_cmd=True)
python
def launch_host_event_handler(self, host): """Launch event handler for a service Format of the line that triggers function call:: LAUNCH_HOST_EVENT_HANDLER;<host_name> :param host: host to execute the event handler :type host: alignak.objects.host.Host :return: None """ host.get_event_handlers(self.hosts, self.daemon.macromodulations, self.daemon.timeperiods, ext_cmd=True)
['def', 'launch_host_event_handler', '(', 'self', ',', 'host', ')', ':', 'host', '.', 'get_event_handlers', '(', 'self', '.', 'hosts', ',', 'self', '.', 'daemon', '.', 'macromodulations', ',', 'self', '.', 'daemon', '.', 'timeperiods', ',', 'ext_cmd', '=', 'True', ')']
Launch event handler for a service Format of the line that triggers function call:: LAUNCH_HOST_EVENT_HANDLER;<host_name> :param host: host to execute the event handler :type host: alignak.objects.host.Host :return: None
['Launch', 'event', 'handler', 'for', 'a', 'service', 'Format', 'of', 'the', 'line', 'that', 'triggers', 'function', 'call', '::']
train
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/external_command.py#L4021-L4032
4,186
mitsei/dlkit
dlkit/json_/relationship/managers.py
RelationshipManager.get_relationship_admin_session_for_family
def get_relationship_admin_session_for_family(self, family_id): """Gets the ``OsidSession`` associated with the relationship administration service for the given family. arg: family_id (osid.id.Id): the ``Id`` of the ``Family`` return: (osid.relationship.RelationshipAdminSession) - a ``RelationshipAdminSession`` raise: NotFound - no family found by the given ``Id`` raise: NullArgument - ``family_id`` is ``null`` raise: OperationFailed - unable to complete request raise: Unimplemented - ``supports_relationship_admin()`` or ``supports_visible_federation()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_relationship_admin()`` and ``supports_visible_federation()`` are ``true``* """ if not self.supports_relationship_admin(): raise errors.Unimplemented() ## # Also include check to see if the catalog Id is found otherwise raise errors.NotFound ## # pylint: disable=no-member return sessions.RelationshipAdminSession(family_id, runtime=self._runtime)
python
def get_relationship_admin_session_for_family(self, family_id): """Gets the ``OsidSession`` associated with the relationship administration service for the given family. arg: family_id (osid.id.Id): the ``Id`` of the ``Family`` return: (osid.relationship.RelationshipAdminSession) - a ``RelationshipAdminSession`` raise: NotFound - no family found by the given ``Id`` raise: NullArgument - ``family_id`` is ``null`` raise: OperationFailed - unable to complete request raise: Unimplemented - ``supports_relationship_admin()`` or ``supports_visible_federation()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_relationship_admin()`` and ``supports_visible_federation()`` are ``true``* """ if not self.supports_relationship_admin(): raise errors.Unimplemented() ## # Also include check to see if the catalog Id is found otherwise raise errors.NotFound ## # pylint: disable=no-member return sessions.RelationshipAdminSession(family_id, runtime=self._runtime)
['def', 'get_relationship_admin_session_for_family', '(', 'self', ',', 'family_id', ')', ':', 'if', 'not', 'self', '.', 'supports_relationship_admin', '(', ')', ':', 'raise', 'errors', '.', 'Unimplemented', '(', ')', '##', '# Also include check to see if the catalog Id is found otherwise raise errors.NotFound', '##', '# pylint: disable=no-member', 'return', 'sessions', '.', 'RelationshipAdminSession', '(', 'family_id', ',', 'runtime', '=', 'self', '.', '_runtime', ')']
Gets the ``OsidSession`` associated with the relationship administration service for the given family. arg: family_id (osid.id.Id): the ``Id`` of the ``Family`` return: (osid.relationship.RelationshipAdminSession) - a ``RelationshipAdminSession`` raise: NotFound - no family found by the given ``Id`` raise: NullArgument - ``family_id`` is ``null`` raise: OperationFailed - unable to complete request raise: Unimplemented - ``supports_relationship_admin()`` or ``supports_visible_federation()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_relationship_admin()`` and ``supports_visible_federation()`` are ``true``*
['Gets', 'the', 'OsidSession', 'associated', 'with', 'the', 'relationship', 'administration', 'service', 'for', 'the', 'given', 'family', '.']
train
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/relationship/managers.py#L334-L356
4,187
PGower/PyCanvas
pycanvas/apis/groups.py
GroupsAPI.get_single_group_membership_users
def get_single_group_membership_users(self, user_id, group_id): """ Get a single group membership. Returns the group membership with the given membership id or user id. """ path = {} data = {} params = {} # REQUIRED - PATH - group_id """ID""" path["group_id"] = group_id # REQUIRED - PATH - user_id """ID""" path["user_id"] = user_id self.logger.debug("GET /api/v1/groups/{group_id}/users/{user_id} with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/groups/{group_id}/users/{user_id}".format(**path), data=data, params=params, single_item=True)
python
def get_single_group_membership_users(self, user_id, group_id): """ Get a single group membership. Returns the group membership with the given membership id or user id. """ path = {} data = {} params = {} # REQUIRED - PATH - group_id """ID""" path["group_id"] = group_id # REQUIRED - PATH - user_id """ID""" path["user_id"] = user_id self.logger.debug("GET /api/v1/groups/{group_id}/users/{user_id} with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/groups/{group_id}/users/{user_id}".format(**path), data=data, params=params, single_item=True)
['def', 'get_single_group_membership_users', '(', 'self', ',', 'user_id', ',', 'group_id', ')', ':', 'path', '=', '{', '}', 'data', '=', '{', '}', 'params', '=', '{', '}', '# REQUIRED - PATH - group_id\r', '"""ID"""', 'path', '[', '"group_id"', ']', '=', 'group_id', '# REQUIRED - PATH - user_id\r', '"""ID"""', 'path', '[', '"user_id"', ']', '=', 'user_id', 'self', '.', 'logger', '.', 'debug', '(', '"GET /api/v1/groups/{group_id}/users/{user_id} with query params: {params} and form data: {data}"', '.', 'format', '(', 'params', '=', 'params', ',', 'data', '=', 'data', ',', '*', '*', 'path', ')', ')', 'return', 'self', '.', 'generic_request', '(', '"GET"', ',', '"/api/v1/groups/{group_id}/users/{user_id}"', '.', 'format', '(', '*', '*', 'path', ')', ',', 'data', '=', 'data', ',', 'params', '=', 'params', ',', 'single_item', '=', 'True', ')']
Get a single group membership. Returns the group membership with the given membership id or user id.
['Get', 'a', 'single', 'group', 'membership', '.', 'Returns', 'the', 'group', 'membership', 'with', 'the', 'given', 'membership', 'id', 'or', 'user', 'id', '.']
train
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/groups.py#L478-L497
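A hedged usage sketch for this record. The GroupsAPI constructor arguments are an assumption for illustration, not the documented signature; only the method call itself comes from the record.

from pycanvas.apis.groups import GroupsAPI

api = GroupsAPI("https://canvas.example.edu", "ACCESS_TOKEN")  # assumed ctor
membership = api.get_single_group_membership_users(user_id=42, group_id=7)
print(membership)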
4,188
chdzq/ARPAbetAndIPAConvertor
arpabetandipaconvertor/model/syllable.py
Syllable.translate_to_international_phonetic_alphabet
def translate_to_international_phonetic_alphabet(self, hide_stress_mark=False): ''' Convert to the International Phonetic Alphabet. The stress mark needs to be hidden when there is only one vowel. :param hide_stress_mark: :return: ''' translations = self.stress.mark_ipa() if (not hide_stress_mark) and self.have_vowel else "" for phoneme in self._phoneme_list: translations += phoneme.ipa return translations
python
def translate_to_international_phonetic_alphabet(self, hide_stress_mark=False): ''' Convert to the International Phonetic Alphabet. The stress mark needs to be hidden when there is only one vowel. :param hide_stress_mark: :return: ''' translations = self.stress.mark_ipa() if (not hide_stress_mark) and self.have_vowel else "" for phoneme in self._phoneme_list: translations += phoneme.ipa return translations
['def', 'translate_to_international_phonetic_alphabet', '(', 'self', ',', 'hide_stress_mark', '=', 'False', ')', ':', 'translations', '=', 'self', '.', 'stress', '.', 'mark_ipa', '(', ')', 'if', '(', 'not', 'hide_stress_mark', ')', 'and', 'self', '.', 'have_vowel', 'else', '""', 'for', 'phoneme', 'in', 'self', '.', '_phoneme_list', ':', 'translations', '+=', 'phoneme', '.', 'ipa', 'return', 'translations']
Convert to the International Phonetic Alphabet. The stress mark needs to be hidden when there is only one vowel. :param hide_stress_mark: :return:
['Convert', 'to', 'the', 'International', 'Phonetic', 'Alphabet', '.', 'The', 'stress', 'mark', 'needs', 'to', 'be', 'hidden', 'when', 'there', 'is', 'only', 'one', 'vowel', '.', ':', 'param', 'hide_stress_mark', ':', ':', 'return', ':']
train
https://github.com/chdzq/ARPAbetAndIPAConvertor/blob/e8b2fdbb5b7134c4f779f4d6dcd5dc30979a0a26/arpabetandipaconvertor/model/syllable.py#L86-L98
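A self-contained illustration of the stress-hiding rule this record documents. FakeStress, FakePhoneme and FakeSyllable are simplified stand-ins, not the real arpabetandipaconvertor models.

class FakeStress:
    def mark_ipa(self):
        return "\u02c8"  # IPA primary stress mark

class FakePhoneme:
    def __init__(self, ipa):
        self.ipa = ipa

class FakeSyllable:
    def __init__(self, phonemes, have_vowel=True):
        self.stress = FakeStress()
        self._phoneme_list = [FakePhoneme(p) for p in phonemes]
        self.have_vowel = have_vowel

    def translate(self, hide_stress_mark=False):
        # Same rule as the record: prepend the stress mark only when it is
        # not hidden and the syllable contains a vowel.
        out = self.stress.mark_ipa() if (not hide_stress_mark) and self.have_vowel else ""
        for phoneme in self._phoneme_list:
            out += phoneme.ipa
        return out

print(FakeSyllable(["h", "\u0259"]).translate())                       # 'ˈhə'
print(FakeSyllable(["h", "\u0259"]).translate(hide_stress_mark=True))  # 'hə'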
4,189
bcbio/bcbio-nextgen
bcbio/cwl/tool.py
_run_cwltool
def _run_cwltool(args): """Run with cwltool -- reference implementation. """ main_file, json_file, project_name = _get_main_and_json(args.directory) work_dir = utils.safe_makedir(os.path.join(os.getcwd(), "cwltool_work")) tmp_dir = utils.safe_makedir(os.path.join(work_dir, "tmpcwl")) log_file = os.path.join(work_dir, "%s-cwltool.log" % project_name) os.environ["TMPDIR"] = tmp_dir flags = ["--tmpdir-prefix", tmp_dir, "--tmp-outdir-prefix", tmp_dir] if args.no_container: _remove_bcbiovm_path() flags += ["--no-container", "--preserve-environment", "PATH", "--preserve-environment", "HOME"] cmd = ["cwltool"] + flags + args.toolargs + ["--", main_file, json_file] with utils.chdir(work_dir): _run_tool(cmd, not args.no_container, work_dir, log_file=log_file)
python
def _run_cwltool(args): """Run with cwltool -- reference implementation. """ main_file, json_file, project_name = _get_main_and_json(args.directory) work_dir = utils.safe_makedir(os.path.join(os.getcwd(), "cwltool_work")) tmp_dir = utils.safe_makedir(os.path.join(work_dir, "tmpcwl")) log_file = os.path.join(work_dir, "%s-cwltool.log" % project_name) os.environ["TMPDIR"] = tmp_dir flags = ["--tmpdir-prefix", tmp_dir, "--tmp-outdir-prefix", tmp_dir] if args.no_container: _remove_bcbiovm_path() flags += ["--no-container", "--preserve-environment", "PATH", "--preserve-environment", "HOME"] cmd = ["cwltool"] + flags + args.toolargs + ["--", main_file, json_file] with utils.chdir(work_dir): _run_tool(cmd, not args.no_container, work_dir, log_file=log_file)
['def', '_run_cwltool', '(', 'args', ')', ':', 'main_file', ',', 'json_file', ',', 'project_name', '=', '_get_main_and_json', '(', 'args', '.', 'directory', ')', 'work_dir', '=', 'utils', '.', 'safe_makedir', '(', 'os', '.', 'path', '.', 'join', '(', 'os', '.', 'getcwd', '(', ')', ',', '"cwltool_work"', ')', ')', 'tmp_dir', '=', 'utils', '.', 'safe_makedir', '(', 'os', '.', 'path', '.', 'join', '(', 'work_dir', ',', '"tmpcwl"', ')', ')', 'log_file', '=', 'os', '.', 'path', '.', 'join', '(', 'work_dir', ',', '"%s-cwltool.log"', '%', 'project_name', ')', 'os', '.', 'environ', '[', '"TMPDIR"', ']', '=', 'tmp_dir', 'flags', '=', '[', '"--tmpdir-prefix"', ',', 'tmp_dir', ',', '"--tmp-outdir-prefix"', ',', 'tmp_dir', ']', 'if', 'args', '.', 'no_container', ':', '_remove_bcbiovm_path', '(', ')', 'flags', '+=', '[', '"--no-container"', ',', '"--preserve-environment"', ',', '"PATH"', ',', '"--preserve-environment"', ',', '"HOME"', ']', 'cmd', '=', '[', '"cwltool"', ']', '+', 'flags', '+', 'args', '.', 'toolargs', '+', '[', '"--"', ',', 'main_file', ',', 'json_file', ']', 'with', 'utils', '.', 'chdir', '(', 'work_dir', ')', ':', '_run_tool', '(', 'cmd', ',', 'not', 'args', '.', 'no_container', ',', 'work_dir', ',', 'log_file', '=', 'log_file', ')']
Run with cwltool -- reference implementation.
['Run', 'with', 'cwltool', '--', 'reference', 'implementation', '.']
train
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/tool.py#L83-L97
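A runnable sketch of just the flag assembly this record performs. The file names and the no_container setting are made up for illustration; the real function derives them from the prepared CWL directory.

import os

work_dir = os.path.join(os.getcwd(), "cwltool_work")
tmp_dir = os.path.join(work_dir, "tmpcwl")
flags = ["--tmpdir-prefix", tmp_dir, "--tmp-outdir-prefix", tmp_dir]
no_container = True  # illustrative; mirrors args.no_container
if no_container:
    flags += ["--no-container",
              "--preserve-environment", "PATH",
              "--preserve-environment", "HOME"]
cmd = ["cwltool"] + flags + ["--", "main-project.cwl", "main-project-samples.json"]
print(" ".join(cmd))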
4,190
sckott/habanero
habanero/cn/styles.py
csl_styles
def csl_styles(**kwargs): ''' Get list of styles from https://github.com/citation-style-language/styles :param kwargs: any additional arguments will be passed on to `requests.get` :return: list, of CSL styles Usage:: from habanero import cn cn.csl_styles() ''' base = "https://api.github.com/repos/citation-style-language/styles" tt = requests.get(base + '/commits?per_page=1', **kwargs) tt.raise_for_status() check_json(tt) commres = tt.json() sha = commres[0]['sha'] sty = requests.get(base + "/git/trees/" + sha, **kwargs) sty.raise_for_status() check_json(sty) res = sty.json() files = [ z['path'] for z in res['tree'] ] matches = [ re.search(".csl", g) for g in files ] csls = [ x.string for x in filter(None, matches) ] return [ re.sub(".csl", "", x) for x in csls ]
python
def csl_styles(**kwargs): ''' Get list of styles from https://github.com/citation-style-language/styles :param kwargs: any additional arguments will be passed on to `requests.get` :return: list, of CSL styles Usage:: from habanero import cn cn.csl_styles() ''' base = "https://api.github.com/repos/citation-style-language/styles" tt = requests.get(base + '/commits?per_page=1', **kwargs) tt.raise_for_status() check_json(tt) commres = tt.json() sha = commres[0]['sha'] sty = requests.get(base + "/git/trees/" + sha, **kwargs) sty.raise_for_status() check_json(sty) res = sty.json() files = [ z['path'] for z in res['tree'] ] matches = [ re.search(".csl", g) for g in files ] csls = [ x.string for x in filter(None, matches) ] return [ re.sub(".csl", "", x) for x in csls ]
['def', 'csl_styles', '(', '*', '*', 'kwargs', ')', ':', 'base', '=', '"https://api.github.com/repos/citation-style-language/styles"', 'tt', '=', 'requests', '.', 'get', '(', 'base', '+', "'/commits?per_page=1'", ',', '*', '*', 'kwargs', ')', 'tt', '.', 'raise_for_status', '(', ')', 'check_json', '(', 'tt', ')', 'commres', '=', 'tt', '.', 'json', '(', ')', 'sha', '=', 'commres', '[', '0', ']', '[', "'sha'", ']', 'sty', '=', 'requests', '.', 'get', '(', 'base', '+', '"/git/trees/"', '+', 'sha', ',', '*', '*', 'kwargs', ')', 'sty', '.', 'raise_for_status', '(', ')', 'check_json', '(', 'sty', ')', 'res', '=', 'sty', '.', 'json', '(', ')', 'files', '=', '[', 'z', '[', "'path'", ']', 'for', 'z', 'in', 'res', '[', "'tree'", ']', ']', 'matches', '=', '[', 're', '.', 'search', '(', '".csl"', ',', 'g', ')', 'for', 'g', 'in', 'files', ']', 'csls', '=', '[', 'x', '.', 'string', 'for', 'x', 'in', 'filter', '(', 'None', ',', 'matches', ')', ']', 'return', '[', 're', '.', 'sub', '(', '".csl"', ',', '""', ',', 'x', ')', 'for', 'x', 'in', 'csls', ']']
Get list of styles from https://github.com/citation-style-language/styles :param kwargs: any additional arguments will be passed on to `requests.get` :return: list, of CSL styles Usage:: from habanero import cn cn.csl_styles()
['Get', 'list', 'of', 'styles', 'from', 'https', ':', '//', 'github', '.', 'com', '/', 'citation', '-', 'style', '-', 'language', '/', 'styles']
train
https://github.com/sckott/habanero/blob/a17d87070378786bbb138e1c9712ecad9aacf38e/habanero/cn/styles.py#L7-L33
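A usage sketch for this record; it performs live requests against the GitHub API. Note that the re.search(".csl", g) filter in the source uses an unescaped dot, so it technically matches any character followed by 'csl'; for this repository's tree the result is still the intended style list.

from habanero import cn

styles = cn.csl_styles()
print(len(styles), styles[:3])  # style names with the '.csl' suffix stripped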
4,191
collectiveacuity/labPack
labpack/platforms/aws/ec2.py
ec2Client.delete_instance
def delete_instance(self, instance_id): ''' method for removing an instance from AWS EC2 :param instance_id: string of instance id on AWS :return: string reporting state of instance ''' title = '%s.delete_instance' % self.__class__.__name__ # validate inputs input_fields = { 'instance_id': instance_id } for key, value in input_fields.items(): object_title = '%s(%s=%s)' % (title, key, str(value)) self.fields.validate(value, '.%s' % key, object_title) # report query self.iam.printer('Removing instance %s from AWS region %s.' % (instance_id, self.iam.region_name)) # retrieve state old_state = self.check_instance_state(instance_id) # discover tags associated with instance id tag_list = [] try: response = self.connection.describe_tags( Filters=[ { 'Name': 'resource-id', 'Values': [ instance_id ] } ] ) import re aws_tag_pattern = re.compile('aws:') for i in range(0, len(response['Tags'])): if not aws_tag_pattern.findall(response['Tags'][i]['Key']): tag = {} tag['Key'] = response['Tags'][i]['Key'] tag['Value'] = response['Tags'][i]['Value'] tag_list.append(tag) except: raise AWSConnectionError(title) # remove tags from instance try: self.connection.delete_tags( Resources=[ instance_id ], Tags=tag_list ) self.iam.printer('Tags have been deleted from %s.' % instance_id) except: raise AWSConnectionError(title) # stop instance try: self.connection.stop_instances( InstanceIds=[ instance_id ] ) except: raise AWSConnectionError(title) # terminate instance try: response = self.connection.terminate_instances( InstanceIds=[ instance_id ] ) new_state = response['TerminatingInstances'][0]['CurrentState']['Name'] except: raise AWSConnectionError(title) # report outcome and return true self.iam.printer('Instance %s was %s.' % (instance_id, old_state)) self.iam.printer('Instance %s is %s.' % (instance_id, new_state)) return new_state
python
def delete_instance(self, instance_id): ''' method for removing an instance from AWS EC2 :param instance_id: string of instance id on AWS :return: string reporting state of instance ''' title = '%s.delete_instance' % self.__class__.__name__ # validate inputs input_fields = { 'instance_id': instance_id } for key, value in input_fields.items(): object_title = '%s(%s=%s)' % (title, key, str(value)) self.fields.validate(value, '.%s' % key, object_title) # report query self.iam.printer('Removing instance %s from AWS region %s.' % (instance_id, self.iam.region_name)) # retrieve state old_state = self.check_instance_state(instance_id) # discover tags associated with instance id tag_list = [] try: response = self.connection.describe_tags( Filters=[ { 'Name': 'resource-id', 'Values': [ instance_id ] } ] ) import re aws_tag_pattern = re.compile('aws:') for i in range(0, len(response['Tags'])): if not aws_tag_pattern.findall(response['Tags'][i]['Key']): tag = {} tag['Key'] = response['Tags'][i]['Key'] tag['Value'] = response['Tags'][i]['Value'] tag_list.append(tag) except: raise AWSConnectionError(title) # remove tags from instance try: self.connection.delete_tags( Resources=[ instance_id ], Tags=tag_list ) self.iam.printer('Tags have been deleted from %s.' % instance_id) except: raise AWSConnectionError(title) # stop instance try: self.connection.stop_instances( InstanceIds=[ instance_id ] ) except: raise AWSConnectionError(title) # terminate instance try: response = self.connection.terminate_instances( InstanceIds=[ instance_id ] ) new_state = response['TerminatingInstances'][0]['CurrentState']['Name'] except: raise AWSConnectionError(title) # report outcome and return true self.iam.printer('Instance %s was %s.' % (instance_id, old_state)) self.iam.printer('Instance %s is %s.' % (instance_id, new_state)) return new_state
['def', 'delete_instance', '(', 'self', ',', 'instance_id', ')', ':', 'title', '=', "'%s.delete_instance'", '%', 'self', '.', '__class__', '.', '__name__', '# validate inputs', 'input_fields', '=', '{', "'instance_id'", ':', 'instance_id', '}', 'for', 'key', ',', 'value', 'in', 'input_fields', '.', 'items', '(', ')', ':', 'object_title', '=', "'%s(%s=%s)'", '%', '(', 'title', ',', 'key', ',', 'str', '(', 'value', ')', ')', 'self', '.', 'fields', '.', 'validate', '(', 'value', ',', "'.%s'", '%', 'key', ',', 'object_title', ')', '# report query', 'self', '.', 'iam', '.', 'printer', '(', "'Removing instance %s from AWS region %s.'", '%', '(', 'instance_id', ',', 'self', '.', 'iam', '.', 'region_name', ')', ')', '# retrieve state', 'old_state', '=', 'self', '.', 'check_instance_state', '(', 'instance_id', ')', '# discover tags associated with instance id', 'tag_list', '=', '[', ']', 'try', ':', 'response', '=', 'self', '.', 'connection', '.', 'describe_tags', '(', 'Filters', '=', '[', '{', "'Name'", ':', "'resource-id'", ',', "'Values'", ':', '[', 'instance_id', ']', '}', ']', ')', 'import', 're', 'aws_tag_pattern', '=', 're', '.', 'compile', '(', "'aws:'", ')', 'for', 'i', 'in', 'range', '(', '0', ',', 'len', '(', 'response', '[', "'Tags'", ']', ')', ')', ':', 'if', 'not', 'aws_tag_pattern', '.', 'findall', '(', 'response', '[', "'Tags'", ']', '[', 'i', ']', '[', "'Key'", ']', ')', ':', 'tag', '=', '{', '}', 'tag', '[', "'Key'", ']', '=', 'response', '[', "'Tags'", ']', '[', 'i', ']', '[', "'Key'", ']', 'tag', '[', "'Value'", ']', '=', 'response', '[', "'Tags'", ']', '[', 'i', ']', '[', "'Value'", ']', 'tag_list', '.', 'append', '(', 'tag', ')', 'except', ':', 'raise', 'AWSConnectionError', '(', 'title', ')', '# remove tags from instance', 'try', ':', 'self', '.', 'connection', '.', 'delete_tags', '(', 'Resources', '=', '[', 'instance_id', ']', ',', 'Tags', '=', 'tag_list', ')', 'self', '.', 'iam', '.', 'printer', '(', "'Tags have been deleted from %s.'", '%', 'instance_id', ')', 'except', ':', 'raise', 'AWSConnectionError', '(', 'title', ')', '# stop instance', 'try', ':', 'self', '.', 'connection', '.', 'stop_instances', '(', 'InstanceIds', '=', '[', 'instance_id', ']', ')', 'except', ':', 'raise', 'AWSConnectionError', '(', 'title', ')', '# terminate instance', 'try', ':', 'response', '=', 'self', '.', 'connection', '.', 'terminate_instances', '(', 'InstanceIds', '=', '[', 'instance_id', ']', ')', 'new_state', '=', 'response', '[', "'TerminatingInstances'", ']', '[', '0', ']', '[', "'CurrentState'", ']', '[', "'Name'", ']', 'except', ':', 'raise', 'AWSConnectionError', '(', 'title', ')', '# report outcome and return true', 'self', '.', 'iam', '.', 'printer', '(', "'Instance %s was %s.'", '%', '(', 'instance_id', ',', 'old_state', ')', ')', 'self', '.', 'iam', '.', 'printer', '(', "'Instance %s is %s.'", '%', '(', 'instance_id', ',', 'new_state', ')', ')', 'return', 'new_state']
method for removing an instance from AWS EC2 :param instance_id: string of instance id on AWS :return: string reporting state of instance
['method', 'for', 'removing', 'an', 'instance', 'from', 'AWS', 'EC2']
train
https://github.com/collectiveacuity/labPack/blob/52949ece35e72e3cc308f54d9ffa6bfbd96805b8/labpack/platforms/aws/ec2.py#L613-L685
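A heavily hedged usage sketch: the ec2Client constructor arguments shown here are illustrative guesses, not the documented signature; only delete_instance and its string return value come from the record.

from labpack.platforms.aws.ec2 import ec2Client

# Constructor kwargs below are assumptions for illustration only.
client = ec2Client(access_id='...', secret_key='...', region_name='us-east-1')
state = client.delete_instance('i-0123456789abcdef0')
print(state)  # expected to report a terminating state, e.g. 'shutting-down'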
4,192
telminov/sw-django-utils
djutils/date_utils.py
date_to_timestamp
def date_to_timestamp(date): """ date to unix timestamp in milliseconds """ date_tuple = date.timetuple() timestamp = calendar.timegm(date_tuple) * 1000 return timestamp
python
def date_to_timestamp(date): """ date to unix timestamp in milliseconds """ date_tuple = date.timetuple() timestamp = calendar.timegm(date_tuple) * 1000 return timestamp
['def', 'date_to_timestamp', '(', 'date', ')', ':', 'date_tuple', '=', 'date', '.', 'timetuple', '(', ')', 'timestamp', '=', 'calendar', '.', 'timegm', '(', 'date_tuple', ')', '*', '1000', 'return', 'timestamp']
date to unix timestamp in milliseconds
['date', 'to', 'unix', 'timestamp', 'in', 'milliseconds']
train
https://github.com/telminov/sw-django-utils/blob/43b8491c87a5dd8fce145834c00198f4de14ceb9/djutils/date_utils.py#L39-L45
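A worked example of the conversion this record documents, with the same logic inlined so it runs standalone.

import calendar
import datetime

def date_to_timestamp(date):
    # Identical logic to the record: seconds since the epoch, in UTC, x1000.
    return calendar.timegm(date.timetuple()) * 1000

# 1970-01-02 is exactly one day after the epoch: 86,400,000 milliseconds.
assert date_to_timestamp(datetime.date(1970, 1, 2)) == 86400000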
4,193
pandas-dev/pandas
pandas/io/pytables.py
_convert_string_array
def _convert_string_array(data, encoding, errors, itemsize=None): """ we take a string-like that is object dtype and coerce to a fixed size string type Parameters ---------- data : a numpy array of object dtype encoding : None or string-encoding errors : handler for encoding errors itemsize : integer, optional, defaults to the max length of the strings Returns ------- data in a fixed-length string dtype, encoded to bytes if needed """ # encode if needed if encoding is not None and len(data): data = Series(data.ravel()).str.encode( encoding, errors).values.reshape(data.shape) # create the sized dtype if itemsize is None: ensured = ensure_object(data.ravel()) itemsize = max(1, libwriters.max_len_string_array(ensured)) data = np.asarray(data, dtype="S{size}".format(size=itemsize)) return data
python
def _convert_string_array(data, encoding, errors, itemsize=None): """ we take a string-like that is object dtype and coerce to a fixed size string type Parameters ---------- data : a numpy array of object dtype encoding : None or string-encoding errors : handler for encoding errors itemsize : integer, optional, defaults to the max length of the strings Returns ------- data in a fixed-length string dtype, encoded to bytes if needed """ # encode if needed if encoding is not None and len(data): data = Series(data.ravel()).str.encode( encoding, errors).values.reshape(data.shape) # create the sized dtype if itemsize is None: ensured = ensure_object(data.ravel()) itemsize = max(1, libwriters.max_len_string_array(ensured)) data = np.asarray(data, dtype="S{size}".format(size=itemsize)) return data
['def', '_convert_string_array', '(', 'data', ',', 'encoding', ',', 'errors', ',', 'itemsize', '=', 'None', ')', ':', '# encode if needed', 'if', 'encoding', 'is', 'not', 'None', 'and', 'len', '(', 'data', ')', ':', 'data', '=', 'Series', '(', 'data', '.', 'ravel', '(', ')', ')', '.', 'str', '.', 'encode', '(', 'encoding', ',', 'errors', ')', '.', 'values', '.', 'reshape', '(', 'data', '.', 'shape', ')', '# create the sized dtype', 'if', 'itemsize', 'is', 'None', ':', 'ensured', '=', 'ensure_object', '(', 'data', '.', 'ravel', '(', ')', ')', 'itemsize', '=', 'max', '(', '1', ',', 'libwriters', '.', 'max_len_string_array', '(', 'ensured', ')', ')', 'data', '=', 'np', '.', 'asarray', '(', 'data', ',', 'dtype', '=', '"S{size}"', '.', 'format', '(', 'size', '=', 'itemsize', ')', ')', 'return', 'data']
we take a string-like that is object dtype and coerce to a fixed size string type Parameters ---------- data : a numpy array of object dtype encoding : None or string-encoding errors : handler for encoding errors itemsize : integer, optional, defaults to the max length of the strings Returns ------- data in a fixed-length string dtype, encoded to bytes if needed
['we', 'take', 'a', 'string', '-', 'like', 'that', 'is', 'object', 'dtype', 'and', 'coerce', 'to', 'a', 'fixed', 'size', 'string', 'type']
train
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/pytables.py#L4521-L4549
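A standalone, ASCII-only illustration of the fixed-width coercion described above, without the pandas internals; the real helper also routes the data through Series.str.encode when an encoding is given.

import numpy as np

data = np.array(["a", "bcd", "ef"], dtype=object)
# Size the dtype to the longest string, with a floor of 1, as the record does.
itemsize = max(1, max(len(s) for s in data))
fixed = np.asarray(data, dtype="S{size}".format(size=itemsize))
print(fixed)        # [b'a' b'bcd' b'ef']
print(fixed.dtype)  # |S3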
4,194
bsolomon1124/pyfinance
pyfinance/returns.py
TSeries.batting_avg
def batting_avg(self, benchmark): """Percentage of periods when `self` outperformed `benchmark`. Parameters ---------- benchmark : {pd.Series, TSeries, 1d np.ndarray} The benchmark security to which `self` is compared. Returns ------- float """ diff = self.excess_ret(benchmark) return np.count_nonzero(diff > 0.0) / diff.count()
python
def batting_avg(self, benchmark): """Percentage of periods when `self` outperformed `benchmark`. Parameters ---------- benchmark : {pd.Series, TSeries, 1d np.ndarray} The benchmark security to which `self` is compared. Returns ------- float """ diff = self.excess_ret(benchmark) return np.count_nonzero(diff > 0.0) / diff.count()
['def', 'batting_avg', '(', 'self', ',', 'benchmark', ')', ':', 'diff', '=', 'self', '.', 'excess_ret', '(', 'benchmark', ')', 'return', 'np', '.', 'count_nonzero', '(', 'diff', '>', '0.0', ')', '/', 'diff', '.', 'count', '(', ')']
Percentage of periods when `self` outperformed `benchmark`. Parameters ---------- benchmark : {pd.Series, TSeries, 1d np.ndarray} The benchmark security to which `self` is compared. Returns ------- float
['Percentage', 'of', 'periods', 'when', 'self', 'outperformed', 'benchmark', '.']
train
https://github.com/bsolomon1124/pyfinance/blob/c95925209a809b4e648e79cbeaf7711d8e5ff1a6/pyfinance/returns.py#L174-L188
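A worked example of the calculation, using a plain NumPy difference as a stand-in for self.excess_ret(benchmark) and .size in place of the NaN-aware .count().

import numpy as np

returns   = np.array([0.02, -0.01, 0.03, 0.00])
benchmark = np.array([0.01,  0.00, 0.04, -0.01])
diff = returns - benchmark  # stand-in for excess_ret()
# Fraction of periods with a strictly positive excess return.
print(np.count_nonzero(diff > 0.0) / diff.size)  # 0.5 -- ahead in 2 of 4 periods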
4,195
grahame/sedge
sedge/engine.py
SedgeEngine.parse
def parse(self, fd): """very simple parser - but why would we want it to be complex?""" def resolve_args(args): # FIXME break this out, it's in common with the templating stuff elsewhere root = self.sections[0] val_dict = dict(('<' + t + '>', u) for (t, u) in root.get_variables().items()) resolved_args = [] for arg in args: for subst, value in val_dict.items(): arg = arg.replace(subst, value) resolved_args.append(arg) return resolved_args def handle_section_defn(keyword, parts): if keyword == '@HostAttrs': if len(parts) != 1: raise ParserException('usage: @HostAttrs <hostname>') if self.sections[0].has_pending_with(): raise ParserException('@with not supported with @HostAttrs') self.sections.append(HostAttrs(parts[0])) return True if keyword == 'Host': if len(parts) != 1: raise ParserException('usage: Host <hostname>') self.sections.append(Host(parts[0], self.sections[0].pop_pending_with())) return True def handle_vardef(root, keyword, parts): if keyword == '@with': root.add_pending_with(parts) return True def handle_set_args(_, parts): if len(parts) == 0: raise ParserException('usage: @args arg-name ...') if not self.is_include(): return if self._args is None or len(self._args) != len(parts): raise ParserException('required arguments not passed to include {url} ({parts})'.format( url=self._url, parts=', '.join(parts)) ) root = self.sections[0] for key, value in zip(parts, self._args): root.set_value(key, value) def handle_set_value(_, parts): if len(parts) != 2: raise ParserException('usage: @set <key> <value>') root = self.sections[0] root.set_value(*resolve_args(parts)) def handle_add_type(section, parts): if len(parts) != 1: raise ParserException('usage: @is <HostAttrName>') section.add_type(parts[0]) def handle_via(section, parts): if len(parts) != 1: raise ParserException('usage: @via <Hostname>') section.add_line( 'ProxyCommand', ('ssh {args} nc %h %p 2> /dev/null'.format(args=pipes.quote(resolve_args(parts)[0])), ) ) def handle_identity(section, parts): if len(parts) != 1: raise ParserException('usage: @identity <name>') section.add_identity(resolve_args(parts)[0]) def handle_include(_, parts): if len(parts) == 0: raise ParserException('usage: @include <https://...|/path/to/file.sedge> [arg ...]') url = parts[0] parsed_url = urllib.parse.urlparse(url) if parsed_url.scheme == 'https': req = requests.get(url, verify=self._verify_ssl) text = req.text elif parsed_url.scheme == 'file': with open(parsed_url.path) as fd: text = fd.read() elif parsed_url.scheme == '': path = os.path.expanduser(url) with open(path) as fd: text = fd.read() else: raise SecurityException('error: @includes may only use paths or https:// or file:// URLs') subconfig = SedgeEngine( self._key_library, StringIO(text), self._verify_ssl, url=url, args=resolve_args(parts[1:]), parent_keydefs=self.keydefs, via_include=True) self.includes.append((url, subconfig)) def handle_keydef(_, parts): if len(parts) < 2: raise ParserException('usage: @key <name> [fingerprint]...') name = parts[0] fingerprints = parts[1:] self.keydefs[name] = fingerprints def handle_keyword(section, keyword, parts): handlers = { '@set': handle_set_value, '@args': handle_set_args, '@is': handle_add_type, '@via': handle_via, '@include': handle_include, '@key': handle_keydef, '@identity': handle_identity } if keyword in handlers: handlers[keyword](section, parts) return True for line in (t.strip() for t in fd): if line.startswith('#') or line == '': continue keyword, parts = SedgeEngine.parse_config_line(line) if handle_section_defn(keyword, parts): continue if handle_vardef(self.sections[0], keyword, parts): continue current_section = self.sections[-1] if handle_keyword(current_section, keyword, parts): continue if keyword.startswith('@'): raise ParserException("unknown expansion keyword {}".format(keyword)) # use other rather than parts to avoid messing up user # whitespace; we don't handle quotes in here as we don't # need to current_section.add_line(keyword, parts)
python
def parse(self, fd): """very simple parser - but why would we want it to be complex?""" def resolve_args(args): # FIXME break this out, it's in common with the templating stuff elsewhere root = self.sections[0] val_dict = dict(('<' + t + '>', u) for (t, u) in root.get_variables().items()) resolved_args = [] for arg in args: for subst, value in val_dict.items(): arg = arg.replace(subst, value) resolved_args.append(arg) return resolved_args def handle_section_defn(keyword, parts): if keyword == '@HostAttrs': if len(parts) != 1: raise ParserException('usage: @HostAttrs <hostname>') if self.sections[0].has_pending_with(): raise ParserException('@with not supported with @HostAttrs') self.sections.append(HostAttrs(parts[0])) return True if keyword == 'Host': if len(parts) != 1: raise ParserException('usage: Host <hostname>') self.sections.append(Host(parts[0], self.sections[0].pop_pending_with())) return True def handle_vardef(root, keyword, parts): if keyword == '@with': root.add_pending_with(parts) return True def handle_set_args(_, parts): if len(parts) == 0: raise ParserException('usage: @args arg-name ...') if not self.is_include(): return if self._args is None or len(self._args) != len(parts): raise ParserException('required arguments not passed to include {url} ({parts})'.format( url=self._url, parts=', '.join(parts)) ) root = self.sections[0] for key, value in zip(parts, self._args): root.set_value(key, value) def handle_set_value(_, parts): if len(parts) != 2: raise ParserException('usage: @set <key> <value>') root = self.sections[0] root.set_value(*resolve_args(parts)) def handle_add_type(section, parts): if len(parts) != 1: raise ParserException('usage: @is <HostAttrName>') section.add_type(parts[0]) def handle_via(section, parts): if len(parts) != 1: raise ParserException('usage: @via <Hostname>') section.add_line( 'ProxyCommand', ('ssh {args} nc %h %p 2> /dev/null'.format(args=pipes.quote(resolve_args(parts)[0])), ) ) def handle_identity(section, parts): if len(parts) != 1: raise ParserException('usage: @identity <name>') section.add_identity(resolve_args(parts)[0]) def handle_include(_, parts): if len(parts) == 0: raise ParserException('usage: @include <https://...|/path/to/file.sedge> [arg ...]') url = parts[0] parsed_url = urllib.parse.urlparse(url) if parsed_url.scheme == 'https': req = requests.get(url, verify=self._verify_ssl) text = req.text elif parsed_url.scheme == 'file': with open(parsed_url.path) as fd: text = fd.read() elif parsed_url.scheme == '': path = os.path.expanduser(url) with open(path) as fd: text = fd.read() else: raise SecurityException('error: @includes may only use paths or https:// or file:// URLs') subconfig = SedgeEngine( self._key_library, StringIO(text), self._verify_ssl, url=url, args=resolve_args(parts[1:]), parent_keydefs=self.keydefs, via_include=True) self.includes.append((url, subconfig)) def handle_keydef(_, parts): if len(parts) < 2: raise ParserException('usage: @key <name> [fingerprint]...') name = parts[0] fingerprints = parts[1:] self.keydefs[name] = fingerprints def handle_keyword(section, keyword, parts): handlers = { '@set': handle_set_value, '@args': handle_set_args, '@is': handle_add_type, '@via': handle_via, '@include': handle_include, '@key': handle_keydef, '@identity': handle_identity } if keyword in handlers: handlers[keyword](section, parts) return True for line in (t.strip() for t in fd): if line.startswith('#') or line == '': continue keyword, parts = SedgeEngine.parse_config_line(line) if handle_section_defn(keyword, parts): continue if handle_vardef(self.sections[0], keyword, parts): continue current_section = self.sections[-1] if handle_keyword(current_section, keyword, parts): continue if keyword.startswith('@'): raise ParserException("unknown expansion keyword {}".format(keyword)) # use other rather than parts to avoid messing up user # whitespace; we don't handle quotes in here as we don't # need to current_section.add_line(keyword, parts)
['def', 'parse', '(', 'self', ',', 'fd', ')', ':', 'def', 'resolve_args', '(', 'args', ')', ':', "# FIXME break this out, it's in common with the templating stuff elsewhere", 'root', '=', 'self', '.', 'sections', '[', '0', ']', 'val_dict', '=', 'dict', '(', '(', "'<'", '+', 't', '+', "'>'", ',', 'u', ')', 'for', '(', 't', ',', 'u', ')', 'in', 'root', '.', 'get_variables', '(', ')', '.', 'items', '(', ')', ')', 'resolved_args', '=', '[', ']', 'for', 'arg', 'in', 'args', ':', 'for', 'subst', ',', 'value', 'in', 'val_dict', '.', 'items', '(', ')', ':', 'arg', '=', 'arg', '.', 'replace', '(', 'subst', ',', 'value', ')', 'resolved_args', '.', 'append', '(', 'arg', ')', 'return', 'resolved_args', 'def', 'handle_section_defn', '(', 'keyword', ',', 'parts', ')', ':', 'if', 'keyword', '==', "'@HostAttrs'", ':', 'if', 'len', '(', 'parts', ')', '!=', '1', ':', 'raise', 'ParserException', '(', "'usage: @HostAttrs <hostname>'", ')', 'if', 'self', '.', 'sections', '[', '0', ']', '.', 'has_pending_with', '(', ')', ':', 'raise', 'ParserException', '(', "'@with not supported with @HostAttrs'", ')', 'self', '.', 'sections', '.', 'append', '(', 'HostAttrs', '(', 'parts', '[', '0', ']', ')', ')', 'return', 'True', 'if', 'keyword', '==', "'Host'", ':', 'if', 'len', '(', 'parts', ')', '!=', '1', ':', 'raise', 'ParserException', '(', "'usage: Host <hostname>'", ')', 'self', '.', 'sections', '.', 'append', '(', 'Host', '(', 'parts', '[', '0', ']', ',', 'self', '.', 'sections', '[', '0', ']', '.', 'pop_pending_with', '(', ')', ')', ')', 'return', 'True', 'def', 'handle_vardef', '(', 'root', ',', 'keyword', ',', 'parts', ')', ':', 'if', 'keyword', '==', "'@with'", ':', 'root', '.', 'add_pending_with', '(', 'parts', ')', 'return', 'True', 'def', 'handle_set_args', '(', '_', ',', 'parts', ')', ':', 'if', 'len', '(', 'parts', ')', '==', '0', ':', 'raise', 'ParserException', '(', "'usage: @args arg-name ...'", ')', 'if', 'not', 'self', '.', 'is_include', '(', ')', ':', 'return', 'if', 'self', '.', '_args', 'is', 'None', 'or', 'len', '(', 'self', '.', '_args', ')', '!=', 'len', '(', 'parts', ')', ':', 'raise', 'ParserException', '(', "'required arguments not passed to include {url} ({parts})'", '.', 'format', '(', 'url', '=', 'self', '.', '_url', ',', 'parts', '=', "', '", '.', 'join', '(', 'parts', ')', ')', ')', 'root', '=', 'self', '.', 'sections', '[', '0', ']', 'for', 'key', ',', 'value', 'in', 'zip', '(', 'parts', ',', 'self', '.', '_args', ')', ':', 'root', '.', 'set_value', '(', 'key', ',', 'value', ')', 'def', 'handle_set_value', '(', '_', ',', 'parts', ')', ':', 'if', 'len', '(', 'parts', ')', '!=', '2', ':', 'raise', 'ParserException', '(', "'usage: @set <key> <value>'", ')', 'root', '=', 'self', '.', 'sections', '[', '0', ']', 'root', '.', 'set_value', '(', '*', 'resolve_args', '(', 'parts', ')', ')', 'def', 'handle_add_type', '(', 'section', ',', 'parts', ')', ':', 'if', 'len', '(', 'parts', ')', '!=', '1', ':', 'raise', 'ParserException', '(', "'usage: @is <HostAttrName>'", ')', 'section', '.', 'add_type', '(', 'parts', '[', '0', ']', ')', 'def', 'handle_via', '(', 'section', ',', 'parts', ')', ':', 'if', 'len', '(', 'parts', ')', '!=', '1', ':', 'raise', 'ParserException', '(', "'usage: @via <Hostname>'", ')', 'section', '.', 'add_line', '(', "'ProxyCommand'", ',', '(', "'ssh {args} nc %h %p 2> /dev/null'", '.', 'format', '(', 'args', '=', 'pipes', '.', 'quote', '(', 'resolve_args', '(', 'parts', ')', '[', '0', ']', ')', ')', ',', ')', ')', 'def', 'handle_identity', '(', 'section', ',', 'parts', ')', ':', 'if', 'len', '(', 'parts', ')', '!=', '1', ':', 'raise', 'ParserException', '(', "'usage: @identity <name>'", ')', 'section', '.', 'add_identity', '(', 'resolve_args', '(', 'parts', ')', '[', '0', ']', ')', 'def', 'handle_include', '(', '_', ',', 'parts', ')', ':', 'if', 'len', '(', 'parts', ')', '==', '0', ':', 'raise', 'ParserException', '(', "'usage: @include <https://...|/path/to/file.sedge> [arg ...]'", ')', 'url', '=', 'parts', '[', '0', ']', 'parsed_url', '=', 'urllib', '.', 'parse', '.', 'urlparse', '(', 'url', ')', 'if', 'parsed_url', '.', 'scheme', '==', "'https'", ':', 'req', '=', 'requests', '.', 'get', '(', 'url', ',', 'verify', '=', 'self', '.', '_verify_ssl', ')', 'text', '=', 'req', '.', 'text', 'elif', 'parsed_url', '.', 'scheme', '==', "'file'", ':', 'with', 'open', '(', 'parsed_url', '.', 'path', ')', 'as', 'fd', ':', 'text', '=', 'fd', '.', 'read', '(', ')', 'elif', 'parsed_url', '.', 'scheme', '==', "''", ':', 'path', '=', 'os', '.', 'path', '.', 'expanduser', '(', 'url', ')', 'with', 'open', '(', 'path', ')', 'as', 'fd', ':', 'text', '=', 'fd', '.', 'read', '(', ')', 'else', ':', 'raise', 'SecurityException', '(', "'error: @includes may only use paths or https:// or file:// URLs'", ')', 'subconfig', '=', 'SedgeEngine', '(', 'self', '.', '_key_library', ',', 'StringIO', '(', 'text', ')', ',', 'self', '.', '_verify_ssl', ',', 'url', '=', 'url', ',', 'args', '=', 'resolve_args', '(', 'parts', '[', '1', ':', ']', ')', ',', 'parent_keydefs', '=', 'self', '.', 'keydefs', ',', 'via_include', '=', 'True', ')', 'self', '.', 'includes', '.', 'append', '(', '(', 'url', ',', 'subconfig', ')', ')', 'def', 'handle_keydef', '(', '_', ',', 'parts', ')', ':', 'if', 'len', '(', 'parts', ')', '<', '2', ':', 'raise', 'ParserException', '(', "'usage: @key <name> [fingerprint]...'", ')', 'name', '=', 'parts', '[', '0', ']', 'fingerprints', '=', 'parts', '[', '1', ':', ']', 'self', '.', 'keydefs', '[', 'name', ']', '=', 'fingerprints', 'def', 'handle_keyword', '(', 'section', ',', 'keyword', ',', 'parts', ')', ':', 'handlers', '=', '{', "'@set'", ':', 'handle_set_value', ',', "'@args'", ':', 'handle_set_args', ',', "'@is'", ':', 'handle_add_type', ',', "'@via'", ':', 'handle_via', ',', "'@include'", ':', 'handle_include', ',', "'@key'", ':', 'handle_keydef', ',', "'@identity'", ':', 'handle_identity', '}', 'if', 'keyword', 'in', 'handlers', ':', 'handlers', '[', 'keyword', ']', '(', 'section', ',', 'parts', ')', 'return', 'True', 'for', 'line', 'in', '(', 't', '.', 'strip', '(', ')', 'for', 't', 'in', 'fd', ')', ':', 'if', 'line', '.', 'startswith', '(', "'#'", ')', 'or', 'line', '==', "''", ':', 'continue', 'keyword', ',', 'parts', '=', 'SedgeEngine', '.', 'parse_config_line', '(', 'line', ')', 'if', 'handle_section_defn', '(', 'keyword', ',', 'parts', ')', ':', 'continue', 'if', 'handle_vardef', '(', 'self', '.', 'sections', '[', '0', ']', ',', 'keyword', ',', 'parts', ')', ':', 'continue', 'current_section', '=', 'self', '.', 'sections', '[', '-', '1', ']', 'if', 'handle_keyword', '(', 'current_section', ',', 'keyword', ',', 'parts', ')', ':', 'continue', 'if', 'keyword', '.', 'startswith', '(', "'@'", ')', ':', 'raise', 'ParserException', '(', '"unknown expansion keyword {}"', '.', 'format', '(', 'keyword', ')', ')', '# use other rather than parts to avoid messing up user', "# whitespace; we don't handle quotes in here as we don't", '# need to', 'current_section', '.', 'add_line', '(', 'keyword', ',', 'parts', ')']
very simple parser - but why would we want it to be complex?
['very', 'simple', 'parser', '-', 'but', 'why', 'would', 'we', 'want', 'it', 'to', 'be', 'complex?']
train
https://github.com/grahame/sedge/blob/60dc6a0c5ef3bf802fe48a2571a8524a6ea33878/sedge/engine.py#L328-L464
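A distilled version of the keyword-dispatch pattern parse() uses; the handler names here are illustrative stand-ins, not the real ones.

def handle_set(parts):
    print("@set ->", parts)

def handle_is(parts):
    print("@is ->", parts)

HANDLERS = {"@set": handle_set, "@is": handle_is}

def dispatch(line):
    # Split into a keyword and its arguments, then look up a handler,
    # rejecting unknown @-keywords as parse() does.
    keyword, *parts = line.split()
    if keyword in HANDLERS:
        HANDLERS[keyword](parts)
    elif keyword.startswith("@"):
        raise ValueError("unknown expansion keyword {}".format(keyword))

dispatch("@set user root")  # @set -> ['user', 'root']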
4,196
openstack/networking-cisco
networking_cisco/plugins/cisco/cfg_agent/cfg_agent.py
CiscoCfgAgent.process_services
def process_services(self, device_ids=None, removed_devices_info=None): """Process services managed by this config agent. This method is invoked by any of three scenarios. 1. Invoked by a periodic task running every `RPC_LOOP_INTERVAL` seconds. This is the most common scenario. In this mode, the method is called without any arguments. 2. Called by the `_process_backlogged_hosting_devices()` as part of the backlog processing task. In this mode, a list of device_ids are passed as arguments. These are the list of backlogged hosting devices that are now reachable and we want to sync services on them. 3. Called by the `hosting_devices_removed()` method. This is when the config agent has received a notification from the plugin that some hosting devices are going to be removed. The payload contains the details of the hosting devices and the associated neutron resources on them which should be processed and removed. To avoid race conditions with these scenarios, this function is protected by a lock. This method goes on to invoke `process_service()` on the different service helpers. :param device_ids: List of devices that are now available and needs to be processed :param removed_devices_info: Info about the hosting devices which are going to be removed and details of the resources hosted on them. Expected Format:: { 'hosting_data': {'hd_id1': {'routers': [id1, id2, ...]}, 'hd_id2': {'routers': [id3, id4, ...]}, ...}, 'deconfigure': True/False } :returns: None """ LOG.debug("Processing services started") # Now we process only routing service, additional services will be # added in future if self.routing_service_helper: self.routing_service_helper.process_service(device_ids, removed_devices_info) else: LOG.warning("No routing service helper loaded") LOG.debug("Processing services completed")
python
def process_services(self, device_ids=None, removed_devices_info=None): """Process services managed by this config agent. This method is invoked by any of three scenarios. 1. Invoked by a periodic task running every `RPC_LOOP_INTERVAL` seconds. This is the most common scenario. In this mode, the method is called without any arguments. 2. Called by the `_process_backlogged_hosting_devices()` as part of the backlog processing task. In this mode, a list of device_ids are passed as arguments. These are the list of backlogged hosting devices that are now reachable and we want to sync services on them. 3. Called by the `hosting_devices_removed()` method. This is when the config agent has received a notification from the plugin that some hosting devices are going to be removed. The payload contains the details of the hosting devices and the associated neutron resources on them which should be processed and removed. To avoid race conditions with these scenarios, this function is protected by a lock. This method goes on to invoke `process_service()` on the different service helpers. :param device_ids: List of devices that are now available and needs to be processed :param removed_devices_info: Info about the hosting devices which are going to be removed and details of the resources hosted on them. Expected Format:: { 'hosting_data': {'hd_id1': {'routers': [id1, id2, ...]}, 'hd_id2': {'routers': [id3, id4, ...]}, ...}, 'deconfigure': True/False } :returns: None """ LOG.debug("Processing services started") # Now we process only routing service, additional services will be # added in future if self.routing_service_helper: self.routing_service_helper.process_service(device_ids, removed_devices_info) else: LOG.warning("No routing service helper loaded") LOG.debug("Processing services completed")
['def', 'process_services', '(', 'self', ',', 'device_ids', '=', 'None', ',', 'removed_devices_info', '=', 'None', ')', ':', 'LOG', '.', 'debug', '(', '"Processing services started"', ')', '# Now we process only routing service, additional services will be', '# added in future', 'if', 'self', '.', 'routing_service_helper', ':', 'self', '.', 'routing_service_helper', '.', 'process_service', '(', 'device_ids', ',', 'removed_devices_info', ')', 'else', ':', 'LOG', '.', 'warning', '(', '"No routing service helper loaded"', ')', 'LOG', '.', 'debug', '(', '"Processing services completed"', ')']
Process services managed by this config agent. This method is invoked by any of three scenarios. 1. Invoked by a periodic task running every `RPC_LOOP_INTERVAL` seconds. This is the most common scenario. In this mode, the method is called without any arguments. 2. Called by the `_process_backlogged_hosting_devices()` as part of the backlog processing task. In this mode, a list of device_ids are passed as arguments. These are the list of backlogged hosting devices that are now reachable and we want to sync services on them. 3. Called by the `hosting_devices_removed()` method. This is when the config agent has received a notification from the plugin that some hosting devices are going to be removed. The payload contains the details of the hosting devices and the associated neutron resources on them which should be processed and removed. To avoid race conditions with these scenarios, this function is protected by a lock. This method goes on to invoke `process_service()` on the different service helpers. :param device_ids: List of devices that are now available and needs to be processed :param removed_devices_info: Info about the hosting devices which are going to be removed and details of the resources hosted on them. Expected Format:: { 'hosting_data': {'hd_id1': {'routers': [id1, id2, ...]}, 'hd_id2': {'routers': [id3, id4, ...]}, ...}, 'deconfigure': True/False } :returns: None
['Process', 'services', 'managed', 'by', 'this', 'config', 'agent', '.']
train
https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/plugins/cisco/cfg_agent/cfg_agent.py#L217-L267
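An example payload in the documented "Expected Format"; the ids are made up, and the agent variable is assumed to be a CiscoCfgAgent instance.

removed_devices_info = {
    'hosting_data': {
        'hd_id1': {'routers': ['router-id-1', 'router-id-2']},
        'hd_id2': {'routers': ['router-id-3']},
    },
    'deconfigure': True,
}
# agent.process_services(removed_devices_info=removed_devices_info)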
4,197
brantai/python-rightscale
rightscale/httpclient.py
HTTPClient.request
def request(self, method, path='/', url=None, ignore_codes=[], **kwargs): """ Wrapper for the ._request method that verifies if we're logged into RightScale before making a call, and sanity checks the oauth expiration time. :param str method: An HTTP method (e.g. 'get', 'post', 'PUT', etc...) :param str path: A path component of the target URL. This will be appended to the value of ``self.endpoint``. If both :attr:`path` and :attr:`url` are specified, the value in :attr:`url` is used and the :attr:`path` is ignored. :param str url: The target URL (e.g. ``http://server.tld/somepath/``). If both :attr:`path` and :attr:`url` are specified, the value in :attr:`url` is used and the :attr:`path` is ignored. :param ignore_codes: List of HTTP error codes (e.g. 404, 500) that should be ignored. If an HTTP error occurs and it is *not* in :attr:`ignore_codes`, then an exception is raised. :type ignore_codes: list of int :param kwargs: Any other kwargs to pass to :meth:`requests.request()`. Returns a :class:`requests.Response` object. """ # On every call, check if we're both logged in, and if the token is # expiring. If it is, we'll re-login with the information passed into # us at instantiation. if time.time() > self.auth_expires_at: self.login() # Now make the actual API call return self._request(method, path, url, ignore_codes, **kwargs)
python
def request(self, method, path='/', url=None, ignore_codes=[], **kwargs): """ Wrapper for the ._request method that verifies if we're logged into RightScale before making a call, and sanity checks the oauth expiration time. :param str method: An HTTP method (e.g. 'get', 'post', 'PUT', etc...) :param str path: A path component of the target URL. This will be appended to the value of ``self.endpoint``. If both :attr:`path` and :attr:`url` are specified, the value in :attr:`url` is used and the :attr:`path` is ignored. :param str url: The target URL (e.g. ``http://server.tld/somepath/``). If both :attr:`path` and :attr:`url` are specified, the value in :attr:`url` is used and the :attr:`path` is ignored. :param ignore_codes: List of HTTP error codes (e.g. 404, 500) that should be ignored. If an HTTP error occurs and it is *not* in :attr:`ignore_codes`, then an exception is raised. :type ignore_codes: list of int :param kwargs: Any other kwargs to pass to :meth:`requests.request()`. Returns a :class:`requests.Response` object. """ # On every call, check if we're both logged in, and if the token is # expiring. If it is, we'll re-login with the information passed into # us at instantiation. if time.time() > self.auth_expires_at: self.login() # Now make the actual API call return self._request(method, path, url, ignore_codes, **kwargs)
['def', 'request', '(', 'self', ',', 'method', ',', 'path', '=', "'/'", ',', 'url', '=', 'None', ',', 'ignore_codes', '=', '[', ']', ',', '*', '*', 'kwargs', ')', ':', "# On every call, check if we're both logged in, and if the token is", "# expiring. If it is, we'll re-login with the information passed into", '# us at instantiation.', 'if', 'time', '.', 'time', '(', ')', '>', 'self', '.', 'auth_expires_at', ':', 'self', '.', 'login', '(', ')', '# Now make the actual API call', 'return', 'self', '.', '_request', '(', 'method', ',', 'path', ',', 'url', ',', 'ignore_codes', ',', '*', '*', 'kwargs', ')']
Wrapper for the ._request method that verifies if we're logged into RightScale before making a call, and sanity checks the oauth expiration time. :param str method: An HTTP method (e.g. 'get', 'post', 'PUT', etc...) :param str path: A path component of the target URL. This will be appended to the value of ``self.endpoint``. If both :attr:`path` and :attr:`url` are specified, the value in :attr:`url` is used and the :attr:`path` is ignored. :param str url: The target URL (e.g. ``http://server.tld/somepath/``). If both :attr:`path` and :attr:`url` are specified, the value in :attr:`url` is used and the :attr:`path` is ignored. :param ignore_codes: List of HTTP error codes (e.g. 404, 500) that should be ignored. If an HTTP error occurs and it is *not* in :attr:`ignore_codes`, then an exception is raised. :type ignore_codes: list of int :param kwargs: Any other kwargs to pass to :meth:`requests.request()`. Returns a :class:`requests.Response` object.
['Wrapper', 'for', 'the', '.', '_request', 'method', 'that', 'verifies', 'if', 'we', 're', 'logged', 'into', 'RightScale', 'before', 'making', 'a', 'call', 'and', 'sanity', 'checks', 'the', 'oauth', 'expiration', 'time', '.']
train
https://github.com/brantai/python-rightscale/blob/5fbf4089922917247be712d58645a7b1504f0944/rightscale/httpclient.py#L94-L127
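A self-contained distillation of the expiry check this record describes; MiniClient is a stub, and login() stands in for the real OAuth refresh.

import time

class MiniClient:
    def __init__(self):
        self.auth_expires_at = 0.0  # forces a login on the first request

    def login(self):
        # Stub: a real client would refresh its OAuth token here.
        self.auth_expires_at = time.time() + 3600

    def request(self, method, path="/"):
        # Re-login whenever the token has expired, then make the call.
        if time.time() > self.auth_expires_at:
            self.login()
        return (method, path)  # a real client would now issue the HTTP call

print(MiniClient().request("get", "/api/sessions"))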
4,198
cameronbwhite/Flask-CAS
flask_cas/routing.py
login
def login(): """ This route has two purposes. First, it is used by the user to login. Second, it is used by the CAS to respond with the `ticket` after the user logs in successfully. When the user accesses this url, they are redirected to the CAS to login. If the login was successful, the CAS will respond to this route with the ticket in the url. The ticket is then validated. If validation was successful the logged in username is saved in the user's session under the key `CAS_USERNAME_SESSION_KEY` and the user's attributes are saved under the key 'CAS_USERNAME_ATTRIBUTE_KEY' """ cas_token_session_key = current_app.config['CAS_TOKEN_SESSION_KEY'] redirect_url = create_cas_login_url( current_app.config['CAS_SERVER'], current_app.config['CAS_LOGIN_ROUTE'], flask.url_for('.login', origin=flask.session.get('CAS_AFTER_LOGIN_SESSION_URL'), _external=True)) if 'ticket' in flask.request.args: flask.session[cas_token_session_key] = flask.request.args['ticket'] if cas_token_session_key in flask.session: if validate(flask.session[cas_token_session_key]): if 'CAS_AFTER_LOGIN_SESSION_URL' in flask.session: redirect_url = flask.session.pop('CAS_AFTER_LOGIN_SESSION_URL') elif flask.request.args.get('origin'): redirect_url = flask.request.args['origin'] else: redirect_url = flask.url_for( current_app.config['CAS_AFTER_LOGIN']) else: del flask.session[cas_token_session_key] current_app.logger.debug('Redirecting to: {0}'.format(redirect_url)) return flask.redirect(redirect_url)
python
def login(): """ This route has two purposes. First, it is used by the user to login. Second, it is used by the CAS to respond with the `ticket` after the user logs in successfully. When the user accesses this url, they are redirected to the CAS to login. If the login was successful, the CAS will respond to this route with the ticket in the url. The ticket is then validated. If validation was successful the logged in username is saved in the user's session under the key `CAS_USERNAME_SESSION_KEY` and the user's attributes are saved under the key 'CAS_USERNAME_ATTRIBUTE_KEY' """ cas_token_session_key = current_app.config['CAS_TOKEN_SESSION_KEY'] redirect_url = create_cas_login_url( current_app.config['CAS_SERVER'], current_app.config['CAS_LOGIN_ROUTE'], flask.url_for('.login', origin=flask.session.get('CAS_AFTER_LOGIN_SESSION_URL'), _external=True)) if 'ticket' in flask.request.args: flask.session[cas_token_session_key] = flask.request.args['ticket'] if cas_token_session_key in flask.session: if validate(flask.session[cas_token_session_key]): if 'CAS_AFTER_LOGIN_SESSION_URL' in flask.session: redirect_url = flask.session.pop('CAS_AFTER_LOGIN_SESSION_URL') elif flask.request.args.get('origin'): redirect_url = flask.request.args['origin'] else: redirect_url = flask.url_for( current_app.config['CAS_AFTER_LOGIN']) else: del flask.session[cas_token_session_key] current_app.logger.debug('Redirecting to: {0}'.format(redirect_url)) return flask.redirect(redirect_url)
['def', 'login', '(', ')', ':', 'cas_token_session_key', '=', 'current_app', '.', 'config', '[', "'CAS_TOKEN_SESSION_KEY'", ']', 'redirect_url', '=', 'create_cas_login_url', '(', 'current_app', '.', 'config', '[', "'CAS_SERVER'", ']', ',', 'current_app', '.', 'config', '[', "'CAS_LOGIN_ROUTE'", ']', ',', 'flask', '.', 'url_for', '(', "'.login'", ',', 'origin', '=', 'flask', '.', 'session', '.', 'get', '(', "'CAS_AFTER_LOGIN_SESSION_URL'", ')', ',', '_external', '=', 'True', ')', ')', 'if', "'ticket'", 'in', 'flask', '.', 'request', '.', 'args', ':', 'flask', '.', 'session', '[', 'cas_token_session_key', ']', '=', 'flask', '.', 'request', '.', 'args', '[', "'ticket'", ']', 'if', 'cas_token_session_key', 'in', 'flask', '.', 'session', ':', 'if', 'validate', '(', 'flask', '.', 'session', '[', 'cas_token_session_key', ']', ')', ':', 'if', "'CAS_AFTER_LOGIN_SESSION_URL'", 'in', 'flask', '.', 'session', ':', 'redirect_url', '=', 'flask', '.', 'session', '.', 'pop', '(', "'CAS_AFTER_LOGIN_SESSION_URL'", ')', 'elif', 'flask', '.', 'request', '.', 'args', '.', 'get', '(', "'origin'", ')', ':', 'redirect_url', '=', 'flask', '.', 'request', '.', 'args', '[', "'origin'", ']', 'else', ':', 'redirect_url', '=', 'flask', '.', 'url_for', '(', 'current_app', '.', 'config', '[', "'CAS_AFTER_LOGIN'", ']', ')', 'else', ':', 'del', 'flask', '.', 'session', '[', 'cas_token_session_key', ']', 'current_app', '.', 'logger', '.', 'debug', '(', "'Redirecting to: {0}'", '.', 'format', '(', 'redirect_url', ')', ')', 'return', 'flask', '.', 'redirect', '(', 'redirect_url', ')']
This route has two purposes. First, it is used by the user to login. Second, it is used by the CAS to respond with the `ticket` after the user logs in successfully. When the user accesses this url, they are redirected to the CAS to login. If the login was successful, the CAS will respond to this route with the ticket in the url. The ticket is then validated. If validation was successful the logged in username is saved in the user's session under the key `CAS_USERNAME_SESSION_KEY` and the user's attributes are saved under the key 'CAS_USERNAME_ATTRIBUTE_KEY'
['This', 'route', 'has', 'two', 'purposes', '.', 'First', 'it', 'is', 'used', 'by', 'the', 'user', 'to', 'login', '.', 'Second', 'it', 'is', 'used', 'by', 'the', 'CAS', 'to', 'respond', 'with', 'the', 'ticket', 'after', 'the', 'user', 'logs', 'in', 'successfully', '.']
train
https://github.com/cameronbwhite/Flask-CAS/blob/f85173938654cb9b9316a5c869000b74b008422e/flask_cas/routing.py#L18-L58
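An illustrative Flask configuration covering the config keys this route reads; the values are placeholders, and CAS_AFTER_LOGIN must name one of your own view functions.

import flask

app = flask.Flask(__name__)
app.config["CAS_SERVER"] = "https://cas.example.edu"     # placeholder CAS host
app.config["CAS_LOGIN_ROUTE"] = "/cas/login"
app.config["CAS_TOKEN_SESSION_KEY"] = "_CAS_TOKEN"
app.config["CAS_AFTER_LOGIN"] = "index"                  # endpoint of your view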
4,199
markovmodel/PyEMMA
pyemma/datasets/api.py
get_multi_temperature_data
def get_multi_temperature_data(kt0=1.0, kt1=5.0, length0=10000, length1=10000, n0=10, n1=10): """ Continuous MCMC process in an asymmetric double well potential at multiple temperatures. Parameters ---------- kt0: double, optional, default=1.0 Temperature in kT for the first thermodynamic state. kt1: double, optional, default=5.0 Temperature in kT for the second thermodynamic state. length0: int, optional, default=10000 Trajectory length in steps for the first thermodynamic state. length1: int, optional, default=10000 Trajectory length in steps for the second thermodynamic state. n0: int, optional, default=10 Number of trajectories in the first thermodynamic state. n1: int, optional, default=10 Number of trajectories in the second thermodynamic state. Returns ------- dict - keys shown below in brackets Trajectory (trajs), energy (energy_trajs), and temperature (temp_trajs) data from the MCMC runs as well as the discretised version (dtrajs + centers). Energies and temperatures are given in kT, lengths in arbitrary units. """ dws = _DWS() mt_data = dws.mt_sample( kt0=kt0, kt1=kt1, length0=length0, length1=length1, n0=n0, n1=n1) mt_data.update(centers=dws.centers) return mt_data
python
def get_multi_temperature_data(kt0=1.0, kt1=5.0, length0=10000, length1=10000, n0=10, n1=10): """ Continuous MCMC process in an asymmetric double well potential at multiple temperatures. Parameters ---------- kt0: double, optional, default=1.0 Temperature in kT for the first thermodynamic state. kt1: double, optional, default=5.0 Temperature in kT for the second thermodynamic state. length0: int, optional, default=10000 Trajectory length in steps for the first thermodynamic state. length1: int, optional, default=10000 Trajectory length in steps for the second thermodynamic state. n0: int, optional, default=10 Number of trajectories in the first thermodynamic state. n1: int, optional, default=10 Number of trajectories in the second thermodynamic state. Returns ------- dict - keys shown below in brackets Trajectory (trajs), energy (energy_trajs), and temperature (temp_trajs) data from the MCMC runs as well as the discretised version (dtrajs + centers). Energies and temperatures are given in kT, lengths in arbitrary units. """ dws = _DWS() mt_data = dws.mt_sample( kt0=kt0, kt1=kt1, length0=length0, length1=length1, n0=n0, n1=n1) mt_data.update(centers=dws.centers) return mt_data
['def', 'get_multi_temperature_data', '(', 'kt0', '=', '1.0', ',', 'kt1', '=', '5.0', ',', 'length0', '=', '10000', ',', 'length1', '=', '10000', ',', 'n0', '=', '10', ',', 'n1', '=', '10', ')', ':', 'dws', '=', '_DWS', '(', ')', 'mt_data', '=', 'dws', '.', 'mt_sample', '(', 'kt0', '=', 'kt0', ',', 'kt1', '=', 'kt1', ',', 'length0', '=', 'length0', ',', 'length1', '=', 'length1', ',', 'n0', '=', 'n0', ',', 'n1', '=', 'n1', ')', 'mt_data', '.', 'update', '(', 'centers', '=', 'dws', '.', 'centers', ')', 'return', 'mt_data']
Continuous MCMC process in an asymmetric double well potential at multiple temperatures. Parameters ---------- kt0: double, optional, default=1.0 Temperature in kT for the first thermodynamic state. kt1: double, optional, default=5.0 Temperature in kT for the second thermodynamic state. length0: int, optional, default=10000 Trajectory length in steps for the first thermodynamic state. length1: int, optional, default=10000 Trajectory length in steps for the second thermodynamic state. n0: int, optional, default=10 Number of trajectories in the first thermodynamic state. n1: int, optional, default=10 Number of trajectories in the second thermodynamic state. Returns ------- dict - keys shown below in brackets Trajectory (trajs), energy (energy_trajs), and temperature (temp_trajs) data from the MCMC runs as well as the discretised version (dtrajs + centers). Energies and temperatures are given in kT, lengths in arbitrary units.
['Continuous', 'MCMC', 'process', 'in', 'an', 'asymmetric', 'double', 'well', 'potential', 'at', 'multiple', 'temperatures', '.']
train
https://github.com/markovmodel/PyEMMA/blob/5c3124398217de05ba5ce9c8fb01519222481ab8/pyemma/datasets/api.py#L89-L119
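A usage sketch for this record; pyemma must be installed for the import to work, and the expected keys are taken from the docstring above.

from pyemma.datasets import api

data = api.get_multi_temperature_data(kt0=1.0, kt1=5.0,
                                      length0=1000, length1=1000,
                                      n0=2, n1=2)
# Per the docstring, expect 'trajs', 'energy_trajs', 'temp_trajs',
# 'dtrajs' and 'centers' among the keys.
print(sorted(data.keys()))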