Unnamed: 0
int64
0
10k
repository_name
stringlengths
7
54
func_path_in_repository
stringlengths
5
223
func_name
stringlengths
1
134
whole_func_string
stringlengths
100
30.3k
language
stringclasses
1 value
func_code_string
stringlengths
100
30.3k
func_code_tokens
stringlengths
138
33.2k
func_documentation_string
stringlengths
1
15k
func_documentation_tokens
stringlengths
5
5.14k
split_name
stringclasses
1 value
func_code_url
stringlengths
91
315
3,500
tensorflow/tensorboard
tensorboard/backend/event_processing/event_accumulator.py
_GetPurgeMessage
def _GetPurgeMessage(most_recent_step, most_recent_wall_time, event_step, event_wall_time, num_expired_scalars, num_expired_histos, num_expired_comp_histos, num_expired_images, num_expired_audio): """Return the string message associated with TensorBoard purges.""" return ('Detected out of order event.step likely caused by ' 'a TensorFlow restart. Purging expired events from Tensorboard' ' display between the previous step: {} (timestamp: {}) and ' 'current step: {} (timestamp: {}). Removing {} scalars, {} ' 'histograms, {} compressed histograms, {} images, ' 'and {} audio.').format(most_recent_step, most_recent_wall_time, event_step, event_wall_time, num_expired_scalars, num_expired_histos, num_expired_comp_histos, num_expired_images, num_expired_audio)
python
def _GetPurgeMessage(most_recent_step, most_recent_wall_time, event_step, event_wall_time, num_expired_scalars, num_expired_histos, num_expired_comp_histos, num_expired_images, num_expired_audio): """Return the string message associated with TensorBoard purges.""" return ('Detected out of order event.step likely caused by ' 'a TensorFlow restart. Purging expired events from Tensorboard' ' display between the previous step: {} (timestamp: {}) and ' 'current step: {} (timestamp: {}). Removing {} scalars, {} ' 'histograms, {} compressed histograms, {} images, ' 'and {} audio.').format(most_recent_step, most_recent_wall_time, event_step, event_wall_time, num_expired_scalars, num_expired_histos, num_expired_comp_histos, num_expired_images, num_expired_audio)
['def', '_GetPurgeMessage', '(', 'most_recent_step', ',', 'most_recent_wall_time', ',', 'event_step', ',', 'event_wall_time', ',', 'num_expired_scalars', ',', 'num_expired_histos', ',', 'num_expired_comp_histos', ',', 'num_expired_images', ',', 'num_expired_audio', ')', ':', 'return', '(', "'Detected out of order event.step likely caused by '", "'a TensorFlow restart. Purging expired events from Tensorboard'", "' display between the previous step: {} (timestamp: {}) and '", "'current step: {} (timestamp: {}). Removing {} scalars, {} '", "'histograms, {} compressed histograms, {} images, '", "'and {} audio.'", ')', '.', 'format', '(', 'most_recent_step', ',', 'most_recent_wall_time', ',', 'event_step', ',', 'event_wall_time', ',', 'num_expired_scalars', ',', 'num_expired_histos', ',', 'num_expired_comp_histos', ',', 'num_expired_images', ',', 'num_expired_audio', ')']
Return the string message associated with TensorBoard purges.
['Return', 'the', 'string', 'message', 'associated', 'with', 'TensorBoard', 'purges', '.']
train
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/backend/event_processing/event_accumulator.py#L719-L733
3,501
wiheto/teneto
teneto/classes/bids.py
TenetoBIDS.set_exclusion_file
def set_exclusion_file(self, confound, exclusion_criteria, confound_stat='mean'): """ Excludes subjects given a certain exclusion criteria. Parameters ---------- confound : str or list string or list of confound name(s) from confound files exclusion_criteria : str or list for each confound, an exclusion_criteria should be expressed as a string. It starts with >,<,>= or <= then the numerical threshold. Ex. '>0.2' will entail every subject with the avg greater than 0.2 of confound will be rejected. confound_stat : str or list Can be median, mean, std. How the confound data is aggregated (so if there is a meaasure per time-point, this is averaged over all time points. If multiple confounds specified, this has to be a list.). Returns -------- calls TenetoBIDS.set_bad_files with the files meeting the exclusion criteria. """ self.add_history(inspect.stack()[0][3], locals(), 1) if isinstance(confound, str): confound = [confound] if isinstance(exclusion_criteria, str): exclusion_criteria = [exclusion_criteria] if isinstance(confound_stat, str): confound_stat = [confound_stat] if len(exclusion_criteria) != len(confound): raise ValueError( 'Same number of confound names and exclusion criteria must be given') if len(confound_stat) != len(confound): raise ValueError( 'Same number of confound names and confound stats must be given') relex, crit = process_exclusion_criteria(exclusion_criteria) files = sorted(self.get_selected_files(quiet=1)) confound_files = sorted( self.get_selected_files(quiet=1, pipeline='confound')) files, confound_files = confound_matching(files, confound_files) bad_files = [] bs = 0 foundconfound = [] foundreason = [] for s, cfile in enumerate(confound_files): df = load_tabular_file(cfile, index_col=None) found_bad_subject = False for i, _ in enumerate(confound): if confound_stat[i] == 'median': if relex[i](df[confound[i]].median(), crit[i]): found_bad_subject = True elif confound_stat[i] == 'mean': if relex[i](df[confound[i]].mean(), crit[i]): found_bad_subject = True elif confound_stat[i] == 'std': if relex[i](df[i][confound[i]].std(), crit[i]): found_bad_subject = True if found_bad_subject: foundconfound.append(confound[i]) foundreason.append(exclusion_criteria[i]) if found_bad_subject: bad_files.append(files[s]) bs += 1 self.set_bad_files( bad_files, reason='excluded file (confound over specfied stat threshold)') for i, f in enumerate(bad_files): sidecar = get_sidecar(f) sidecar['file_exclusion'] = {} sidecar['confound'] = foundconfound[i] sidecar['threshold'] = foundreason[i] for af in ['.tsv', '.nii.gz']: f = f.split(af)[0] f += '.json' with open(f, 'w') as fs: json.dump(sidecar, fs) print('Removed ' + str(bs) + ' files from inclusion.')
python
def set_exclusion_file(self, confound, exclusion_criteria, confound_stat='mean'): """ Excludes subjects given a certain exclusion criteria. Parameters ---------- confound : str or list string or list of confound name(s) from confound files exclusion_criteria : str or list for each confound, an exclusion_criteria should be expressed as a string. It starts with >,<,>= or <= then the numerical threshold. Ex. '>0.2' will entail every subject with the avg greater than 0.2 of confound will be rejected. confound_stat : str or list Can be median, mean, std. How the confound data is aggregated (so if there is a meaasure per time-point, this is averaged over all time points. If multiple confounds specified, this has to be a list.). Returns -------- calls TenetoBIDS.set_bad_files with the files meeting the exclusion criteria. """ self.add_history(inspect.stack()[0][3], locals(), 1) if isinstance(confound, str): confound = [confound] if isinstance(exclusion_criteria, str): exclusion_criteria = [exclusion_criteria] if isinstance(confound_stat, str): confound_stat = [confound_stat] if len(exclusion_criteria) != len(confound): raise ValueError( 'Same number of confound names and exclusion criteria must be given') if len(confound_stat) != len(confound): raise ValueError( 'Same number of confound names and confound stats must be given') relex, crit = process_exclusion_criteria(exclusion_criteria) files = sorted(self.get_selected_files(quiet=1)) confound_files = sorted( self.get_selected_files(quiet=1, pipeline='confound')) files, confound_files = confound_matching(files, confound_files) bad_files = [] bs = 0 foundconfound = [] foundreason = [] for s, cfile in enumerate(confound_files): df = load_tabular_file(cfile, index_col=None) found_bad_subject = False for i, _ in enumerate(confound): if confound_stat[i] == 'median': if relex[i](df[confound[i]].median(), crit[i]): found_bad_subject = True elif confound_stat[i] == 'mean': if relex[i](df[confound[i]].mean(), crit[i]): found_bad_subject = True elif confound_stat[i] == 'std': if relex[i](df[i][confound[i]].std(), crit[i]): found_bad_subject = True if found_bad_subject: foundconfound.append(confound[i]) foundreason.append(exclusion_criteria[i]) if found_bad_subject: bad_files.append(files[s]) bs += 1 self.set_bad_files( bad_files, reason='excluded file (confound over specfied stat threshold)') for i, f in enumerate(bad_files): sidecar = get_sidecar(f) sidecar['file_exclusion'] = {} sidecar['confound'] = foundconfound[i] sidecar['threshold'] = foundreason[i] for af in ['.tsv', '.nii.gz']: f = f.split(af)[0] f += '.json' with open(f, 'w') as fs: json.dump(sidecar, fs) print('Removed ' + str(bs) + ' files from inclusion.')
['def', 'set_exclusion_file', '(', 'self', ',', 'confound', ',', 'exclusion_criteria', ',', 'confound_stat', '=', "'mean'", ')', ':', 'self', '.', 'add_history', '(', 'inspect', '.', 'stack', '(', ')', '[', '0', ']', '[', '3', ']', ',', 'locals', '(', ')', ',', '1', ')', 'if', 'isinstance', '(', 'confound', ',', 'str', ')', ':', 'confound', '=', '[', 'confound', ']', 'if', 'isinstance', '(', 'exclusion_criteria', ',', 'str', ')', ':', 'exclusion_criteria', '=', '[', 'exclusion_criteria', ']', 'if', 'isinstance', '(', 'confound_stat', ',', 'str', ')', ':', 'confound_stat', '=', '[', 'confound_stat', ']', 'if', 'len', '(', 'exclusion_criteria', ')', '!=', 'len', '(', 'confound', ')', ':', 'raise', 'ValueError', '(', "'Same number of confound names and exclusion criteria must be given'", ')', 'if', 'len', '(', 'confound_stat', ')', '!=', 'len', '(', 'confound', ')', ':', 'raise', 'ValueError', '(', "'Same number of confound names and confound stats must be given'", ')', 'relex', ',', 'crit', '=', 'process_exclusion_criteria', '(', 'exclusion_criteria', ')', 'files', '=', 'sorted', '(', 'self', '.', 'get_selected_files', '(', 'quiet', '=', '1', ')', ')', 'confound_files', '=', 'sorted', '(', 'self', '.', 'get_selected_files', '(', 'quiet', '=', '1', ',', 'pipeline', '=', "'confound'", ')', ')', 'files', ',', 'confound_files', '=', 'confound_matching', '(', 'files', ',', 'confound_files', ')', 'bad_files', '=', '[', ']', 'bs', '=', '0', 'foundconfound', '=', '[', ']', 'foundreason', '=', '[', ']', 'for', 's', ',', 'cfile', 'in', 'enumerate', '(', 'confound_files', ')', ':', 'df', '=', 'load_tabular_file', '(', 'cfile', ',', 'index_col', '=', 'None', ')', 'found_bad_subject', '=', 'False', 'for', 'i', ',', '_', 'in', 'enumerate', '(', 'confound', ')', ':', 'if', 'confound_stat', '[', 'i', ']', '==', "'median'", ':', 'if', 'relex', '[', 'i', ']', '(', 'df', '[', 'confound', '[', 'i', ']', ']', '.', 'median', '(', ')', ',', 'crit', '[', 'i', ']', ')', ':', 'found_bad_subject', '=', 'True', 'elif', 'confound_stat', '[', 'i', ']', '==', "'mean'", ':', 'if', 'relex', '[', 'i', ']', '(', 'df', '[', 'confound', '[', 'i', ']', ']', '.', 'mean', '(', ')', ',', 'crit', '[', 'i', ']', ')', ':', 'found_bad_subject', '=', 'True', 'elif', 'confound_stat', '[', 'i', ']', '==', "'std'", ':', 'if', 'relex', '[', 'i', ']', '(', 'df', '[', 'i', ']', '[', 'confound', '[', 'i', ']', ']', '.', 'std', '(', ')', ',', 'crit', '[', 'i', ']', ')', ':', 'found_bad_subject', '=', 'True', 'if', 'found_bad_subject', ':', 'foundconfound', '.', 'append', '(', 'confound', '[', 'i', ']', ')', 'foundreason', '.', 'append', '(', 'exclusion_criteria', '[', 'i', ']', ')', 'if', 'found_bad_subject', ':', 'bad_files', '.', 'append', '(', 'files', '[', 's', ']', ')', 'bs', '+=', '1', 'self', '.', 'set_bad_files', '(', 'bad_files', ',', 'reason', '=', "'excluded file (confound over specfied stat threshold)'", ')', 'for', 'i', ',', 'f', 'in', 'enumerate', '(', 'bad_files', ')', ':', 'sidecar', '=', 'get_sidecar', '(', 'f', ')', 'sidecar', '[', "'file_exclusion'", ']', '=', '{', '}', 'sidecar', '[', "'confound'", ']', '=', 'foundconfound', '[', 'i', ']', 'sidecar', '[', "'threshold'", ']', '=', 'foundreason', '[', 'i', ']', 'for', 'af', 'in', '[', "'.tsv'", ',', "'.nii.gz'", ']', ':', 'f', '=', 'f', '.', 'split', '(', 'af', ')', '[', '0', ']', 'f', '+=', "'.json'", 'with', 'open', '(', 'f', ',', "'w'", ')', 'as', 'fs', ':', 'json', '.', 'dump', '(', 'sidecar', ',', 'fs', ')', 'print', '(', "'Removed '", '+', 'str', '(', 'bs', ')', '+', "' files from inclusion.'", ')']
Excludes subjects given a certain exclusion criteria. Parameters ---------- confound : str or list string or list of confound name(s) from confound files exclusion_criteria : str or list for each confound, an exclusion_criteria should be expressed as a string. It starts with >,<,>= or <= then the numerical threshold. Ex. '>0.2' will entail every subject with the avg greater than 0.2 of confound will be rejected. confound_stat : str or list Can be median, mean, std. How the confound data is aggregated (so if there is a meaasure per time-point, this is averaged over all time points. If multiple confounds specified, this has to be a list.). Returns -------- calls TenetoBIDS.set_bad_files with the files meeting the exclusion criteria.
['Excludes', 'subjects', 'given', 'a', 'certain', 'exclusion', 'criteria', '.']
train
https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/classes/bids.py#L650-L719
3,502
lago-project/lago
lago/utils.py
deepcopy
def deepcopy(original_obj): """ Creates a deep copy of an object with no crossed referenced lists or dicts, useful when loading from yaml as anchors generate those cross-referenced dicts and lists Args: original_obj(object): Object to deep copy Return: object: deep copy of the object """ if isinstance(original_obj, list): return list(deepcopy(item) for item in original_obj) elif isinstance(original_obj, dict): return dict((key, deepcopy(val)) for key, val in original_obj.items()) else: return original_obj
python
def deepcopy(original_obj): """ Creates a deep copy of an object with no crossed referenced lists or dicts, useful when loading from yaml as anchors generate those cross-referenced dicts and lists Args: original_obj(object): Object to deep copy Return: object: deep copy of the object """ if isinstance(original_obj, list): return list(deepcopy(item) for item in original_obj) elif isinstance(original_obj, dict): return dict((key, deepcopy(val)) for key, val in original_obj.items()) else: return original_obj
['def', 'deepcopy', '(', 'original_obj', ')', ':', 'if', 'isinstance', '(', 'original_obj', ',', 'list', ')', ':', 'return', 'list', '(', 'deepcopy', '(', 'item', ')', 'for', 'item', 'in', 'original_obj', ')', 'elif', 'isinstance', '(', 'original_obj', ',', 'dict', ')', ':', 'return', 'dict', '(', '(', 'key', ',', 'deepcopy', '(', 'val', ')', ')', 'for', 'key', ',', 'val', 'in', 'original_obj', '.', 'items', '(', ')', ')', 'else', ':', 'return', 'original_obj']
Creates a deep copy of an object with no crossed referenced lists or dicts, useful when loading from yaml as anchors generate those cross-referenced dicts and lists Args: original_obj(object): Object to deep copy Return: object: deep copy of the object
['Creates', 'a', 'deep', 'copy', 'of', 'an', 'object', 'with', 'no', 'crossed', 'referenced', 'lists', 'or', 'dicts', 'useful', 'when', 'loading', 'from', 'yaml', 'as', 'anchors', 'generate', 'those', 'cross', '-', 'referenced', 'dicts', 'and', 'lists']
train
https://github.com/lago-project/lago/blob/5b8970f7687e063e4619066d5b8093ca997678c9/lago/utils.py#L493-L510
3,503
havardgulldahl/jottalib
src/jottalib/jottafuse.py
JottaFuse.truncate
def truncate(self, path, length, fh=None): "Download existing path, truncate and reupload" try: f = self._getpath(path) except JFS.JFSError: raise OSError(errno.ENOENT, '') if isinstance(f, (JFS.JFSFile, JFS.JFSFolder)) and f.is_deleted(): raise OSError(errno.ENOENT) data = StringIO(f.read()) data.truncate(length) try: self.client.up(path, data) # replace file contents self._dirty(path) return ESUCCESS except: raise OSError(errno.ENOENT, '')
python
def truncate(self, path, length, fh=None): "Download existing path, truncate and reupload" try: f = self._getpath(path) except JFS.JFSError: raise OSError(errno.ENOENT, '') if isinstance(f, (JFS.JFSFile, JFS.JFSFolder)) and f.is_deleted(): raise OSError(errno.ENOENT) data = StringIO(f.read()) data.truncate(length) try: self.client.up(path, data) # replace file contents self._dirty(path) return ESUCCESS except: raise OSError(errno.ENOENT, '')
['def', 'truncate', '(', 'self', ',', 'path', ',', 'length', ',', 'fh', '=', 'None', ')', ':', 'try', ':', 'f', '=', 'self', '.', '_getpath', '(', 'path', ')', 'except', 'JFS', '.', 'JFSError', ':', 'raise', 'OSError', '(', 'errno', '.', 'ENOENT', ',', "''", ')', 'if', 'isinstance', '(', 'f', ',', '(', 'JFS', '.', 'JFSFile', ',', 'JFS', '.', 'JFSFolder', ')', ')', 'and', 'f', '.', 'is_deleted', '(', ')', ':', 'raise', 'OSError', '(', 'errno', '.', 'ENOENT', ')', 'data', '=', 'StringIO', '(', 'f', '.', 'read', '(', ')', ')', 'data', '.', 'truncate', '(', 'length', ')', 'try', ':', 'self', '.', 'client', '.', 'up', '(', 'path', ',', 'data', ')', '# replace file contents', 'self', '.', '_dirty', '(', 'path', ')', 'return', 'ESUCCESS', 'except', ':', 'raise', 'OSError', '(', 'errno', '.', 'ENOENT', ',', "''", ')']
Download existing path, truncate and reupload
['Download', 'existing', 'path', 'truncate', 'and', 'reupload']
train
https://github.com/havardgulldahl/jottalib/blob/4d015e4309b1d9055e561ec757363fb2632b4eb7/src/jottalib/jottafuse.py#L330-L345
3,504
bitesofcode/projexui
projexui/widgets/xwalkthroughwidget/xwalkthroughwidget.py
XWalkthroughWidget.autoLayout
def autoLayout(self): """ Automatically lays out the contents for this widget. """ try: direction = self.currentSlide().scene().direction() except AttributeError: direction = QtGui.QBoxLayout.TopToBottom size = self.size() self._slideshow.resize(size) prev = self._previousButton next = self._nextButton if direction == QtGui.QBoxLayout.BottomToTop: y = 9 else: y = size.height() - prev.height() - 9 prev.move(9, y) next.move(size.width() - next.width() - 9, y) # update the layout for the slides for i in range(self._slideshow.count()): widget = self._slideshow.widget(i) widget.scene().autoLayout(size)
python
def autoLayout(self): """ Automatically lays out the contents for this widget. """ try: direction = self.currentSlide().scene().direction() except AttributeError: direction = QtGui.QBoxLayout.TopToBottom size = self.size() self._slideshow.resize(size) prev = self._previousButton next = self._nextButton if direction == QtGui.QBoxLayout.BottomToTop: y = 9 else: y = size.height() - prev.height() - 9 prev.move(9, y) next.move(size.width() - next.width() - 9, y) # update the layout for the slides for i in range(self._slideshow.count()): widget = self._slideshow.widget(i) widget.scene().autoLayout(size)
['def', 'autoLayout', '(', 'self', ')', ':', 'try', ':', 'direction', '=', 'self', '.', 'currentSlide', '(', ')', '.', 'scene', '(', ')', '.', 'direction', '(', ')', 'except', 'AttributeError', ':', 'direction', '=', 'QtGui', '.', 'QBoxLayout', '.', 'TopToBottom', 'size', '=', 'self', '.', 'size', '(', ')', 'self', '.', '_slideshow', '.', 'resize', '(', 'size', ')', 'prev', '=', 'self', '.', '_previousButton', 'next', '=', 'self', '.', '_nextButton', 'if', 'direction', '==', 'QtGui', '.', 'QBoxLayout', '.', 'BottomToTop', ':', 'y', '=', '9', 'else', ':', 'y', '=', 'size', '.', 'height', '(', ')', '-', 'prev', '.', 'height', '(', ')', '-', '9', 'prev', '.', 'move', '(', '9', ',', 'y', ')', 'next', '.', 'move', '(', 'size', '.', 'width', '(', ')', '-', 'next', '.', 'width', '(', ')', '-', '9', ',', 'y', ')', '# update the layout for the slides\r', 'for', 'i', 'in', 'range', '(', 'self', '.', '_slideshow', '.', 'count', '(', ')', ')', ':', 'widget', '=', 'self', '.', '_slideshow', '.', 'widget', '(', 'i', ')', 'widget', '.', 'scene', '(', ')', '.', 'autoLayout', '(', 'size', ')']
Automatically lays out the contents for this widget.
['Automatically', 'lays', 'out', 'the', 'contents', 'for', 'this', 'widget', '.']
train
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xwalkthroughwidget/xwalkthroughwidget.py#L113-L139
3,505
pyviz/holoviews
holoviews/plotting/bokeh/util.py
silence_warnings
def silence_warnings(*warnings): """ Context manager for silencing bokeh validation warnings. """ for warning in warnings: silence(warning) try: yield finally: for warning in warnings: silence(warning, False)
python
def silence_warnings(*warnings): """ Context manager for silencing bokeh validation warnings. """ for warning in warnings: silence(warning) try: yield finally: for warning in warnings: silence(warning, False)
['def', 'silence_warnings', '(', '*', 'warnings', ')', ':', 'for', 'warning', 'in', 'warnings', ':', 'silence', '(', 'warning', ')', 'try', ':', 'yield', 'finally', ':', 'for', 'warning', 'in', 'warnings', ':', 'silence', '(', 'warning', ',', 'False', ')']
Context manager for silencing bokeh validation warnings.
['Context', 'manager', 'for', 'silencing', 'bokeh', 'validation', 'warnings', '.']
train
https://github.com/pyviz/holoviews/blob/ae0dd2f3de448b0ca5e9065aabd6ef8d84c7e655/holoviews/plotting/bokeh/util.py#L338-L348
3,506
OnroerendErfgoed/crabpy
crabpy/gateway/crab.py
CrabGateway.list_deelgemeenten_by_gemeente
def list_deelgemeenten_by_gemeente(self, gemeente): ''' List all `deelgemeenten` in a `gemeente`. :param gemeente: The :class:`Gemeente` for which the \ `deelgemeenten` are wanted. Currently only Flanders is supported. :rtype: A :class:`list` of :class:`Deelgemeente`. ''' try: niscode = gemeente.niscode except AttributeError: niscode = gemeente def creator(): return [ Deelgemeente(dg['id'], dg['naam'], dg['gemeente_niscode']) for dg in self.deelgemeenten.values() if dg['gemeente_niscode'] == niscode ] if self.caches['permanent'].is_configured: key = 'ListDeelgemeentenByGemeenteId#%s' % niscode deelgemeenten = self.caches['permanent'].get_or_create(key, creator) else: deelgemeenten = creator() for dg in deelgemeenten: dg.set_gateway(self) return deelgemeenten
python
def list_deelgemeenten_by_gemeente(self, gemeente): ''' List all `deelgemeenten` in a `gemeente`. :param gemeente: The :class:`Gemeente` for which the \ `deelgemeenten` are wanted. Currently only Flanders is supported. :rtype: A :class:`list` of :class:`Deelgemeente`. ''' try: niscode = gemeente.niscode except AttributeError: niscode = gemeente def creator(): return [ Deelgemeente(dg['id'], dg['naam'], dg['gemeente_niscode']) for dg in self.deelgemeenten.values() if dg['gemeente_niscode'] == niscode ] if self.caches['permanent'].is_configured: key = 'ListDeelgemeentenByGemeenteId#%s' % niscode deelgemeenten = self.caches['permanent'].get_or_create(key, creator) else: deelgemeenten = creator() for dg in deelgemeenten: dg.set_gateway(self) return deelgemeenten
['def', 'list_deelgemeenten_by_gemeente', '(', 'self', ',', 'gemeente', ')', ':', 'try', ':', 'niscode', '=', 'gemeente', '.', 'niscode', 'except', 'AttributeError', ':', 'niscode', '=', 'gemeente', 'def', 'creator', '(', ')', ':', 'return', '[', 'Deelgemeente', '(', 'dg', '[', "'id'", ']', ',', 'dg', '[', "'naam'", ']', ',', 'dg', '[', "'gemeente_niscode'", ']', ')', 'for', 'dg', 'in', 'self', '.', 'deelgemeenten', '.', 'values', '(', ')', 'if', 'dg', '[', "'gemeente_niscode'", ']', '==', 'niscode', ']', 'if', 'self', '.', 'caches', '[', "'permanent'", ']', '.', 'is_configured', ':', 'key', '=', "'ListDeelgemeentenByGemeenteId#%s'", '%', 'niscode', 'deelgemeenten', '=', 'self', '.', 'caches', '[', "'permanent'", ']', '.', 'get_or_create', '(', 'key', ',', 'creator', ')', 'else', ':', 'deelgemeenten', '=', 'creator', '(', ')', 'for', 'dg', 'in', 'deelgemeenten', ':', 'dg', '.', 'set_gateway', '(', 'self', ')', 'return', 'deelgemeenten']
List all `deelgemeenten` in a `gemeente`. :param gemeente: The :class:`Gemeente` for which the \ `deelgemeenten` are wanted. Currently only Flanders is supported. :rtype: A :class:`list` of :class:`Deelgemeente`.
['List', 'all', 'deelgemeenten', 'in', 'a', 'gemeente', '.']
train
https://github.com/OnroerendErfgoed/crabpy/blob/3a6fd8bc5aca37c2a173e3ea94e4e468b8aa79c1/crabpy/gateway/crab.py#L387-L413
3,507
programa-stic/barf-project
barf/analysis/graphs/basicblock.py
bb_get_instr_max_width
def bb_get_instr_max_width(basic_block): """Get maximum instruction mnemonic width """ asm_mnemonic_max_width = 0 for instr in basic_block: if len(instr.mnemonic) > asm_mnemonic_max_width: asm_mnemonic_max_width = len(instr.mnemonic) return asm_mnemonic_max_width
python
def bb_get_instr_max_width(basic_block): """Get maximum instruction mnemonic width """ asm_mnemonic_max_width = 0 for instr in basic_block: if len(instr.mnemonic) > asm_mnemonic_max_width: asm_mnemonic_max_width = len(instr.mnemonic) return asm_mnemonic_max_width
['def', 'bb_get_instr_max_width', '(', 'basic_block', ')', ':', 'asm_mnemonic_max_width', '=', '0', 'for', 'instr', 'in', 'basic_block', ':', 'if', 'len', '(', 'instr', '.', 'mnemonic', ')', '>', 'asm_mnemonic_max_width', ':', 'asm_mnemonic_max_width', '=', 'len', '(', 'instr', '.', 'mnemonic', ')', 'return', 'asm_mnemonic_max_width']
Get maximum instruction mnemonic width
['Get', 'maximum', 'instruction', 'mnemonic', 'width']
train
https://github.com/programa-stic/barf-project/blob/18ed9e5eace55f7bf6015ec57f037c364099021c/barf/analysis/graphs/basicblock.py#L30-L39
3,508
junzis/pyModeS
pyModeS/decoder/common.py
wrongstatus
def wrongstatus(data, sb, msb, lsb): """Check if the status bit and field bits are consistency. This Function is used for checking BDS code versions. """ # status bit, most significant bit, least significant bit status = int(data[sb-1]) value = bin2int(data[msb-1:lsb]) if not status: if value != 0: return True return False
python
def wrongstatus(data, sb, msb, lsb): """Check if the status bit and field bits are consistency. This Function is used for checking BDS code versions. """ # status bit, most significant bit, least significant bit status = int(data[sb-1]) value = bin2int(data[msb-1:lsb]) if not status: if value != 0: return True return False
['def', 'wrongstatus', '(', 'data', ',', 'sb', ',', 'msb', ',', 'lsb', ')', ':', '# status bit, most significant bit, least significant bit', 'status', '=', 'int', '(', 'data', '[', 'sb', '-', '1', ']', ')', 'value', '=', 'bin2int', '(', 'data', '[', 'msb', '-', '1', ':', 'lsb', ']', ')', 'if', 'not', 'status', ':', 'if', 'value', '!=', '0', ':', 'return', 'True', 'return', 'False']
Check if the status bit and field bits are consistency. This Function is used for checking BDS code versions.
['Check', 'if', 'the', 'status', 'bit', 'and', 'field', 'bits', 'are', 'consistency', '.', 'This', 'Function', 'is', 'used', 'for', 'checking', 'BDS', 'code', 'versions', '.']
train
https://github.com/junzis/pyModeS/blob/8cd5655a04b08171a9ad5f1ffd232b7e0178ea53/pyModeS/decoder/common.py#L300-L313
3,509
brbsix/pip-utils
pip_utils/outdated.py
ListCommand.can_be_updated
def can_be_updated(cls, dist, latest_version): """Determine whether package can be updated or not.""" scheme = get_scheme('default') name = dist.project_name dependants = cls.get_dependants(name) for dependant in dependants: requires = dependant.requires() for requirement in cls.get_requirement(name, requires): req = parse_requirement(requirement) # Ignore error if version in requirement spec can't be parsed try: matcher = scheme.matcher(req.requirement) except UnsupportedVersionError: continue if not matcher.match(str(latest_version)): return False return True
python
def can_be_updated(cls, dist, latest_version): """Determine whether package can be updated or not.""" scheme = get_scheme('default') name = dist.project_name dependants = cls.get_dependants(name) for dependant in dependants: requires = dependant.requires() for requirement in cls.get_requirement(name, requires): req = parse_requirement(requirement) # Ignore error if version in requirement spec can't be parsed try: matcher = scheme.matcher(req.requirement) except UnsupportedVersionError: continue if not matcher.match(str(latest_version)): return False return True
['def', 'can_be_updated', '(', 'cls', ',', 'dist', ',', 'latest_version', ')', ':', 'scheme', '=', 'get_scheme', '(', "'default'", ')', 'name', '=', 'dist', '.', 'project_name', 'dependants', '=', 'cls', '.', 'get_dependants', '(', 'name', ')', 'for', 'dependant', 'in', 'dependants', ':', 'requires', '=', 'dependant', '.', 'requires', '(', ')', 'for', 'requirement', 'in', 'cls', '.', 'get_requirement', '(', 'name', ',', 'requires', ')', ':', 'req', '=', 'parse_requirement', '(', 'requirement', ')', "# Ignore error if version in requirement spec can't be parsed", 'try', ':', 'matcher', '=', 'scheme', '.', 'matcher', '(', 'req', '.', 'requirement', ')', 'except', 'UnsupportedVersionError', ':', 'continue', 'if', 'not', 'matcher', '.', 'match', '(', 'str', '(', 'latest_version', ')', ')', ':', 'return', 'False', 'return', 'True']
Determine whether package can be updated or not.
['Determine', 'whether', 'package', 'can', 'be', 'updated', 'or', 'not', '.']
train
https://github.com/brbsix/pip-utils/blob/bdd2a0a17cf36a1c88aa9e68002e9ed04a27bad8/pip_utils/outdated.py#L126-L143
3,510
nok/sklearn-porter
sklearn_porter/estimator/classifier/SVC/__init__.py
SVC.create_method
def create_method(self): """ Build the estimator method or function. Returns ------- :return : string The built method as string. """ n_indents = 1 if self.target_language in ['java', 'js', 'php', 'ruby'] else 0 return self.temp('separated.method', n_indents=n_indents, skipping=True).format(**self.__dict__)
python
def create_method(self): """ Build the estimator method or function. Returns ------- :return : string The built method as string. """ n_indents = 1 if self.target_language in ['java', 'js', 'php', 'ruby'] else 0 return self.temp('separated.method', n_indents=n_indents, skipping=True).format(**self.__dict__)
['def', 'create_method', '(', 'self', ')', ':', 'n_indents', '=', '1', 'if', 'self', '.', 'target_language', 'in', '[', "'java'", ',', "'js'", ',', "'php'", ',', "'ruby'", ']', 'else', '0', 'return', 'self', '.', 'temp', '(', "'separated.method'", ',', 'n_indents', '=', 'n_indents', ',', 'skipping', '=', 'True', ')', '.', 'format', '(', '*', '*', 'self', '.', '__dict__', ')']
Build the estimator method or function. Returns ------- :return : string The built method as string.
['Build', 'the', 'estimator', 'method', 'or', 'function', '.']
train
https://github.com/nok/sklearn-porter/blob/04673f768310bde31f9747a68a5e070592441ef2/sklearn_porter/estimator/classifier/SVC/__init__.py#L258-L270
3,511
JukeboxPipeline/jukebox-core
src/jukeboxcore/gui/widgets/filebrowser.py
FileBrowser.get_current_selection
def get_current_selection(self, i=None): """Get the :class:`TaskFileInfo` for the file selected in the active tab :param i: If None, returns selection of active tab. If 0, assetselection. If 1, shotselection :type i: :returns: The taskfile info in the currently active tab :rtype: :class:`TaskFileInfo` | None :raises: None """ taskfile = None if (i is None and self.selection_tabw.currentIndex() == 0) or (i is not None and i == 0): indexes = self.assetverbrws.selected_indexes(0) if indexes and indexes[0].isValid(): item = indexes[0].internalPointer() taskfile = item.internal_data() elif (i is None and self.selection_tabw.currentIndex() == 1) or (i is not None and i == 1): indexes = self.shotverbrws.selected_indexes(0) if indexes and indexes[0].isValid(): item = indexes[0].internalPointer() taskfile = item.internal_data() return taskfile
python
def get_current_selection(self, i=None): """Get the :class:`TaskFileInfo` for the file selected in the active tab :param i: If None, returns selection of active tab. If 0, assetselection. If 1, shotselection :type i: :returns: The taskfile info in the currently active tab :rtype: :class:`TaskFileInfo` | None :raises: None """ taskfile = None if (i is None and self.selection_tabw.currentIndex() == 0) or (i is not None and i == 0): indexes = self.assetverbrws.selected_indexes(0) if indexes and indexes[0].isValid(): item = indexes[0].internalPointer() taskfile = item.internal_data() elif (i is None and self.selection_tabw.currentIndex() == 1) or (i is not None and i == 1): indexes = self.shotverbrws.selected_indexes(0) if indexes and indexes[0].isValid(): item = indexes[0].internalPointer() taskfile = item.internal_data() return taskfile
['def', 'get_current_selection', '(', 'self', ',', 'i', '=', 'None', ')', ':', 'taskfile', '=', 'None', 'if', '(', 'i', 'is', 'None', 'and', 'self', '.', 'selection_tabw', '.', 'currentIndex', '(', ')', '==', '0', ')', 'or', '(', 'i', 'is', 'not', 'None', 'and', 'i', '==', '0', ')', ':', 'indexes', '=', 'self', '.', 'assetverbrws', '.', 'selected_indexes', '(', '0', ')', 'if', 'indexes', 'and', 'indexes', '[', '0', ']', '.', 'isValid', '(', ')', ':', 'item', '=', 'indexes', '[', '0', ']', '.', 'internalPointer', '(', ')', 'taskfile', '=', 'item', '.', 'internal_data', '(', ')', 'elif', '(', 'i', 'is', 'None', 'and', 'self', '.', 'selection_tabw', '.', 'currentIndex', '(', ')', '==', '1', ')', 'or', '(', 'i', 'is', 'not', 'None', 'and', 'i', '==', '1', ')', ':', 'indexes', '=', 'self', '.', 'shotverbrws', '.', 'selected_indexes', '(', '0', ')', 'if', 'indexes', 'and', 'indexes', '[', '0', ']', '.', 'isValid', '(', ')', ':', 'item', '=', 'indexes', '[', '0', ']', '.', 'internalPointer', '(', ')', 'taskfile', '=', 'item', '.', 'internal_data', '(', ')', 'return', 'taskfile']
Get the :class:`TaskFileInfo` for the file selected in the active tab :param i: If None, returns selection of active tab. If 0, assetselection. If 1, shotselection :type i: :returns: The taskfile info in the currently active tab :rtype: :class:`TaskFileInfo` | None :raises: None
['Get', 'the', ':', 'class', ':', 'TaskFileInfo', 'for', 'the', 'file', 'selected', 'in', 'the', 'active', 'tab']
train
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/gui/widgets/filebrowser.py#L767-L787
3,512
alexa/alexa-skills-kit-sdk-for-python
ask-sdk-core/ask_sdk_core/response_helper.py
ResponseFactory.__trim_outputspeech
def __trim_outputspeech(self, speech_output=None): # type: (Union[str, None]) -> str """Trims the output speech if it already has the <speak></speak> tag. :param speech_output: the output speech sent back to user. :type speech_output: str :return: the trimmed output speech. :rtype: Union[bool, None] """ if speech_output is None: return "" speech = speech_output.strip() if speech.startswith("<speak>") and speech.endswith("</speak>"): return speech[7:-8].strip() return speech
python
def __trim_outputspeech(self, speech_output=None): # type: (Union[str, None]) -> str """Trims the output speech if it already has the <speak></speak> tag. :param speech_output: the output speech sent back to user. :type speech_output: str :return: the trimmed output speech. :rtype: Union[bool, None] """ if speech_output is None: return "" speech = speech_output.strip() if speech.startswith("<speak>") and speech.endswith("</speak>"): return speech[7:-8].strip() return speech
['def', '__trim_outputspeech', '(', 'self', ',', 'speech_output', '=', 'None', ')', ':', '# type: (Union[str, None]) -> str', 'if', 'speech_output', 'is', 'None', ':', 'return', '""', 'speech', '=', 'speech_output', '.', 'strip', '(', ')', 'if', 'speech', '.', 'startswith', '(', '"<speak>"', ')', 'and', 'speech', '.', 'endswith', '(', '"</speak>"', ')', ':', 'return', 'speech', '[', '7', ':', '-', '8', ']', '.', 'strip', '(', ')', 'return', 'speech']
Trims the output speech if it already has the <speak></speak> tag. :param speech_output: the output speech sent back to user. :type speech_output: str :return: the trimmed output speech. :rtype: Union[bool, None]
['Trims', 'the', 'output', 'speech', 'if', 'it', 'already', 'has', 'the', '<speak', '>', '<', '/', 'speak', '>', 'tag', '.']
train
https://github.com/alexa/alexa-skills-kit-sdk-for-python/blob/097b6406aa12d5ca0b825b00c936861b530cbf39/ask-sdk-core/ask_sdk_core/response_helper.py#L166-L181
3,513
spyder-ide/spyder
spyder/utils/qthelpers.py
toggle_actions
def toggle_actions(actions, enable): """Enable/disable actions""" if actions is not None: for action in actions: if action is not None: action.setEnabled(enable)
python
def toggle_actions(actions, enable): """Enable/disable actions""" if actions is not None: for action in actions: if action is not None: action.setEnabled(enable)
['def', 'toggle_actions', '(', 'actions', ',', 'enable', ')', ':', 'if', 'actions', 'is', 'not', 'None', ':', 'for', 'action', 'in', 'actions', ':', 'if', 'action', 'is', 'not', 'None', ':', 'action', '.', 'setEnabled', '(', 'enable', ')']
Enable/disable actions
['Enable', '/', 'disable', 'actions']
train
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/utils/qthelpers.py#L228-L233
3,514
SUSE-Enceladus/ipa
ipa/ipa_cloud.py
IpaCloud.put_file
def put_file(self, client, source_file): """ Put file on instance in default SSH directory. """ try: file_name = os.path.basename(source_file) ipa_utils.put_file(client, source_file, file_name) except Exception as error: raise IpaCloudException( 'Failed copying file, "{0}"; {1}.'.format( source_file, error ) ) else: return file_name
python
def put_file(self, client, source_file): """ Put file on instance in default SSH directory. """ try: file_name = os.path.basename(source_file) ipa_utils.put_file(client, source_file, file_name) except Exception as error: raise IpaCloudException( 'Failed copying file, "{0}"; {1}.'.format( source_file, error ) ) else: return file_name
['def', 'put_file', '(', 'self', ',', 'client', ',', 'source_file', ')', ':', 'try', ':', 'file_name', '=', 'os', '.', 'path', '.', 'basename', '(', 'source_file', ')', 'ipa_utils', '.', 'put_file', '(', 'client', ',', 'source_file', ',', 'file_name', ')', 'except', 'Exception', 'as', 'error', ':', 'raise', 'IpaCloudException', '(', '\'Failed copying file, "{0}"; {1}.\'', '.', 'format', '(', 'source_file', ',', 'error', ')', ')', 'else', ':', 'return', 'file_name']
Put file on instance in default SSH directory.
['Put', 'file', 'on', 'instance', 'in', 'default', 'SSH', 'directory', '.']
train
https://github.com/SUSE-Enceladus/ipa/blob/0845eed0ea25a27dbb059ad1016105fa60002228/ipa/ipa_cloud.py#L583-L597
3,515
raiden-network/raiden
raiden/waiting.py
wait_for_close
def wait_for_close( raiden: 'RaidenService', payment_network_id: PaymentNetworkID, token_address: TokenAddress, channel_ids: List[ChannelID], retry_timeout: float, ) -> None: """Wait until all channels are closed. Note: This does not time out, use gevent.Timeout. """ return wait_for_channel_in_states( raiden=raiden, payment_network_id=payment_network_id, token_address=token_address, channel_ids=channel_ids, retry_timeout=retry_timeout, target_states=CHANNEL_AFTER_CLOSE_STATES, )
python
def wait_for_close( raiden: 'RaidenService', payment_network_id: PaymentNetworkID, token_address: TokenAddress, channel_ids: List[ChannelID], retry_timeout: float, ) -> None: """Wait until all channels are closed. Note: This does not time out, use gevent.Timeout. """ return wait_for_channel_in_states( raiden=raiden, payment_network_id=payment_network_id, token_address=token_address, channel_ids=channel_ids, retry_timeout=retry_timeout, target_states=CHANNEL_AFTER_CLOSE_STATES, )
['def', 'wait_for_close', '(', 'raiden', ':', "'RaidenService'", ',', 'payment_network_id', ':', 'PaymentNetworkID', ',', 'token_address', ':', 'TokenAddress', ',', 'channel_ids', ':', 'List', '[', 'ChannelID', ']', ',', 'retry_timeout', ':', 'float', ',', ')', '->', 'None', ':', 'return', 'wait_for_channel_in_states', '(', 'raiden', '=', 'raiden', ',', 'payment_network_id', '=', 'payment_network_id', ',', 'token_address', '=', 'token_address', ',', 'channel_ids', '=', 'channel_ids', ',', 'retry_timeout', '=', 'retry_timeout', ',', 'target_states', '=', 'CHANNEL_AFTER_CLOSE_STATES', ',', ')']
Wait until all channels are closed. Note: This does not time out, use gevent.Timeout.
['Wait', 'until', 'all', 'channels', 'are', 'closed', '.']
train
https://github.com/raiden-network/raiden/blob/407ba15c72074e9de88771d6b9661ff4dc36bef5/raiden/waiting.py#L213-L232
3,516
mjj4791/python-buienradar
buienradar/buienradar_json.py
__getStationName
def __getStationName(name, id): """Construct a staiion name.""" name = name.replace("Meetstation", "") name = name.strip() name += " (%s)" % id return name
python
def __getStationName(name, id): """Construct a staiion name.""" name = name.replace("Meetstation", "") name = name.strip() name += " (%s)" % id return name
['def', '__getStationName', '(', 'name', ',', 'id', ')', ':', 'name', '=', 'name', '.', 'replace', '(', '"Meetstation"', ',', '""', ')', 'name', '=', 'name', '.', 'strip', '(', ')', 'name', '+=', '" (%s)"', '%', 'id', 'return', 'name']
Construct a staiion name.
['Construct', 'a', 'staiion', 'name', '.']
train
https://github.com/mjj4791/python-buienradar/blob/a70436f54e007ce921d5210cb296cf3e4adf9d09/buienradar/buienradar_json.py#L575-L580
3,517
eumis/pyviews
pyviews/rendering/pipeline.py
render_children
def render_children(node: Node, **child_args): """Render node children""" for xml_node in node.xml_node.children: child = render(xml_node, **child_args) node.add_child(child)
python
def render_children(node: Node, **child_args): """Render node children""" for xml_node in node.xml_node.children: child = render(xml_node, **child_args) node.add_child(child)
['def', 'render_children', '(', 'node', ':', 'Node', ',', '*', '*', 'child_args', ')', ':', 'for', 'xml_node', 'in', 'node', '.', 'xml_node', '.', 'children', ':', 'child', '=', 'render', '(', 'xml_node', ',', '*', '*', 'child_args', ')', 'node', '.', 'add_child', '(', 'child', ')']
Render node children
['Render', 'node', 'children']
train
https://github.com/eumis/pyviews/blob/80a868242ee9cdc6f4ded594b3e0544cc238ed55/pyviews/rendering/pipeline.py#L103-L107
3,518
tmr232/Sark
sark/qt.py
capture_widget
def capture_widget(widget, path=None): """Grab an image of a Qt widget Args: widget: The Qt Widget to capture path (optional): The path to save to. If not provided - will return image data. Returns: If a path is provided, the image will be saved to it. If not, the PNG buffer will be returned. """ if use_qt5: pixmap = widget.grab() else: pixmap = QtGui.QPixmap.grabWidget(widget) if path: pixmap.save(path) else: image_buffer = QtCore.QBuffer() image_buffer.open(QtCore.QIODevice.ReadWrite) pixmap.save(image_buffer, "PNG") return image_buffer.data().data()
python
def capture_widget(widget, path=None): """Grab an image of a Qt widget Args: widget: The Qt Widget to capture path (optional): The path to save to. If not provided - will return image data. Returns: If a path is provided, the image will be saved to it. If not, the PNG buffer will be returned. """ if use_qt5: pixmap = widget.grab() else: pixmap = QtGui.QPixmap.grabWidget(widget) if path: pixmap.save(path) else: image_buffer = QtCore.QBuffer() image_buffer.open(QtCore.QIODevice.ReadWrite) pixmap.save(image_buffer, "PNG") return image_buffer.data().data()
['def', 'capture_widget', '(', 'widget', ',', 'path', '=', 'None', ')', ':', 'if', 'use_qt5', ':', 'pixmap', '=', 'widget', '.', 'grab', '(', ')', 'else', ':', 'pixmap', '=', 'QtGui', '.', 'QPixmap', '.', 'grabWidget', '(', 'widget', ')', 'if', 'path', ':', 'pixmap', '.', 'save', '(', 'path', ')', 'else', ':', 'image_buffer', '=', 'QtCore', '.', 'QBuffer', '(', ')', 'image_buffer', '.', 'open', '(', 'QtCore', '.', 'QIODevice', '.', 'ReadWrite', ')', 'pixmap', '.', 'save', '(', 'image_buffer', ',', '"PNG"', ')', 'return', 'image_buffer', '.', 'data', '(', ')', '.', 'data', '(', ')']
Grab an image of a Qt widget Args: widget: The Qt Widget to capture path (optional): The path to save to. If not provided - will return image data. Returns: If a path is provided, the image will be saved to it. If not, the PNG buffer will be returned.
['Grab', 'an', 'image', 'of', 'a', 'Qt', 'widget']
train
https://github.com/tmr232/Sark/blob/bee62879c2aea553a3924d887e2b30f2a6008581/sark/qt.py#L14-L39
3,519
brocade/pynos
pynos/versions/ver_7/ver_7_1_0/yang/brocade_tunnels.py
brocade_tunnels.ovsdb_server_name
def ovsdb_server_name(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") ovsdb_server = ET.SubElement(config, "ovsdb-server", xmlns="urn:brocade.com:mgmt:brocade-tunnels") name = ET.SubElement(ovsdb_server, "name") name.text = kwargs.pop('name') callback = kwargs.pop('callback', self._callback) return callback(config)
python
def ovsdb_server_name(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") ovsdb_server = ET.SubElement(config, "ovsdb-server", xmlns="urn:brocade.com:mgmt:brocade-tunnels") name = ET.SubElement(ovsdb_server, "name") name.text = kwargs.pop('name') callback = kwargs.pop('callback', self._callback) return callback(config)
['def', 'ovsdb_server_name', '(', 'self', ',', '*', '*', 'kwargs', ')', ':', 'config', '=', 'ET', '.', 'Element', '(', '"config"', ')', 'ovsdb_server', '=', 'ET', '.', 'SubElement', '(', 'config', ',', '"ovsdb-server"', ',', 'xmlns', '=', '"urn:brocade.com:mgmt:brocade-tunnels"', ')', 'name', '=', 'ET', '.', 'SubElement', '(', 'ovsdb_server', ',', '"name"', ')', 'name', '.', 'text', '=', 'kwargs', '.', 'pop', '(', "'name'", ')', 'callback', '=', 'kwargs', '.', 'pop', '(', "'callback'", ',', 'self', '.', '_callback', ')', 'return', 'callback', '(', 'config', ')']
Auto Generated Code
['Auto', 'Generated', 'Code']
train
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_7/ver_7_1_0/yang/brocade_tunnels.py#L860-L869
3,520
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_interface_ext.py
brocade_interface_ext.get_vlan_brief_output_vlan_vlan_type
def get_vlan_brief_output_vlan_vlan_type(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") get_vlan_brief = ET.Element("get_vlan_brief") config = get_vlan_brief output = ET.SubElement(get_vlan_brief, "output") vlan = ET.SubElement(output, "vlan") vlan_id_key = ET.SubElement(vlan, "vlan-id") vlan_id_key.text = kwargs.pop('vlan_id') vlan_type = ET.SubElement(vlan, "vlan-type") vlan_type.text = kwargs.pop('vlan_type') callback = kwargs.pop('callback', self._callback) return callback(config)
python
def get_vlan_brief_output_vlan_vlan_type(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") get_vlan_brief = ET.Element("get_vlan_brief") config = get_vlan_brief output = ET.SubElement(get_vlan_brief, "output") vlan = ET.SubElement(output, "vlan") vlan_id_key = ET.SubElement(vlan, "vlan-id") vlan_id_key.text = kwargs.pop('vlan_id') vlan_type = ET.SubElement(vlan, "vlan-type") vlan_type.text = kwargs.pop('vlan_type') callback = kwargs.pop('callback', self._callback) return callback(config)
['def', 'get_vlan_brief_output_vlan_vlan_type', '(', 'self', ',', '*', '*', 'kwargs', ')', ':', 'config', '=', 'ET', '.', 'Element', '(', '"config"', ')', 'get_vlan_brief', '=', 'ET', '.', 'Element', '(', '"get_vlan_brief"', ')', 'config', '=', 'get_vlan_brief', 'output', '=', 'ET', '.', 'SubElement', '(', 'get_vlan_brief', ',', '"output"', ')', 'vlan', '=', 'ET', '.', 'SubElement', '(', 'output', ',', '"vlan"', ')', 'vlan_id_key', '=', 'ET', '.', 'SubElement', '(', 'vlan', ',', '"vlan-id"', ')', 'vlan_id_key', '.', 'text', '=', 'kwargs', '.', 'pop', '(', "'vlan_id'", ')', 'vlan_type', '=', 'ET', '.', 'SubElement', '(', 'vlan', ',', '"vlan-type"', ')', 'vlan_type', '.', 'text', '=', 'kwargs', '.', 'pop', '(', "'vlan_type'", ')', 'callback', '=', 'kwargs', '.', 'pop', '(', "'callback'", ',', 'self', '.', '_callback', ')', 'return', 'callback', '(', 'config', ')']
Auto Generated Code
['Auto', 'Generated', 'Code']
train
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_interface_ext.py#L95-L109
3,521
nerdvegas/rez
src/rez/vendor/distlib/locators.py
Locator.convert_url_to_download_info
def convert_url_to_download_info(self, url, project_name): """ See if a URL is a candidate for a download URL for a project (the URL has typically been scraped from an HTML page). If it is, a dictionary is returned with keys "name", "version", "filename" and "url"; otherwise, None is returned. """ def same_project(name1, name2): return normalize_name(name1) == normalize_name(name2) result = None scheme, netloc, path, params, query, frag = urlparse(url) if frag.lower().startswith('egg='): logger.debug('%s: version hint in fragment: %r', project_name, frag) m = HASHER_HASH.match(frag) if m: algo, digest = m.groups() else: algo, digest = None, None origpath = path if path and path[-1] == '/': path = path[:-1] if path.endswith('.whl'): try: wheel = Wheel(path) if is_compatible(wheel, self.wheel_tags): if project_name is None: include = True else: include = same_project(wheel.name, project_name) if include: result = { 'name': wheel.name, 'version': wheel.version, 'filename': wheel.filename, 'url': urlunparse((scheme, netloc, origpath, params, query, '')), 'python-version': ', '.join( ['.'.join(list(v[2:])) for v in wheel.pyver]), } except Exception as e: logger.warning('invalid path for wheel: %s', path) elif path.endswith(self.downloadable_extensions): path = filename = posixpath.basename(path) for ext in self.downloadable_extensions: if path.endswith(ext): path = path[:-len(ext)] t = self.split_filename(path, project_name) if not t: logger.debug('No match for project/version: %s', path) else: name, version, pyver = t if not project_name or same_project(project_name, name): result = { 'name': name, 'version': version, 'filename': filename, 'url': urlunparse((scheme, netloc, origpath, params, query, '')), #'packagetype': 'sdist', } if pyver: result['python-version'] = pyver break if result and algo: result['%s_digest' % algo] = digest return result
python
def convert_url_to_download_info(self, url, project_name): """ See if a URL is a candidate for a download URL for a project (the URL has typically been scraped from an HTML page). If it is, a dictionary is returned with keys "name", "version", "filename" and "url"; otherwise, None is returned. """ def same_project(name1, name2): return normalize_name(name1) == normalize_name(name2) result = None scheme, netloc, path, params, query, frag = urlparse(url) if frag.lower().startswith('egg='): logger.debug('%s: version hint in fragment: %r', project_name, frag) m = HASHER_HASH.match(frag) if m: algo, digest = m.groups() else: algo, digest = None, None origpath = path if path and path[-1] == '/': path = path[:-1] if path.endswith('.whl'): try: wheel = Wheel(path) if is_compatible(wheel, self.wheel_tags): if project_name is None: include = True else: include = same_project(wheel.name, project_name) if include: result = { 'name': wheel.name, 'version': wheel.version, 'filename': wheel.filename, 'url': urlunparse((scheme, netloc, origpath, params, query, '')), 'python-version': ', '.join( ['.'.join(list(v[2:])) for v in wheel.pyver]), } except Exception as e: logger.warning('invalid path for wheel: %s', path) elif path.endswith(self.downloadable_extensions): path = filename = posixpath.basename(path) for ext in self.downloadable_extensions: if path.endswith(ext): path = path[:-len(ext)] t = self.split_filename(path, project_name) if not t: logger.debug('No match for project/version: %s', path) else: name, version, pyver = t if not project_name or same_project(project_name, name): result = { 'name': name, 'version': version, 'filename': filename, 'url': urlunparse((scheme, netloc, origpath, params, query, '')), #'packagetype': 'sdist', } if pyver: result['python-version'] = pyver break if result and algo: result['%s_digest' % algo] = digest return result
['def', 'convert_url_to_download_info', '(', 'self', ',', 'url', ',', 'project_name', ')', ':', 'def', 'same_project', '(', 'name1', ',', 'name2', ')', ':', 'return', 'normalize_name', '(', 'name1', ')', '==', 'normalize_name', '(', 'name2', ')', 'result', '=', 'None', 'scheme', ',', 'netloc', ',', 'path', ',', 'params', ',', 'query', ',', 'frag', '=', 'urlparse', '(', 'url', ')', 'if', 'frag', '.', 'lower', '(', ')', '.', 'startswith', '(', "'egg='", ')', ':', 'logger', '.', 'debug', '(', "'%s: version hint in fragment: %r'", ',', 'project_name', ',', 'frag', ')', 'm', '=', 'HASHER_HASH', '.', 'match', '(', 'frag', ')', 'if', 'm', ':', 'algo', ',', 'digest', '=', 'm', '.', 'groups', '(', ')', 'else', ':', 'algo', ',', 'digest', '=', 'None', ',', 'None', 'origpath', '=', 'path', 'if', 'path', 'and', 'path', '[', '-', '1', ']', '==', "'/'", ':', 'path', '=', 'path', '[', ':', '-', '1', ']', 'if', 'path', '.', 'endswith', '(', "'.whl'", ')', ':', 'try', ':', 'wheel', '=', 'Wheel', '(', 'path', ')', 'if', 'is_compatible', '(', 'wheel', ',', 'self', '.', 'wheel_tags', ')', ':', 'if', 'project_name', 'is', 'None', ':', 'include', '=', 'True', 'else', ':', 'include', '=', 'same_project', '(', 'wheel', '.', 'name', ',', 'project_name', ')', 'if', 'include', ':', 'result', '=', '{', "'name'", ':', 'wheel', '.', 'name', ',', "'version'", ':', 'wheel', '.', 'version', ',', "'filename'", ':', 'wheel', '.', 'filename', ',', "'url'", ':', 'urlunparse', '(', '(', 'scheme', ',', 'netloc', ',', 'origpath', ',', 'params', ',', 'query', ',', "''", ')', ')', ',', "'python-version'", ':', "', '", '.', 'join', '(', '[', "'.'", '.', 'join', '(', 'list', '(', 'v', '[', '2', ':', ']', ')', ')', 'for', 'v', 'in', 'wheel', '.', 'pyver', ']', ')', ',', '}', 'except', 'Exception', 'as', 'e', ':', 'logger', '.', 'warning', '(', "'invalid path for wheel: %s'", ',', 'path', ')', 'elif', 'path', '.', 'endswith', '(', 'self', '.', 'downloadable_extensions', ')', ':', 'path', '=', 'filename', '=', 'posixpath', '.', 'basename', '(', 'path', ')', 'for', 'ext', 'in', 'self', '.', 'downloadable_extensions', ':', 'if', 'path', '.', 'endswith', '(', 'ext', ')', ':', 'path', '=', 'path', '[', ':', '-', 'len', '(', 'ext', ')', ']', 't', '=', 'self', '.', 'split_filename', '(', 'path', ',', 'project_name', ')', 'if', 'not', 't', ':', 'logger', '.', 'debug', '(', "'No match for project/version: %s'", ',', 'path', ')', 'else', ':', 'name', ',', 'version', ',', 'pyver', '=', 't', 'if', 'not', 'project_name', 'or', 'same_project', '(', 'project_name', ',', 'name', ')', ':', 'result', '=', '{', "'name'", ':', 'name', ',', "'version'", ':', 'version', ',', "'filename'", ':', 'filename', ',', "'url'", ':', 'urlunparse', '(', '(', 'scheme', ',', 'netloc', ',', 'origpath', ',', 'params', ',', 'query', ',', "''", ')', ')', ',', "#'packagetype': 'sdist',", '}', 'if', 'pyver', ':', 'result', '[', "'python-version'", ']', '=', 'pyver', 'break', 'if', 'result', 'and', 'algo', ':', 'result', '[', "'%s_digest'", '%', 'algo', ']', '=', 'digest', 'return', 'result']
See if a URL is a candidate for a download URL for a project (the URL has typically been scraped from an HTML page). If it is, a dictionary is returned with keys "name", "version", "filename" and "url"; otherwise, None is returned.
['See', 'if', 'a', 'URL', 'is', 'a', 'candidate', 'for', 'a', 'download', 'URL', 'for', 'a', 'project', '(', 'the', 'URL', 'has', 'typically', 'been', 'scraped', 'from', 'an', 'HTML', 'page', ')', '.']
train
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/vendor/distlib/locators.py#L204-L272
3,522
rueckstiess/mtools
mtools/mlogfilter/filters/mask_filter.py
MaskFilter.setup
def setup(self): """ Create mask list. Consists of all tuples between which this filter accepts lines. """ # get start and end of the mask and set a start_limit if not self.mask_source.start: raise SystemExit("Can't parse format of %s. Is this a log file or " "system.profile collection?" % self.mlogfilter.args['mask']) self.mask_half_td = timedelta(seconds=self.mlogfilter.args ['mask_size'] / 2) # load filter mask file logevent_list = list(self.mask_source) # define start and end of total mask self.mask_start = self.mask_source.start - self.mask_half_td self.mask_end = self.mask_source.end + self.mask_half_td # consider --mask-center if self.mlogfilter.args['mask_center'] in ['start', 'both']: if logevent_list[0].duration: self.mask_start -= timedelta(milliseconds=logevent_list[0] .duration) if self.mlogfilter.args['mask_center'] == 'start': if logevent_list[-1].duration: self.mask_end -= timedelta(milliseconds=logevent_list[-1] .duration) self.start_limit = self.mask_start # different center points if 'mask_center' in self.mlogfilter.args: if self.mlogfilter.args['mask_center'] in ['start', 'both']: starts = ([(le.datetime - timedelta(milliseconds=le.duration)) if le.duration is not None else le.datetime for le in logevent_list if le.datetime]) if self.mlogfilter.args['mask_center'] in ['end', 'both']: ends = [le.datetime for le in logevent_list if le.datetime] if self.mlogfilter.args['mask_center'] == 'start': event_list = sorted(starts) elif self.mlogfilter.args['mask_center'] == 'end': event_list = sorted(ends) elif self.mlogfilter.args['mask_center'] == 'both': event_list = sorted(zip(starts, ends)) mask_list = [] if len(event_list) == 0: return start_point = end_point = None for e in event_list: if start_point is None: start_point, end_point = self._pad_event(e) continue next_start = (e[0] if type(e) == tuple else e) - self.mask_half_td if next_start <= end_point: end_point = ((e[1] if type(e) == tuple else e) + self.mask_half_td) else: mask_list.append((start_point, end_point)) start_point, end_point = self._pad_event(e) if start_point: mask_list.append((start_point, end_point)) self.mask_list = mask_list
python
def setup(self): """ Create mask list. Consists of all tuples between which this filter accepts lines. """ # get start and end of the mask and set a start_limit if not self.mask_source.start: raise SystemExit("Can't parse format of %s. Is this a log file or " "system.profile collection?" % self.mlogfilter.args['mask']) self.mask_half_td = timedelta(seconds=self.mlogfilter.args ['mask_size'] / 2) # load filter mask file logevent_list = list(self.mask_source) # define start and end of total mask self.mask_start = self.mask_source.start - self.mask_half_td self.mask_end = self.mask_source.end + self.mask_half_td # consider --mask-center if self.mlogfilter.args['mask_center'] in ['start', 'both']: if logevent_list[0].duration: self.mask_start -= timedelta(milliseconds=logevent_list[0] .duration) if self.mlogfilter.args['mask_center'] == 'start': if logevent_list[-1].duration: self.mask_end -= timedelta(milliseconds=logevent_list[-1] .duration) self.start_limit = self.mask_start # different center points if 'mask_center' in self.mlogfilter.args: if self.mlogfilter.args['mask_center'] in ['start', 'both']: starts = ([(le.datetime - timedelta(milliseconds=le.duration)) if le.duration is not None else le.datetime for le in logevent_list if le.datetime]) if self.mlogfilter.args['mask_center'] in ['end', 'both']: ends = [le.datetime for le in logevent_list if le.datetime] if self.mlogfilter.args['mask_center'] == 'start': event_list = sorted(starts) elif self.mlogfilter.args['mask_center'] == 'end': event_list = sorted(ends) elif self.mlogfilter.args['mask_center'] == 'both': event_list = sorted(zip(starts, ends)) mask_list = [] if len(event_list) == 0: return start_point = end_point = None for e in event_list: if start_point is None: start_point, end_point = self._pad_event(e) continue next_start = (e[0] if type(e) == tuple else e) - self.mask_half_td if next_start <= end_point: end_point = ((e[1] if type(e) == tuple else e) + self.mask_half_td) else: mask_list.append((start_point, end_point)) start_point, end_point = self._pad_event(e) if start_point: mask_list.append((start_point, end_point)) self.mask_list = mask_list
['def', 'setup', '(', 'self', ')', ':', '# get start and end of the mask and set a start_limit', 'if', 'not', 'self', '.', 'mask_source', '.', 'start', ':', 'raise', 'SystemExit', '(', '"Can\'t parse format of %s. Is this a log file or "', '"system.profile collection?"', '%', 'self', '.', 'mlogfilter', '.', 'args', '[', "'mask'", ']', ')', 'self', '.', 'mask_half_td', '=', 'timedelta', '(', 'seconds', '=', 'self', '.', 'mlogfilter', '.', 'args', '[', "'mask_size'", ']', '/', '2', ')', '# load filter mask file', 'logevent_list', '=', 'list', '(', 'self', '.', 'mask_source', ')', '# define start and end of total mask', 'self', '.', 'mask_start', '=', 'self', '.', 'mask_source', '.', 'start', '-', 'self', '.', 'mask_half_td', 'self', '.', 'mask_end', '=', 'self', '.', 'mask_source', '.', 'end', '+', 'self', '.', 'mask_half_td', '# consider --mask-center', 'if', 'self', '.', 'mlogfilter', '.', 'args', '[', "'mask_center'", ']', 'in', '[', "'start'", ',', "'both'", ']', ':', 'if', 'logevent_list', '[', '0', ']', '.', 'duration', ':', 'self', '.', 'mask_start', '-=', 'timedelta', '(', 'milliseconds', '=', 'logevent_list', '[', '0', ']', '.', 'duration', ')', 'if', 'self', '.', 'mlogfilter', '.', 'args', '[', "'mask_center'", ']', '==', "'start'", ':', 'if', 'logevent_list', '[', '-', '1', ']', '.', 'duration', ':', 'self', '.', 'mask_end', '-=', 'timedelta', '(', 'milliseconds', '=', 'logevent_list', '[', '-', '1', ']', '.', 'duration', ')', 'self', '.', 'start_limit', '=', 'self', '.', 'mask_start', '# different center points', 'if', "'mask_center'", 'in', 'self', '.', 'mlogfilter', '.', 'args', ':', 'if', 'self', '.', 'mlogfilter', '.', 'args', '[', "'mask_center'", ']', 'in', '[', "'start'", ',', "'both'", ']', ':', 'starts', '=', '(', '[', '(', 'le', '.', 'datetime', '-', 'timedelta', '(', 'milliseconds', '=', 'le', '.', 'duration', ')', ')', 'if', 'le', '.', 'duration', 'is', 'not', 'None', 'else', 'le', '.', 'datetime', 'for', 'le', 'in', 'logevent_list', 'if', 'le', '.', 'datetime', ']', ')', 'if', 'self', '.', 'mlogfilter', '.', 'args', '[', "'mask_center'", ']', 'in', '[', "'end'", ',', "'both'", ']', ':', 'ends', '=', '[', 'le', '.', 'datetime', 'for', 'le', 'in', 'logevent_list', 'if', 'le', '.', 'datetime', ']', 'if', 'self', '.', 'mlogfilter', '.', 'args', '[', "'mask_center'", ']', '==', "'start'", ':', 'event_list', '=', 'sorted', '(', 'starts', ')', 'elif', 'self', '.', 'mlogfilter', '.', 'args', '[', "'mask_center'", ']', '==', "'end'", ':', 'event_list', '=', 'sorted', '(', 'ends', ')', 'elif', 'self', '.', 'mlogfilter', '.', 'args', '[', "'mask_center'", ']', '==', "'both'", ':', 'event_list', '=', 'sorted', '(', 'zip', '(', 'starts', ',', 'ends', ')', ')', 'mask_list', '=', '[', ']', 'if', 'len', '(', 'event_list', ')', '==', '0', ':', 'return', 'start_point', '=', 'end_point', '=', 'None', 'for', 'e', 'in', 'event_list', ':', 'if', 'start_point', 'is', 'None', ':', 'start_point', ',', 'end_point', '=', 'self', '.', '_pad_event', '(', 'e', ')', 'continue', 'next_start', '=', '(', 'e', '[', '0', ']', 'if', 'type', '(', 'e', ')', '==', 'tuple', 'else', 'e', ')', '-', 'self', '.', 'mask_half_td', 'if', 'next_start', '<=', 'end_point', ':', 'end_point', '=', '(', '(', 'e', '[', '1', ']', 'if', 'type', '(', 'e', ')', '==', 'tuple', 'else', 'e', ')', '+', 'self', '.', 'mask_half_td', ')', 'else', ':', 'mask_list', '.', 'append', '(', '(', 'start_point', ',', 'end_point', ')', ')', 'start_point', ',', 'end_point', '=', 'self', '.', '_pad_event', '(', 'e', ')', 'if', 'start_point', ':', 'mask_list', '.', 'append', '(', '(', 'start_point', ',', 'end_point', ')', ')', 'self', '.', 'mask_list', '=', 'mask_list']
Create mask list. Consists of all tuples between which this filter accepts lines.
['Create', 'mask', 'list', '.']
train
https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/mlogfilter/filters/mask_filter.py#L60-L135
3,523
xlzd/xtls
xtls/codehelper.py
no_exception
def no_exception(on_exception, logger=None): """ 处理函数抛出异常的装饰器, ATT: on_exception必填 :param on_exception: 遇到异常时函数返回什么内容 """ def decorator(function): def wrapper(*args, **kwargs): try: result = function(*args, **kwargs) except Exception, e: if hasattr(logger, 'exception'): logger.exception(e) else: print traceback.format_exc() result = on_exception return result return wrapper return decorator
python
def no_exception(on_exception, logger=None): """ 处理函数抛出异常的装饰器, ATT: on_exception必填 :param on_exception: 遇到异常时函数返回什么内容 """ def decorator(function): def wrapper(*args, **kwargs): try: result = function(*args, **kwargs) except Exception, e: if hasattr(logger, 'exception'): logger.exception(e) else: print traceback.format_exc() result = on_exception return result return wrapper return decorator
['def', 'no_exception', '(', 'on_exception', ',', 'logger', '=', 'None', ')', ':', 'def', 'decorator', '(', 'function', ')', ':', 'def', 'wrapper', '(', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'try', ':', 'result', '=', 'function', '(', '*', 'args', ',', '*', '*', 'kwargs', ')', 'except', 'Exception', ',', 'e', ':', 'if', 'hasattr', '(', 'logger', ',', "'exception'", ')', ':', 'logger', '.', 'exception', '(', 'e', ')', 'else', ':', 'print', 'traceback', '.', 'format_exc', '(', ')', 'result', '=', 'on_exception', 'return', 'result', 'return', 'wrapper', 'return', 'decorator']
处理函数抛出异常的装饰器, ATT: on_exception必填 :param on_exception: 遇到异常时函数返回什么内容
['处理函数抛出异常的装饰器,', 'ATT', ':', 'on_exception必填', ':', 'param', 'on_exception', ':', '遇到异常时函数返回什么内容']
train
https://github.com/xlzd/xtls/blob/b3cc0ab24197ecaa39adcad7cd828cada9c04a4e/xtls/codehelper.py#L78-L98
3,524
ipapi-co/ipapi-python
ipapi/ipapi.py
location
def location(ip=None, key=None, field=None): ''' Get geolocation data for a given IP address If field is specified, get specific field as text Else get complete location data as JSON ''' if field and (field not in field_list): return 'Invalid field' if field: if ip: url = 'https://ipapi.co/{}/{}/'.format(ip, field) else: url = 'https://ipapi.co/{}/'.format(field) else: if ip: url = 'https://ipapi.co/{}/json/'.format(ip) else: url = 'https://ipapi.co/json/' if key or API_KEY: url = '{}?key={}'.format(url, (key or API_KEY)) response = get(url, headers=headers) if field: return response.text else: return response.json()
python
def location(ip=None, key=None, field=None): ''' Get geolocation data for a given IP address If field is specified, get specific field as text Else get complete location data as JSON ''' if field and (field not in field_list): return 'Invalid field' if field: if ip: url = 'https://ipapi.co/{}/{}/'.format(ip, field) else: url = 'https://ipapi.co/{}/'.format(field) else: if ip: url = 'https://ipapi.co/{}/json/'.format(ip) else: url = 'https://ipapi.co/json/' if key or API_KEY: url = '{}?key={}'.format(url, (key or API_KEY)) response = get(url, headers=headers) if field: return response.text else: return response.json()
['def', 'location', '(', 'ip', '=', 'None', ',', 'key', '=', 'None', ',', 'field', '=', 'None', ')', ':', 'if', 'field', 'and', '(', 'field', 'not', 'in', 'field_list', ')', ':', 'return', "'Invalid field'", 'if', 'field', ':', 'if', 'ip', ':', 'url', '=', "'https://ipapi.co/{}/{}/'", '.', 'format', '(', 'ip', ',', 'field', ')', 'else', ':', 'url', '=', "'https://ipapi.co/{}/'", '.', 'format', '(', 'field', ')', 'else', ':', 'if', 'ip', ':', 'url', '=', "'https://ipapi.co/{}/json/'", '.', 'format', '(', 'ip', ')', 'else', ':', 'url', '=', "'https://ipapi.co/json/'", 'if', 'key', 'or', 'API_KEY', ':', 'url', '=', "'{}?key={}'", '.', 'format', '(', 'url', ',', '(', 'key', 'or', 'API_KEY', ')', ')', 'response', '=', 'get', '(', 'url', ',', 'headers', '=', 'headers', ')', 'if', 'field', ':', 'return', 'response', '.', 'text', 'else', ':', 'return', 'response', '.', 'json', '(', ')']
Get geolocation data for a given IP address If field is specified, get specific field as text Else get complete location data as JSON
['Get', 'geolocation', 'data', 'for', 'a', 'given', 'IP', 'address', 'If', 'field', 'is', 'specified', 'get', 'specific', 'field', 'as', 'text', 'Else', 'get', 'complete', 'location', 'data', 'as', 'JSON']
train
https://github.com/ipapi-co/ipapi-python/blob/45896291d5e6a70fc6234c9e070dc9077ef45ccc/ipapi/ipapi.py#L19-L47
3,525
delph-in/pydelphin
delphin/itsdb.py
Relations.path
def path(self, source, target): """ Find the path of id fields connecting two tables. This is just a basic breadth-first-search. The relations file should be small enough to not be a problem. Returns: list: (table, fieldname) pairs describing the path from the source to target tables Raises: :class:`delphin.exceptions.ItsdbError`: when no path is found Example: >>> relations.path('item', 'result') [('parse', 'i-id'), ('result', 'parse-id')] >>> relations.path('parse', 'item') [('item', 'i-id')] >>> relations.path('item', 'item') [] """ visited = set(source.split('+')) # split on + for joins targets = set(target.split('+')) - visited # ensure sources and targets exists for tablename in visited.union(targets): self[tablename] # base case; nothing to do if len(targets) == 0: return [] paths = [[(tablename, None)] for tablename in visited] while True: newpaths = [] for path in paths: laststep, pivot = path[-1] if laststep in targets: return path[1:] else: for key in self[laststep].keys(): for step in set(self.find(key)) - visited: visited.add(step) newpaths.append(path + [(step, key)]) if newpaths: paths = newpaths else: break raise ItsdbError('no relation path found from {} to {}' .format(source, target))
python
def path(self, source, target): """ Find the path of id fields connecting two tables. This is just a basic breadth-first-search. The relations file should be small enough to not be a problem. Returns: list: (table, fieldname) pairs describing the path from the source to target tables Raises: :class:`delphin.exceptions.ItsdbError`: when no path is found Example: >>> relations.path('item', 'result') [('parse', 'i-id'), ('result', 'parse-id')] >>> relations.path('parse', 'item') [('item', 'i-id')] >>> relations.path('item', 'item') [] """ visited = set(source.split('+')) # split on + for joins targets = set(target.split('+')) - visited # ensure sources and targets exists for tablename in visited.union(targets): self[tablename] # base case; nothing to do if len(targets) == 0: return [] paths = [[(tablename, None)] for tablename in visited] while True: newpaths = [] for path in paths: laststep, pivot = path[-1] if laststep in targets: return path[1:] else: for key in self[laststep].keys(): for step in set(self.find(key)) - visited: visited.add(step) newpaths.append(path + [(step, key)]) if newpaths: paths = newpaths else: break raise ItsdbError('no relation path found from {} to {}' .format(source, target))
['def', 'path', '(', 'self', ',', 'source', ',', 'target', ')', ':', 'visited', '=', 'set', '(', 'source', '.', 'split', '(', "'+'", ')', ')', '# split on + for joins', 'targets', '=', 'set', '(', 'target', '.', 'split', '(', "'+'", ')', ')', '-', 'visited', '# ensure sources and targets exists', 'for', 'tablename', 'in', 'visited', '.', 'union', '(', 'targets', ')', ':', 'self', '[', 'tablename', ']', '# base case; nothing to do', 'if', 'len', '(', 'targets', ')', '==', '0', ':', 'return', '[', ']', 'paths', '=', '[', '[', '(', 'tablename', ',', 'None', ')', ']', 'for', 'tablename', 'in', 'visited', ']', 'while', 'True', ':', 'newpaths', '=', '[', ']', 'for', 'path', 'in', 'paths', ':', 'laststep', ',', 'pivot', '=', 'path', '[', '-', '1', ']', 'if', 'laststep', 'in', 'targets', ':', 'return', 'path', '[', '1', ':', ']', 'else', ':', 'for', 'key', 'in', 'self', '[', 'laststep', ']', '.', 'keys', '(', ')', ':', 'for', 'step', 'in', 'set', '(', 'self', '.', 'find', '(', 'key', ')', ')', '-', 'visited', ':', 'visited', '.', 'add', '(', 'step', ')', 'newpaths', '.', 'append', '(', 'path', '+', '[', '(', 'step', ',', 'key', ')', ']', ')', 'if', 'newpaths', ':', 'paths', '=', 'newpaths', 'else', ':', 'break', 'raise', 'ItsdbError', '(', "'no relation path found from {} to {}'", '.', 'format', '(', 'source', ',', 'target', ')', ')']
Find the path of id fields connecting two tables. This is just a basic breadth-first-search. The relations file should be small enough to not be a problem. Returns: list: (table, fieldname) pairs describing the path from the source to target tables Raises: :class:`delphin.exceptions.ItsdbError`: when no path is found Example: >>> relations.path('item', 'result') [('parse', 'i-id'), ('result', 'parse-id')] >>> relations.path('parse', 'item') [('item', 'i-id')] >>> relations.path('item', 'item') []
['Find', 'the', 'path', 'of', 'id', 'fields', 'connecting', 'two', 'tables', '.']
train
https://github.com/delph-in/pydelphin/blob/7bd2cd63ab7cf74803e1d6547b9ebc014b382abd/delphin/itsdb.py#L405-L452
3,526
saltstack/salt
salt/modules/kubernetesmod.py
ping
def ping(**kwargs): ''' Checks connections with the kubernetes API server. Returns True if the connection can be established, False otherwise. CLI Example: salt '*' kubernetes.ping ''' status = True try: nodes(**kwargs) except CommandExecutionError: status = False return status
python
def ping(**kwargs): ''' Checks connections with the kubernetes API server. Returns True if the connection can be established, False otherwise. CLI Example: salt '*' kubernetes.ping ''' status = True try: nodes(**kwargs) except CommandExecutionError: status = False return status
['def', 'ping', '(', '*', '*', 'kwargs', ')', ':', 'status', '=', 'True', 'try', ':', 'nodes', '(', '*', '*', 'kwargs', ')', 'except', 'CommandExecutionError', ':', 'status', '=', 'False', 'return', 'status']
Checks connections with the kubernetes API server. Returns True if the connection can be established, False otherwise. CLI Example: salt '*' kubernetes.ping
['Checks', 'connections', 'with', 'the', 'kubernetes', 'API', 'server', '.', 'Returns', 'True', 'if', 'the', 'connection', 'can', 'be', 'established', 'False', 'otherwise', '.']
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/kubernetesmod.py#L251-L265
3,527
spencerahill/aospy
aospy/utils/times.py
yearly_average
def yearly_average(arr, dt): """Average a sub-yearly time-series over each year. Resulting timeseries comprises one value for each year in which the original array had valid data. Accounts for (i.e. ignores) masked values in original data when computing the annual averages. Parameters ---------- arr : xarray.DataArray The array to be averaged dt : xarray.DataArray Array of the duration of each timestep Returns ------- xarray.DataArray Has the same shape and mask as the original ``arr``, except for the time dimension, which is truncated to one value for each year that ``arr`` spanned """ assert_matching_time_coord(arr, dt) yr_str = TIME_STR + '.year' # Retain original data's mask. dt = dt.where(np.isfinite(arr)) return ((arr*dt).groupby(yr_str).sum(TIME_STR) / dt.groupby(yr_str).sum(TIME_STR))
python
def yearly_average(arr, dt): """Average a sub-yearly time-series over each year. Resulting timeseries comprises one value for each year in which the original array had valid data. Accounts for (i.e. ignores) masked values in original data when computing the annual averages. Parameters ---------- arr : xarray.DataArray The array to be averaged dt : xarray.DataArray Array of the duration of each timestep Returns ------- xarray.DataArray Has the same shape and mask as the original ``arr``, except for the time dimension, which is truncated to one value for each year that ``arr`` spanned """ assert_matching_time_coord(arr, dt) yr_str = TIME_STR + '.year' # Retain original data's mask. dt = dt.where(np.isfinite(arr)) return ((arr*dt).groupby(yr_str).sum(TIME_STR) / dt.groupby(yr_str).sum(TIME_STR))
['def', 'yearly_average', '(', 'arr', ',', 'dt', ')', ':', 'assert_matching_time_coord', '(', 'arr', ',', 'dt', ')', 'yr_str', '=', 'TIME_STR', '+', "'.year'", "# Retain original data's mask.", 'dt', '=', 'dt', '.', 'where', '(', 'np', '.', 'isfinite', '(', 'arr', ')', ')', 'return', '(', '(', 'arr', '*', 'dt', ')', '.', 'groupby', '(', 'yr_str', ')', '.', 'sum', '(', 'TIME_STR', ')', '/', 'dt', '.', 'groupby', '(', 'yr_str', ')', '.', 'sum', '(', 'TIME_STR', ')', ')']
Average a sub-yearly time-series over each year. Resulting timeseries comprises one value for each year in which the original array had valid data. Accounts for (i.e. ignores) masked values in original data when computing the annual averages. Parameters ---------- arr : xarray.DataArray The array to be averaged dt : xarray.DataArray Array of the duration of each timestep Returns ------- xarray.DataArray Has the same shape and mask as the original ``arr``, except for the time dimension, which is truncated to one value for each year that ``arr`` spanned
['Average', 'a', 'sub', '-', 'yearly', 'time', '-', 'series', 'over', 'each', 'year', '.']
train
https://github.com/spencerahill/aospy/blob/2f6e775b9b9956c54af117fdcdce2c87196afb6c/aospy/utils/times.py#L148-L175
3,528
andreasjansson/head-in-the-clouds
headintheclouds/dependencies/PyDbLite/PyDbLite_conversions.py
fromCSV
def fromCSV(csvfile,out=None,fieldnames=None,fmtparams=None,conv_func={}, empty_to_None=[]): """Conversion from CSV to PyDbLite csvfile : name of the CSV file in the file system out : path for the new PyDbLite base in the file system fieldnames : list of field names. If set to None, the field names must be present in the first line of the CSV file fmtparams : the format parameters for the CSV file, as described in the csv module of the standard distribution conv_func is a dictionary mapping a field name to the function used to convert the string read in the CSV to the appropriate Python type. For instance if field "age" must be converted to an integer : conv_func["age"] = int empty_to_None is a list of the fields such that when the value read in the CSV file is the empty string, the field value is set to None """ import csv import time import datetime if out is None: out = os.path.splitext(csvfile)[0]+".pdl" if fieldnames is None: # read field names in the first line of CSV file reader = csv.reader(open(csvfile)) fieldnames = reader.next() reader = csv.DictReader(open(csvfile),fieldnames,fmtparams) reader.next() # skip first line db = PyDbLite.Base(out) conv_func.update({"__id__":int}) auto_id = not "__id__" in fieldnames fieldnames = [ f for f in fieldnames if not f in ("__id__") ] kw = {"mode":"override"} db.create(*fieldnames,**kw) print db.fields next_id = 0 records = {} while True: try: record = reader.next() except StopIteration: break if auto_id: record["__id__"] = next_id next_id += 1 # replace empty strings by None for field in empty_to_None: if not record[field]: record[field] = None # type conversion for field in conv_func: if not isinstance(conv_func[field],(tuple,list)): record[field] = conv_func[field](record[field]) else: # date or datetime date_class,date_fmt = conv_func[field] if not record[field]: record[field] = None else: time_tuple = time.strptime(record[field],date_fmt) if date_class is datetime.date: time_tuple = time_tuple[:3] record[field] = date_class(*time_tuple) records[record["__id__"]] = record db.records = records db.commit() print len(db) return db
python
def fromCSV(csvfile,out=None,fieldnames=None,fmtparams=None,conv_func={}, empty_to_None=[]): """Conversion from CSV to PyDbLite csvfile : name of the CSV file in the file system out : path for the new PyDbLite base in the file system fieldnames : list of field names. If set to None, the field names must be present in the first line of the CSV file fmtparams : the format parameters for the CSV file, as described in the csv module of the standard distribution conv_func is a dictionary mapping a field name to the function used to convert the string read in the CSV to the appropriate Python type. For instance if field "age" must be converted to an integer : conv_func["age"] = int empty_to_None is a list of the fields such that when the value read in the CSV file is the empty string, the field value is set to None """ import csv import time import datetime if out is None: out = os.path.splitext(csvfile)[0]+".pdl" if fieldnames is None: # read field names in the first line of CSV file reader = csv.reader(open(csvfile)) fieldnames = reader.next() reader = csv.DictReader(open(csvfile),fieldnames,fmtparams) reader.next() # skip first line db = PyDbLite.Base(out) conv_func.update({"__id__":int}) auto_id = not "__id__" in fieldnames fieldnames = [ f for f in fieldnames if not f in ("__id__") ] kw = {"mode":"override"} db.create(*fieldnames,**kw) print db.fields next_id = 0 records = {} while True: try: record = reader.next() except StopIteration: break if auto_id: record["__id__"] = next_id next_id += 1 # replace empty strings by None for field in empty_to_None: if not record[field]: record[field] = None # type conversion for field in conv_func: if not isinstance(conv_func[field],(tuple,list)): record[field] = conv_func[field](record[field]) else: # date or datetime date_class,date_fmt = conv_func[field] if not record[field]: record[field] = None else: time_tuple = time.strptime(record[field],date_fmt) if date_class is datetime.date: time_tuple = time_tuple[:3] record[field] = date_class(*time_tuple) records[record["__id__"]] = record db.records = records db.commit() print len(db) return db
['def', 'fromCSV', '(', 'csvfile', ',', 'out', '=', 'None', ',', 'fieldnames', '=', 'None', ',', 'fmtparams', '=', 'None', ',', 'conv_func', '=', '{', '}', ',', 'empty_to_None', '=', '[', ']', ')', ':', 'import', 'csv', 'import', 'time', 'import', 'datetime', 'if', 'out', 'is', 'None', ':', 'out', '=', 'os', '.', 'path', '.', 'splitext', '(', 'csvfile', ')', '[', '0', ']', '+', '".pdl"', 'if', 'fieldnames', 'is', 'None', ':', '# read field names in the first line of CSV file\r', 'reader', '=', 'csv', '.', 'reader', '(', 'open', '(', 'csvfile', ')', ')', 'fieldnames', '=', 'reader', '.', 'next', '(', ')', 'reader', '=', 'csv', '.', 'DictReader', '(', 'open', '(', 'csvfile', ')', ',', 'fieldnames', ',', 'fmtparams', ')', 'reader', '.', 'next', '(', ')', '# skip first line\r', 'db', '=', 'PyDbLite', '.', 'Base', '(', 'out', ')', 'conv_func', '.', 'update', '(', '{', '"__id__"', ':', 'int', '}', ')', 'auto_id', '=', 'not', '"__id__"', 'in', 'fieldnames', 'fieldnames', '=', '[', 'f', 'for', 'f', 'in', 'fieldnames', 'if', 'not', 'f', 'in', '(', '"__id__"', ')', ']', 'kw', '=', '{', '"mode"', ':', '"override"', '}', 'db', '.', 'create', '(', '*', 'fieldnames', ',', '*', '*', 'kw', ')', 'print', 'db', '.', 'fields', 'next_id', '=', '0', 'records', '=', '{', '}', 'while', 'True', ':', 'try', ':', 'record', '=', 'reader', '.', 'next', '(', ')', 'except', 'StopIteration', ':', 'break', 'if', 'auto_id', ':', 'record', '[', '"__id__"', ']', '=', 'next_id', 'next_id', '+=', '1', '# replace empty strings by None\r', 'for', 'field', 'in', 'empty_to_None', ':', 'if', 'not', 'record', '[', 'field', ']', ':', 'record', '[', 'field', ']', '=', 'None', '# type conversion\r', 'for', 'field', 'in', 'conv_func', ':', 'if', 'not', 'isinstance', '(', 'conv_func', '[', 'field', ']', ',', '(', 'tuple', ',', 'list', ')', ')', ':', 'record', '[', 'field', ']', '=', 'conv_func', '[', 'field', ']', '(', 'record', '[', 'field', ']', ')', 'else', ':', '# date or datetime\r', 'date_class', ',', 'date_fmt', '=', 'conv_func', '[', 'field', ']', 'if', 'not', 'record', '[', 'field', ']', ':', 'record', '[', 'field', ']', '=', 'None', 'else', ':', 'time_tuple', '=', 'time', '.', 'strptime', '(', 'record', '[', 'field', ']', ',', 'date_fmt', ')', 'if', 'date_class', 'is', 'datetime', '.', 'date', ':', 'time_tuple', '=', 'time_tuple', '[', ':', '3', ']', 'record', '[', 'field', ']', '=', 'date_class', '(', '*', 'time_tuple', ')', 'records', '[', 'record', '[', '"__id__"', ']', ']', '=', 'record', 'db', '.', 'records', '=', 'records', 'db', '.', 'commit', '(', ')', 'print', 'len', '(', 'db', ')', 'return', 'db']
Conversion from CSV to PyDbLite csvfile : name of the CSV file in the file system out : path for the new PyDbLite base in the file system fieldnames : list of field names. If set to None, the field names must be present in the first line of the CSV file fmtparams : the format parameters for the CSV file, as described in the csv module of the standard distribution conv_func is a dictionary mapping a field name to the function used to convert the string read in the CSV to the appropriate Python type. For instance if field "age" must be converted to an integer : conv_func["age"] = int empty_to_None is a list of the fields such that when the value read in the CSV file is the empty string, the field value is set to None
['Conversion', 'from', 'CSV', 'to', 'PyDbLite', 'csvfile', ':', 'name', 'of', 'the', 'CSV', 'file', 'in', 'the', 'file', 'system', 'out', ':', 'path', 'for', 'the', 'new', 'PyDbLite', 'base', 'in', 'the', 'file', 'system', 'fieldnames', ':', 'list', 'of', 'field', 'names', '.', 'If', 'set', 'to', 'None', 'the', 'field', 'names', 'must', 'be', 'present', 'in', 'the', 'first', 'line', 'of', 'the', 'CSV', 'file', 'fmtparams', ':', 'the', 'format', 'parameters', 'for', 'the', 'CSV', 'file', 'as', 'described', 'in', 'the', 'csv', 'module', 'of', 'the', 'standard', 'distribution', 'conv_func', 'is', 'a', 'dictionary', 'mapping', 'a', 'field', 'name', 'to', 'the', 'function', 'used', 'to', 'convert', 'the', 'string', 'read', 'in', 'the', 'CSV', 'to', 'the', 'appropriate', 'Python', 'type', '.', 'For', 'instance', 'if', 'field', 'age', 'must', 'be', 'converted', 'to', 'an', 'integer', ':', 'conv_func', '[', 'age', ']', '=', 'int', 'empty_to_None', 'is', 'a', 'list', 'of', 'the', 'fields', 'such', 'that', 'when', 'the', 'value', 'read', 'in', 'the', 'CSV', 'file', 'is', 'the', 'empty', 'string', 'the', 'field', 'value', 'is', 'set', 'to', 'None']
train
https://github.com/andreasjansson/head-in-the-clouds/blob/32c1d00d01036834dc94368e7f38b0afd3f7a82f/headintheclouds/dependencies/PyDbLite/PyDbLite_conversions.py#L28-L106
3,529
twilio/twilio-python
twilio/rest/taskrouter/v1/workspace/worker/worker_channel.py
WorkerChannelInstance.update
def update(self, capacity=values.unset, available=values.unset): """ Update the WorkerChannelInstance :param unicode capacity: The total number of Tasks worker should handle for this TaskChannel type. :param bool available: Toggle the availability of the WorkerChannel. :returns: Updated WorkerChannelInstance :rtype: twilio.rest.taskrouter.v1.workspace.worker.worker_channel.WorkerChannelInstance """ return self._proxy.update(capacity=capacity, available=available, )
python
def update(self, capacity=values.unset, available=values.unset): """ Update the WorkerChannelInstance :param unicode capacity: The total number of Tasks worker should handle for this TaskChannel type. :param bool available: Toggle the availability of the WorkerChannel. :returns: Updated WorkerChannelInstance :rtype: twilio.rest.taskrouter.v1.workspace.worker.worker_channel.WorkerChannelInstance """ return self._proxy.update(capacity=capacity, available=available, )
['def', 'update', '(', 'self', ',', 'capacity', '=', 'values', '.', 'unset', ',', 'available', '=', 'values', '.', 'unset', ')', ':', 'return', 'self', '.', '_proxy', '.', 'update', '(', 'capacity', '=', 'capacity', ',', 'available', '=', 'available', ',', ')']
Update the WorkerChannelInstance :param unicode capacity: The total number of Tasks worker should handle for this TaskChannel type. :param bool available: Toggle the availability of the WorkerChannel. :returns: Updated WorkerChannelInstance :rtype: twilio.rest.taskrouter.v1.workspace.worker.worker_channel.WorkerChannelInstance
['Update', 'the', 'WorkerChannelInstance']
train
https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/taskrouter/v1/workspace/worker/worker_channel.py#L455-L465
3,530
vlasovskikh/funcparserlib
funcparserlib/parser.py
a
def a(value): """Eq(a) -> Parser(a, a) Returns a parser that parses a token that is equal to the value value. """ name = getattr(value, 'name', value) return some(lambda t: t == value).named(u'(a "%s")' % (name,))
python
def a(value): """Eq(a) -> Parser(a, a) Returns a parser that parses a token that is equal to the value value. """ name = getattr(value, 'name', value) return some(lambda t: t == value).named(u'(a "%s")' % (name,))
['def', 'a', '(', 'value', ')', ':', 'name', '=', 'getattr', '(', 'value', ',', "'name'", ',', 'value', ')', 'return', 'some', '(', 'lambda', 't', ':', 't', '==', 'value', ')', '.', 'named', '(', 'u\'(a "%s")\'', '%', '(', 'name', ',', ')', ')']
Eq(a) -> Parser(a, a) Returns a parser that parses a token that is equal to the value value.
['Eq', '(', 'a', ')', '-', '>', 'Parser', '(', 'a', 'a', ')']
train
https://github.com/vlasovskikh/funcparserlib/blob/0b689920babcf6079a4b3e8721cc10bbc089d81c/funcparserlib/parser.py#L328-L334
3,531
timothycrosley/simple_ci
simple_ci.py
ci_data
def ci_data(namespace, name, branch='master'): '''Returns or starts the ci data collection process''' with repository(namespace, name, branch) as (path, latest, cache): if not path or not latest: return {'build_success': NOT_FOUND, 'status': NOT_FOUND} elif latest in cache: return json.loads(cache[latest]) starting = {'status': 'starting'} cache[latest] = json.dumps(starting) ci_worker(namespace, name, branch=branch, _bg=True) return starting
python
def ci_data(namespace, name, branch='master'): '''Returns or starts the ci data collection process''' with repository(namespace, name, branch) as (path, latest, cache): if not path or not latest: return {'build_success': NOT_FOUND, 'status': NOT_FOUND} elif latest in cache: return json.loads(cache[latest]) starting = {'status': 'starting'} cache[latest] = json.dumps(starting) ci_worker(namespace, name, branch=branch, _bg=True) return starting
['def', 'ci_data', '(', 'namespace', ',', 'name', ',', 'branch', '=', "'master'", ')', ':', 'with', 'repository', '(', 'namespace', ',', 'name', ',', 'branch', ')', 'as', '(', 'path', ',', 'latest', ',', 'cache', ')', ':', 'if', 'not', 'path', 'or', 'not', 'latest', ':', 'return', '{', "'build_success'", ':', 'NOT_FOUND', ',', "'status'", ':', 'NOT_FOUND', '}', 'elif', 'latest', 'in', 'cache', ':', 'return', 'json', '.', 'loads', '(', 'cache', '[', 'latest', ']', ')', 'starting', '=', '{', "'status'", ':', "'starting'", '}', 'cache', '[', 'latest', ']', '=', 'json', '.', 'dumps', '(', 'starting', ')', 'ci_worker', '(', 'namespace', ',', 'name', ',', 'branch', '=', 'branch', ',', '_bg', '=', 'True', ')', 'return', 'starting']
Returns or starts the ci data collection process
['Returns', 'or', 'starts', 'the', 'ci', 'data', 'collection', 'process']
train
https://github.com/timothycrosley/simple_ci/blob/3d2a7b0c527d34731f15b752ff200e76d5addd67/simple_ci.py#L59-L70
3,532
inspirehep/refextract
refextract/references/text.py
strip_footer
def strip_footer(ref_lines, section_title): """Remove footer title from references lines""" pattern = ur'\(?\[?\d{0,4}\]?\)?\.?\s*%s\s*$' % re.escape(section_title) re_footer = re.compile(pattern, re.UNICODE) return [l for l in ref_lines if not re_footer.match(l)]
python
def strip_footer(ref_lines, section_title): """Remove footer title from references lines""" pattern = ur'\(?\[?\d{0,4}\]?\)?\.?\s*%s\s*$' % re.escape(section_title) re_footer = re.compile(pattern, re.UNICODE) return [l for l in ref_lines if not re_footer.match(l)]
['def', 'strip_footer', '(', 'ref_lines', ',', 'section_title', ')', ':', 'pattern', '=', "ur'\\(?\\[?\\d{0,4}\\]?\\)?\\.?\\s*%s\\s*$'", '%', 're', '.', 'escape', '(', 'section_title', ')', 're_footer', '=', 're', '.', 'compile', '(', 'pattern', ',', 're', '.', 'UNICODE', ')', 'return', '[', 'l', 'for', 'l', 'in', 'ref_lines', 'if', 'not', 're_footer', '.', 'match', '(', 'l', ')', ']']
Remove footer title from references lines
['Remove', 'footer', 'title', 'from', 'references', 'lines']
train
https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/text.py#L155-L159
3,533
bioasp/caspo
caspo/core/clamping.py
ClampingList.to_csv
def to_csv(self, filename, stimuli=None, inhibitors=None, prepend=""): """ Writes the list of clampings to a CSV file Parameters ---------- filename : str Absolute path where to write the CSV file stimuli : Optional[list[str]] List of stimuli names. If given, stimuli are converted to {0,1} instead of {-1,1}. inhibitors : Optional[list[str]] List of inhibitors names. If given, inhibitors are renamed and converted to {0,1} instead of {-1,1}. prepend : str Columns are renamed using the given string at the beginning """ self.to_dataframe(stimuli, inhibitors, prepend).to_csv(filename, index=False)
python
def to_csv(self, filename, stimuli=None, inhibitors=None, prepend=""): """ Writes the list of clampings to a CSV file Parameters ---------- filename : str Absolute path where to write the CSV file stimuli : Optional[list[str]] List of stimuli names. If given, stimuli are converted to {0,1} instead of {-1,1}. inhibitors : Optional[list[str]] List of inhibitors names. If given, inhibitors are renamed and converted to {0,1} instead of {-1,1}. prepend : str Columns are renamed using the given string at the beginning """ self.to_dataframe(stimuli, inhibitors, prepend).to_csv(filename, index=False)
['def', 'to_csv', '(', 'self', ',', 'filename', ',', 'stimuli', '=', 'None', ',', 'inhibitors', '=', 'None', ',', 'prepend', '=', '""', ')', ':', 'self', '.', 'to_dataframe', '(', 'stimuli', ',', 'inhibitors', ',', 'prepend', ')', '.', 'to_csv', '(', 'filename', ',', 'index', '=', 'False', ')']
Writes the list of clampings to a CSV file Parameters ---------- filename : str Absolute path where to write the CSV file stimuli : Optional[list[str]] List of stimuli names. If given, stimuli are converted to {0,1} instead of {-1,1}. inhibitors : Optional[list[str]] List of inhibitors names. If given, inhibitors are renamed and converted to {0,1} instead of {-1,1}. prepend : str Columns are renamed using the given string at the beginning
['Writes', 'the', 'list', 'of', 'clampings', 'to', 'a', 'CSV', 'file']
train
https://github.com/bioasp/caspo/blob/a68d1eace75b9b08f23633d1fb5ce6134403959e/caspo/core/clamping.py#L105-L123
3,534
dranjan/python-plyfile
plyfile.py
PlyProperty._from_fields
def _from_fields(self, fields): ''' Parse from generator. Raise StopIteration if the property could not be read. ''' return _np.dtype(self.dtype()).type(next(fields))
python
def _from_fields(self, fields): ''' Parse from generator. Raise StopIteration if the property could not be read. ''' return _np.dtype(self.dtype()).type(next(fields))
['def', '_from_fields', '(', 'self', ',', 'fields', ')', ':', 'return', '_np', '.', 'dtype', '(', 'self', '.', 'dtype', '(', ')', ')', '.', 'type', '(', 'next', '(', 'fields', ')', ')']
Parse from generator. Raise StopIteration if the property could not be read.
['Parse', 'from', 'generator', '.', 'Raise', 'StopIteration', 'if', 'the', 'property', 'could', 'not', 'be', 'read', '.']
train
https://github.com/dranjan/python-plyfile/blob/9f8e8708d3a071229cf292caae7d13264e11c88b/plyfile.py#L826-L832
3,535
petl-developers/petl
petl/transform/intervals.py
facetintervallookupone
def facetintervallookupone(table, key, start='start', stop='stop', value=None, include_stop=False, strict=True): """ Construct a faceted interval lookup for the given table, returning at most one result for each query. If ``strict=True``, queries returning more than one result will raise a `DuplicateKeyError`. If ``strict=False`` and there is more than one result, the first result is returned. """ trees = facettupletrees(table, key, start=start, stop=stop, value=value) out = dict() for k in trees: out[k] = IntervalTreeLookupOne(trees[k], include_stop=include_stop, strict=strict) return out
python
def facetintervallookupone(table, key, start='start', stop='stop', value=None, include_stop=False, strict=True): """ Construct a faceted interval lookup for the given table, returning at most one result for each query. If ``strict=True``, queries returning more than one result will raise a `DuplicateKeyError`. If ``strict=False`` and there is more than one result, the first result is returned. """ trees = facettupletrees(table, key, start=start, stop=stop, value=value) out = dict() for k in trees: out[k] = IntervalTreeLookupOne(trees[k], include_stop=include_stop, strict=strict) return out
['def', 'facetintervallookupone', '(', 'table', ',', 'key', ',', 'start', '=', "'start'", ',', 'stop', '=', "'stop'", ',', 'value', '=', 'None', ',', 'include_stop', '=', 'False', ',', 'strict', '=', 'True', ')', ':', 'trees', '=', 'facettupletrees', '(', 'table', ',', 'key', ',', 'start', '=', 'start', ',', 'stop', '=', 'stop', ',', 'value', '=', 'value', ')', 'out', '=', 'dict', '(', ')', 'for', 'k', 'in', 'trees', ':', 'out', '[', 'k', ']', '=', 'IntervalTreeLookupOne', '(', 'trees', '[', 'k', ']', ',', 'include_stop', '=', 'include_stop', ',', 'strict', '=', 'strict', ')', 'return', 'out']
Construct a faceted interval lookup for the given table, returning at most one result for each query. If ``strict=True``, queries returning more than one result will raise a `DuplicateKeyError`. If ``strict=False`` and there is more than one result, the first result is returned.
['Construct', 'a', 'faceted', 'interval', 'lookup', 'for', 'the', 'given', 'table', 'returning', 'at', 'most', 'one', 'result', 'for', 'each', 'query', '.', 'If', 'strict', '=', 'True', 'queries', 'returning', 'more', 'than', 'one', 'result', 'will', 'raise', 'a', 'DuplicateKeyError', '.', 'If', 'strict', '=', 'False', 'and', 'there', 'is', 'more', 'than', 'one', 'result', 'the', 'first', 'result', 'is', 'returned', '.']
train
https://github.com/petl-developers/petl/blob/1d33ca055f7e04e0d28a772041c9fd30c8d415d6/petl/transform/intervals.py#L363-L380
3,536
foremast/foremast
src/foremast/utils/lookups.py
_get_ami_dict
def _get_ami_dict(json_url): """Get ami from a web url. Args: region (str): AWS Region to find AMI ID. Returns: dict: Contents in dictionary format. """ LOG.info("Getting AMI from %s", json_url) response = requests.get(json_url) assert response.ok, "Error getting ami info from {}".format(json_url) ami_dict = response.json() LOG.debug('AMI json contents: %s', ami_dict) return ami_dict
python
def _get_ami_dict(json_url): """Get ami from a web url. Args: region (str): AWS Region to find AMI ID. Returns: dict: Contents in dictionary format. """ LOG.info("Getting AMI from %s", json_url) response = requests.get(json_url) assert response.ok, "Error getting ami info from {}".format(json_url) ami_dict = response.json() LOG.debug('AMI json contents: %s', ami_dict) return ami_dict
['def', '_get_ami_dict', '(', 'json_url', ')', ':', 'LOG', '.', 'info', '(', '"Getting AMI from %s"', ',', 'json_url', ')', 'response', '=', 'requests', '.', 'get', '(', 'json_url', ')', 'assert', 'response', '.', 'ok', ',', '"Error getting ami info from {}"', '.', 'format', '(', 'json_url', ')', 'ami_dict', '=', 'response', '.', 'json', '(', ')', 'LOG', '.', 'debug', '(', "'AMI json contents: %s'", ',', 'ami_dict', ')', 'return', 'ami_dict']
Get ami from a web url. Args: region (str): AWS Region to find AMI ID. Returns: dict: Contents in dictionary format.
['Get', 'ami', 'from', 'a', 'web', 'url', '.']
train
https://github.com/foremast/foremast/blob/fb70f29b8ce532f061685a17d120486e47b215ba/src/foremast/utils/lookups.py#L80-L95
3,537
juju/theblues
theblues/charmstore.py
CharmStore.entity_readme_content
def entity_readme_content(self, entity_id, channel=None): '''Get the readme for an entity. @entity_id The id of the entity (i.e. charm, bundle). @param channel Optional channel name. ''' readme_url = self.entity_readme_url(entity_id, channel=channel) response = self._get(readme_url) return response.text
python
def entity_readme_content(self, entity_id, channel=None): '''Get the readme for an entity. @entity_id The id of the entity (i.e. charm, bundle). @param channel Optional channel name. ''' readme_url = self.entity_readme_url(entity_id, channel=channel) response = self._get(readme_url) return response.text
['def', 'entity_readme_content', '(', 'self', ',', 'entity_id', ',', 'channel', '=', 'None', ')', ':', 'readme_url', '=', 'self', '.', 'entity_readme_url', '(', 'entity_id', ',', 'channel', '=', 'channel', ')', 'response', '=', 'self', '.', '_get', '(', 'readme_url', ')', 'return', 'response', '.', 'text']
Get the readme for an entity. @entity_id The id of the entity (i.e. charm, bundle). @param channel Optional channel name.
['Get', 'the', 'readme', 'for', 'an', 'entity', '.']
train
https://github.com/juju/theblues/blob/f4431f29e43d04fc32f38f4f86cea45cd4e6ae98/theblues/charmstore.py#L226-L234
3,538
rigetti/pyquil
pyquil/noise.py
_noise_model_program_header
def _noise_model_program_header(noise_model): """ Generate the header for a pyquil Program that uses ``noise_model`` to overload noisy gates. The program header consists of 3 sections: - The ``DEFGATE`` statements that define the meaning of the newly introduced "noisy" gate names. - The ``PRAGMA ADD-KRAUS`` statements to overload these noisy gates on specific qubit targets with their noisy implementation. - THe ``PRAGMA READOUT-POVM`` statements that define the noisy readout per qubit. :param NoiseModel noise_model: The assumed noise model. :return: A quil Program with the noise pragmas. :rtype: pyquil.quil.Program """ from pyquil.quil import Program p = Program() defgates = set() for k in noise_model.gates: # obtain ideal gate matrix and new, noisy name by looking it up in the NOISY_GATES dict try: ideal_gate, new_name = get_noisy_gate(k.gate, tuple(k.params)) # if ideal version of gate has not yet been DEFGATE'd, do this if new_name not in defgates: p.defgate(new_name, ideal_gate) defgates.add(new_name) except NoisyGateUndefined: print("WARNING: Could not find ideal gate definition for gate {}".format(k.gate), file=sys.stderr) new_name = k.gate # define noisy version of gate on specific targets p.define_noisy_gate(new_name, k.targets, k.kraus_ops) # define noisy readouts for q, ap in noise_model.assignment_probs.items(): p.define_noisy_readout(q, p00=ap[0, 0], p11=ap[1, 1]) return p
python
def _noise_model_program_header(noise_model): """ Generate the header for a pyquil Program that uses ``noise_model`` to overload noisy gates. The program header consists of 3 sections: - The ``DEFGATE`` statements that define the meaning of the newly introduced "noisy" gate names. - The ``PRAGMA ADD-KRAUS`` statements to overload these noisy gates on specific qubit targets with their noisy implementation. - THe ``PRAGMA READOUT-POVM`` statements that define the noisy readout per qubit. :param NoiseModel noise_model: The assumed noise model. :return: A quil Program with the noise pragmas. :rtype: pyquil.quil.Program """ from pyquil.quil import Program p = Program() defgates = set() for k in noise_model.gates: # obtain ideal gate matrix and new, noisy name by looking it up in the NOISY_GATES dict try: ideal_gate, new_name = get_noisy_gate(k.gate, tuple(k.params)) # if ideal version of gate has not yet been DEFGATE'd, do this if new_name not in defgates: p.defgate(new_name, ideal_gate) defgates.add(new_name) except NoisyGateUndefined: print("WARNING: Could not find ideal gate definition for gate {}".format(k.gate), file=sys.stderr) new_name = k.gate # define noisy version of gate on specific targets p.define_noisy_gate(new_name, k.targets, k.kraus_ops) # define noisy readouts for q, ap in noise_model.assignment_probs.items(): p.define_noisy_readout(q, p00=ap[0, 0], p11=ap[1, 1]) return p
['def', '_noise_model_program_header', '(', 'noise_model', ')', ':', 'from', 'pyquil', '.', 'quil', 'import', 'Program', 'p', '=', 'Program', '(', ')', 'defgates', '=', 'set', '(', ')', 'for', 'k', 'in', 'noise_model', '.', 'gates', ':', '# obtain ideal gate matrix and new, noisy name by looking it up in the NOISY_GATES dict', 'try', ':', 'ideal_gate', ',', 'new_name', '=', 'get_noisy_gate', '(', 'k', '.', 'gate', ',', 'tuple', '(', 'k', '.', 'params', ')', ')', "# if ideal version of gate has not yet been DEFGATE'd, do this", 'if', 'new_name', 'not', 'in', 'defgates', ':', 'p', '.', 'defgate', '(', 'new_name', ',', 'ideal_gate', ')', 'defgates', '.', 'add', '(', 'new_name', ')', 'except', 'NoisyGateUndefined', ':', 'print', '(', '"WARNING: Could not find ideal gate definition for gate {}"', '.', 'format', '(', 'k', '.', 'gate', ')', ',', 'file', '=', 'sys', '.', 'stderr', ')', 'new_name', '=', 'k', '.', 'gate', '# define noisy version of gate on specific targets', 'p', '.', 'define_noisy_gate', '(', 'new_name', ',', 'k', '.', 'targets', ',', 'k', '.', 'kraus_ops', ')', '# define noisy readouts', 'for', 'q', ',', 'ap', 'in', 'noise_model', '.', 'assignment_probs', '.', 'items', '(', ')', ':', 'p', '.', 'define_noisy_readout', '(', 'q', ',', 'p00', '=', 'ap', '[', '0', ',', '0', ']', ',', 'p11', '=', 'ap', '[', '1', ',', '1', ']', ')', 'return', 'p']
Generate the header for a pyquil Program that uses ``noise_model`` to overload noisy gates. The program header consists of 3 sections: - The ``DEFGATE`` statements that define the meaning of the newly introduced "noisy" gate names. - The ``PRAGMA ADD-KRAUS`` statements to overload these noisy gates on specific qubit targets with their noisy implementation. - THe ``PRAGMA READOUT-POVM`` statements that define the noisy readout per qubit. :param NoiseModel noise_model: The assumed noise model. :return: A quil Program with the noise pragmas. :rtype: pyquil.quil.Program
['Generate', 'the', 'header', 'for', 'a', 'pyquil', 'Program', 'that', 'uses', 'noise_model', 'to', 'overload', 'noisy', 'gates', '.', 'The', 'program', 'header', 'consists', 'of', '3', 'sections', ':']
train
https://github.com/rigetti/pyquil/blob/ec98e453084b0037d69d8c3245f6822a5422593d/pyquil/noise.py#L506-L545
3,539
saltstack/salt
salt/modules/system_profiler.py
receipts
def receipts(): ''' Return the results of a call to ``system_profiler -xml -detail full SPInstallHistoryDataType`` as a dictionary. Top-level keys of the dictionary are the names of each set of install receipts, since there can be multiple receipts with the same name. Contents of each key are a list of dictionaries. CLI Example: .. code-block:: bash salt '*' systemprofiler.receipts ''' apps = _call_system_profiler('SPInstallHistoryDataType') appdict = {} for a in apps: details = dict(a) details.pop('_name') if 'install_date' in details: details['install_date'] = details['install_date'].strftime('%Y-%m-%d %H:%M:%S') if 'info' in details: try: details['info'] = '{0}: {1}'.format(details['info'][0], details['info'][1].strftime('%Y-%m-%d %H:%M:%S')) except (IndexError, AttributeError): pass if a['_name'] not in appdict: appdict[a['_name']] = [] appdict[a['_name']].append(details) return appdict
python
def receipts(): ''' Return the results of a call to ``system_profiler -xml -detail full SPInstallHistoryDataType`` as a dictionary. Top-level keys of the dictionary are the names of each set of install receipts, since there can be multiple receipts with the same name. Contents of each key are a list of dictionaries. CLI Example: .. code-block:: bash salt '*' systemprofiler.receipts ''' apps = _call_system_profiler('SPInstallHistoryDataType') appdict = {} for a in apps: details = dict(a) details.pop('_name') if 'install_date' in details: details['install_date'] = details['install_date'].strftime('%Y-%m-%d %H:%M:%S') if 'info' in details: try: details['info'] = '{0}: {1}'.format(details['info'][0], details['info'][1].strftime('%Y-%m-%d %H:%M:%S')) except (IndexError, AttributeError): pass if a['_name'] not in appdict: appdict[a['_name']] = [] appdict[a['_name']].append(details) return appdict
['def', 'receipts', '(', ')', ':', 'apps', '=', '_call_system_profiler', '(', "'SPInstallHistoryDataType'", ')', 'appdict', '=', '{', '}', 'for', 'a', 'in', 'apps', ':', 'details', '=', 'dict', '(', 'a', ')', 'details', '.', 'pop', '(', "'_name'", ')', 'if', "'install_date'", 'in', 'details', ':', 'details', '[', "'install_date'", ']', '=', 'details', '[', "'install_date'", ']', '.', 'strftime', '(', "'%Y-%m-%d %H:%M:%S'", ')', 'if', "'info'", 'in', 'details', ':', 'try', ':', 'details', '[', "'info'", ']', '=', "'{0}: {1}'", '.', 'format', '(', 'details', '[', "'info'", ']', '[', '0', ']', ',', 'details', '[', "'info'", ']', '[', '1', ']', '.', 'strftime', '(', "'%Y-%m-%d %H:%M:%S'", ')', ')', 'except', '(', 'IndexError', ',', 'AttributeError', ')', ':', 'pass', 'if', 'a', '[', "'_name'", ']', 'not', 'in', 'appdict', ':', 'appdict', '[', 'a', '[', "'_name'", ']', ']', '=', '[', ']', 'appdict', '[', 'a', '[', "'_name'", ']', ']', '.', 'append', '(', 'details', ')', 'return', 'appdict']
Return the results of a call to ``system_profiler -xml -detail full SPInstallHistoryDataType`` as a dictionary. Top-level keys of the dictionary are the names of each set of install receipts, since there can be multiple receipts with the same name. Contents of each key are a list of dictionaries. CLI Example: .. code-block:: bash salt '*' systemprofiler.receipts
['Return', 'the', 'results', 'of', 'a', 'call', 'to', 'system_profiler', '-', 'xml', '-', 'detail', 'full', 'SPInstallHistoryDataType', 'as', 'a', 'dictionary', '.', 'Top', '-', 'level', 'keys', 'of', 'the', 'dictionary', 'are', 'the', 'names', 'of', 'each', 'set', 'of', 'install', 'receipts', 'since', 'there', 'can', 'be', 'multiple', 'receipts', 'with', 'the', 'same', 'name', '.', 'Contents', 'of', 'each', 'key', 'are', 'a', 'list', 'of', 'dictionaries', '.']
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/system_profiler.py#L58-L95
3,540
jonathf/chaospy
chaospy/distributions/baseclass.py
Dist.inv
def inv(self, q_data, max_iterations=100, tollerance=1e-5): """ Inverse Rosenblatt transformation. If possible the transformation is done analytically. If not possible, transformation is approximated using an algorithm that alternates between Newton-Raphson and binary search. Args: q_data (numpy.ndarray): Probabilities to be inverse. If any values are outside ``[0, 1]``, error will be raised. ``q_data.shape`` must be compatible with distribution shape. max_iterations (int): If approximation is used, this sets the maximum number of allowed iterations in the Newton-Raphson algorithm. tollerance (float): If approximation is used, this set the error tolerance level required to define a sample as converged. Returns: (numpy.ndarray): Inverted probability values where ``out.shape == q_data.shape``. """ q_data = numpy.asfarray(q_data) assert numpy.all((q_data >= 0) & (q_data <= 1)), "sanitize your inputs!" shape = q_data.shape q_data = q_data.reshape(len(self), -1) x_data = evaluation.evaluate_inverse(self, q_data) lower, upper = evaluation.evaluate_bound(self, x_data) x_data = numpy.clip(x_data, a_min=lower, a_max=upper) x_data = x_data.reshape(shape) return x_data
python
def inv(self, q_data, max_iterations=100, tollerance=1e-5): """ Inverse Rosenblatt transformation. If possible the transformation is done analytically. If not possible, transformation is approximated using an algorithm that alternates between Newton-Raphson and binary search. Args: q_data (numpy.ndarray): Probabilities to be inverse. If any values are outside ``[0, 1]``, error will be raised. ``q_data.shape`` must be compatible with distribution shape. max_iterations (int): If approximation is used, this sets the maximum number of allowed iterations in the Newton-Raphson algorithm. tollerance (float): If approximation is used, this set the error tolerance level required to define a sample as converged. Returns: (numpy.ndarray): Inverted probability values where ``out.shape == q_data.shape``. """ q_data = numpy.asfarray(q_data) assert numpy.all((q_data >= 0) & (q_data <= 1)), "sanitize your inputs!" shape = q_data.shape q_data = q_data.reshape(len(self), -1) x_data = evaluation.evaluate_inverse(self, q_data) lower, upper = evaluation.evaluate_bound(self, x_data) x_data = numpy.clip(x_data, a_min=lower, a_max=upper) x_data = x_data.reshape(shape) return x_data
['def', 'inv', '(', 'self', ',', 'q_data', ',', 'max_iterations', '=', '100', ',', 'tollerance', '=', '1e-5', ')', ':', 'q_data', '=', 'numpy', '.', 'asfarray', '(', 'q_data', ')', 'assert', 'numpy', '.', 'all', '(', '(', 'q_data', '>=', '0', ')', '&', '(', 'q_data', '<=', '1', ')', ')', ',', '"sanitize your inputs!"', 'shape', '=', 'q_data', '.', 'shape', 'q_data', '=', 'q_data', '.', 'reshape', '(', 'len', '(', 'self', ')', ',', '-', '1', ')', 'x_data', '=', 'evaluation', '.', 'evaluate_inverse', '(', 'self', ',', 'q_data', ')', 'lower', ',', 'upper', '=', 'evaluation', '.', 'evaluate_bound', '(', 'self', ',', 'x_data', ')', 'x_data', '=', 'numpy', '.', 'clip', '(', 'x_data', ',', 'a_min', '=', 'lower', ',', 'a_max', '=', 'upper', ')', 'x_data', '=', 'x_data', '.', 'reshape', '(', 'shape', ')', 'return', 'x_data']
Inverse Rosenblatt transformation. If possible the transformation is done analytically. If not possible, transformation is approximated using an algorithm that alternates between Newton-Raphson and binary search. Args: q_data (numpy.ndarray): Probabilities to be inverse. If any values are outside ``[0, 1]``, error will be raised. ``q_data.shape`` must be compatible with distribution shape. max_iterations (int): If approximation is used, this sets the maximum number of allowed iterations in the Newton-Raphson algorithm. tollerance (float): If approximation is used, this set the error tolerance level required to define a sample as converged. Returns: (numpy.ndarray): Inverted probability values where ``out.shape == q_data.shape``.
['Inverse', 'Rosenblatt', 'transformation', '.']
train
https://github.com/jonathf/chaospy/blob/25ecfa7bf5608dc10c0b31d142ded0e3755f5d74/chaospy/distributions/baseclass.py#L154-L187
3,541
limix/limix-core
limix_core/util/preprocess.py
boxcox
def boxcox(X): """ Gaussianize X using the Box-Cox transformation: [samples x phenotypes] - each phentoype is brought to a positive schale, by first subtracting the minimum value and adding 1. - Then each phenotype transformed by the boxcox transformation """ X_transformed = sp.zeros_like(X) maxlog = sp.zeros(X.shape[1]) for i in range(X.shape[1]): i_nan = sp.isnan(X[:,i]) values = X[~i_nan,i] X_transformed[i_nan,i] = X[i_nan,i] X_transformed[~i_nan,i], maxlog[i] = st.boxcox(values-values.min()+1.0) return X_transformed, maxlog
python
def boxcox(X): """ Gaussianize X using the Box-Cox transformation: [samples x phenotypes] - each phentoype is brought to a positive schale, by first subtracting the minimum value and adding 1. - Then each phenotype transformed by the boxcox transformation """ X_transformed = sp.zeros_like(X) maxlog = sp.zeros(X.shape[1]) for i in range(X.shape[1]): i_nan = sp.isnan(X[:,i]) values = X[~i_nan,i] X_transformed[i_nan,i] = X[i_nan,i] X_transformed[~i_nan,i], maxlog[i] = st.boxcox(values-values.min()+1.0) return X_transformed, maxlog
['def', 'boxcox', '(', 'X', ')', ':', 'X_transformed', '=', 'sp', '.', 'zeros_like', '(', 'X', ')', 'maxlog', '=', 'sp', '.', 'zeros', '(', 'X', '.', 'shape', '[', '1', ']', ')', 'for', 'i', 'in', 'range', '(', 'X', '.', 'shape', '[', '1', ']', ')', ':', 'i_nan', '=', 'sp', '.', 'isnan', '(', 'X', '[', ':', ',', 'i', ']', ')', 'values', '=', 'X', '[', '~', 'i_nan', ',', 'i', ']', 'X_transformed', '[', 'i_nan', ',', 'i', ']', '=', 'X', '[', 'i_nan', ',', 'i', ']', 'X_transformed', '[', '~', 'i_nan', ',', 'i', ']', ',', 'maxlog', '[', 'i', ']', '=', 'st', '.', 'boxcox', '(', 'values', '-', 'values', '.', 'min', '(', ')', '+', '1.0', ')', 'return', 'X_transformed', ',', 'maxlog']
Gaussianize X using the Box-Cox transformation: [samples x phenotypes] - each phentoype is brought to a positive schale, by first subtracting the minimum value and adding 1. - Then each phenotype transformed by the boxcox transformation
['Gaussianize', 'X', 'using', 'the', 'Box', '-', 'Cox', 'transformation', ':', '[', 'samples', 'x', 'phenotypes', ']']
train
https://github.com/limix/limix-core/blob/5c590b4d351409f83ca320844b4897ce92203814/limix_core/util/preprocess.py#L113-L127
3,542
david-cortes/hpfrec
hpfrec/__init__.py
HPF.topN
def topN(self, user, n=10, exclude_seen=True, items_pool=None): """ Recommend Top-N items for a user Outputs the Top-N items according to score predicted by the model. Can exclude the items for the user that were associated to her in the training set, and can also recommend from only a subset of user-provided items. Parameters ---------- user : obj User for which to recommend. n : int Number of top items to recommend. exclude_seen: bool Whether to exclude items that were associated to the user in the training set. items_pool: None or array Items to consider for recommending to the user. Returns ------- rec : array (n,) Top-N recommended items. """ if isinstance(n, float): n = int(n) assert isinstance(n ,int) if self.reindex: if self.produce_dicts: try: user = self.user_dict_[user] except: raise ValueError("Can only predict for users who were in the training set.") else: user = pd.Categorical(np.array([user]), self.user_mapping_).codes[0] if user == -1: raise ValueError("Can only predict for users who were in the training set.") if exclude_seen and not self.keep_data: raise Exception("Can only exclude seen items when passing 'keep_data=True' to .fit") if items_pool is None: allpreds = - (self.Theta[user].dot(self.Beta.T)) if exclude_seen: n_ext = np.min([n + self._n_seen_by_user[user], self.Beta.shape[0]]) rec = np.argpartition(allpreds, n_ext-1)[:n_ext] seen = self.seen[self._st_ix_user[user] : self._st_ix_user[user] + self._n_seen_by_user[user]] rec = np.setdiff1d(rec, seen) rec = rec[np.argsort(allpreds[rec])[:n]] if self.reindex: return self.item_mapping_[rec] else: return rec else: n = np.min([n, self.Beta.shape[0]]) rec = np.argpartition(allpreds, n-1)[:n] rec = rec[np.argsort(allpreds[rec])] if self.reindex: return self.item_mapping_[rec] else: return rec else: if isinstance(items_pool, list) or isinstance(items_pool, tuple): items_pool = np.array(items_pool) if items_pool.__class__.__name__=='Series': items_pool = items_pool.values if isinstance(items_pool, np.ndarray): if len(items_pool.shape) > 1: items_pool = items_pool.reshape(-1) if self.reindex: items_pool_reind = pd.Categorical(items_pool, self.item_mapping_).codes nan_ix = (items_pool_reind == -1) if nan_ix.sum() > 0: items_pool_reind = items_pool_reind[~nan_ix] msg = "There were " + ("%d" % int(nan_ix.sum())) + " entries from 'item_pool'" msg += "that were not in the training data and will be exluded." warnings.warn(msg) del nan_ix if items_pool_reind.shape[0] == 0: raise ValueError("No items to recommend.") elif items_pool_reind.shape[0] == 1: raise ValueError("Only 1 item to recommend.") else: pass else: raise ValueError("'items_pool' must be an array.") if self.reindex: allpreds = - self.Theta[user].dot(self.Beta[items_pool_reind].T) else: allpreds = - self.Theta[user].dot(self.Beta[items_pool].T) n = np.min([n, items_pool.shape[0]]) if exclude_seen: n_ext = np.min([n + self._n_seen_by_user[user], items_pool.shape[0]]) rec = np.argpartition(allpreds, n_ext-1)[:n_ext] seen = self.seen[self._st_ix_user[user] : self._st_ix_user[user] + self._n_seen_by_user[user]] if self.reindex: rec = np.setdiff1d(items_pool_reind[rec], seen) allpreds = - self.Theta[user].dot(self.Beta[rec].T) return self.item_mapping_[rec[np.argsort(allpreds)[:n]]] else: rec = np.setdiff1d(items_pool[rec], seen) allpreds = - self.Theta[user].dot(self.Beta[rec].T) return rec[np.argsort(allpreds)[:n]] else: rec = np.argpartition(allpreds, n-1)[:n] return items_pool[rec[np.argsort(allpreds[rec])]]
python
def topN(self, user, n=10, exclude_seen=True, items_pool=None): """ Recommend Top-N items for a user Outputs the Top-N items according to score predicted by the model. Can exclude the items for the user that were associated to her in the training set, and can also recommend from only a subset of user-provided items. Parameters ---------- user : obj User for which to recommend. n : int Number of top items to recommend. exclude_seen: bool Whether to exclude items that were associated to the user in the training set. items_pool: None or array Items to consider for recommending to the user. Returns ------- rec : array (n,) Top-N recommended items. """ if isinstance(n, float): n = int(n) assert isinstance(n ,int) if self.reindex: if self.produce_dicts: try: user = self.user_dict_[user] except: raise ValueError("Can only predict for users who were in the training set.") else: user = pd.Categorical(np.array([user]), self.user_mapping_).codes[0] if user == -1: raise ValueError("Can only predict for users who were in the training set.") if exclude_seen and not self.keep_data: raise Exception("Can only exclude seen items when passing 'keep_data=True' to .fit") if items_pool is None: allpreds = - (self.Theta[user].dot(self.Beta.T)) if exclude_seen: n_ext = np.min([n + self._n_seen_by_user[user], self.Beta.shape[0]]) rec = np.argpartition(allpreds, n_ext-1)[:n_ext] seen = self.seen[self._st_ix_user[user] : self._st_ix_user[user] + self._n_seen_by_user[user]] rec = np.setdiff1d(rec, seen) rec = rec[np.argsort(allpreds[rec])[:n]] if self.reindex: return self.item_mapping_[rec] else: return rec else: n = np.min([n, self.Beta.shape[0]]) rec = np.argpartition(allpreds, n-1)[:n] rec = rec[np.argsort(allpreds[rec])] if self.reindex: return self.item_mapping_[rec] else: return rec else: if isinstance(items_pool, list) or isinstance(items_pool, tuple): items_pool = np.array(items_pool) if items_pool.__class__.__name__=='Series': items_pool = items_pool.values if isinstance(items_pool, np.ndarray): if len(items_pool.shape) > 1: items_pool = items_pool.reshape(-1) if self.reindex: items_pool_reind = pd.Categorical(items_pool, self.item_mapping_).codes nan_ix = (items_pool_reind == -1) if nan_ix.sum() > 0: items_pool_reind = items_pool_reind[~nan_ix] msg = "There were " + ("%d" % int(nan_ix.sum())) + " entries from 'item_pool'" msg += "that were not in the training data and will be exluded." warnings.warn(msg) del nan_ix if items_pool_reind.shape[0] == 0: raise ValueError("No items to recommend.") elif items_pool_reind.shape[0] == 1: raise ValueError("Only 1 item to recommend.") else: pass else: raise ValueError("'items_pool' must be an array.") if self.reindex: allpreds = - self.Theta[user].dot(self.Beta[items_pool_reind].T) else: allpreds = - self.Theta[user].dot(self.Beta[items_pool].T) n = np.min([n, items_pool.shape[0]]) if exclude_seen: n_ext = np.min([n + self._n_seen_by_user[user], items_pool.shape[0]]) rec = np.argpartition(allpreds, n_ext-1)[:n_ext] seen = self.seen[self._st_ix_user[user] : self._st_ix_user[user] + self._n_seen_by_user[user]] if self.reindex: rec = np.setdiff1d(items_pool_reind[rec], seen) allpreds = - self.Theta[user].dot(self.Beta[rec].T) return self.item_mapping_[rec[np.argsort(allpreds)[:n]]] else: rec = np.setdiff1d(items_pool[rec], seen) allpreds = - self.Theta[user].dot(self.Beta[rec].T) return rec[np.argsort(allpreds)[:n]] else: rec = np.argpartition(allpreds, n-1)[:n] return items_pool[rec[np.argsort(allpreds[rec])]]
['def', 'topN', '(', 'self', ',', 'user', ',', 'n', '=', '10', ',', 'exclude_seen', '=', 'True', ',', 'items_pool', '=', 'None', ')', ':', 'if', 'isinstance', '(', 'n', ',', 'float', ')', ':', 'n', '=', 'int', '(', 'n', ')', 'assert', 'isinstance', '(', 'n', ',', 'int', ')', 'if', 'self', '.', 'reindex', ':', 'if', 'self', '.', 'produce_dicts', ':', 'try', ':', 'user', '=', 'self', '.', 'user_dict_', '[', 'user', ']', 'except', ':', 'raise', 'ValueError', '(', '"Can only predict for users who were in the training set."', ')', 'else', ':', 'user', '=', 'pd', '.', 'Categorical', '(', 'np', '.', 'array', '(', '[', 'user', ']', ')', ',', 'self', '.', 'user_mapping_', ')', '.', 'codes', '[', '0', ']', 'if', 'user', '==', '-', '1', ':', 'raise', 'ValueError', '(', '"Can only predict for users who were in the training set."', ')', 'if', 'exclude_seen', 'and', 'not', 'self', '.', 'keep_data', ':', 'raise', 'Exception', '(', '"Can only exclude seen items when passing \'keep_data=True\' to .fit"', ')', 'if', 'items_pool', 'is', 'None', ':', 'allpreds', '=', '-', '(', 'self', '.', 'Theta', '[', 'user', ']', '.', 'dot', '(', 'self', '.', 'Beta', '.', 'T', ')', ')', 'if', 'exclude_seen', ':', 'n_ext', '=', 'np', '.', 'min', '(', '[', 'n', '+', 'self', '.', '_n_seen_by_user', '[', 'user', ']', ',', 'self', '.', 'Beta', '.', 'shape', '[', '0', ']', ']', ')', 'rec', '=', 'np', '.', 'argpartition', '(', 'allpreds', ',', 'n_ext', '-', '1', ')', '[', ':', 'n_ext', ']', 'seen', '=', 'self', '.', 'seen', '[', 'self', '.', '_st_ix_user', '[', 'user', ']', ':', 'self', '.', '_st_ix_user', '[', 'user', ']', '+', 'self', '.', '_n_seen_by_user', '[', 'user', ']', ']', 'rec', '=', 'np', '.', 'setdiff1d', '(', 'rec', ',', 'seen', ')', 'rec', '=', 'rec', '[', 'np', '.', 'argsort', '(', 'allpreds', '[', 'rec', ']', ')', '[', ':', 'n', ']', ']', 'if', 'self', '.', 'reindex', ':', 'return', 'self', '.', 'item_mapping_', '[', 'rec', ']', 'else', ':', 'return', 'rec', 'else', ':', 'n', '=', 'np', '.', 'min', '(', '[', 'n', ',', 'self', '.', 'Beta', '.', 'shape', '[', '0', ']', ']', ')', 'rec', '=', 'np', '.', 'argpartition', '(', 'allpreds', ',', 'n', '-', '1', ')', '[', ':', 'n', ']', 'rec', '=', 'rec', '[', 'np', '.', 'argsort', '(', 'allpreds', '[', 'rec', ']', ')', ']', 'if', 'self', '.', 'reindex', ':', 'return', 'self', '.', 'item_mapping_', '[', 'rec', ']', 'else', ':', 'return', 'rec', 'else', ':', 'if', 'isinstance', '(', 'items_pool', ',', 'list', ')', 'or', 'isinstance', '(', 'items_pool', ',', 'tuple', ')', ':', 'items_pool', '=', 'np', '.', 'array', '(', 'items_pool', ')', 'if', 'items_pool', '.', '__class__', '.', '__name__', '==', "'Series'", ':', 'items_pool', '=', 'items_pool', '.', 'values', 'if', 'isinstance', '(', 'items_pool', ',', 'np', '.', 'ndarray', ')', ':', 'if', 'len', '(', 'items_pool', '.', 'shape', ')', '>', '1', ':', 'items_pool', '=', 'items_pool', '.', 'reshape', '(', '-', '1', ')', 'if', 'self', '.', 'reindex', ':', 'items_pool_reind', '=', 'pd', '.', 'Categorical', '(', 'items_pool', ',', 'self', '.', 'item_mapping_', ')', '.', 'codes', 'nan_ix', '=', '(', 'items_pool_reind', '==', '-', '1', ')', 'if', 'nan_ix', '.', 'sum', '(', ')', '>', '0', ':', 'items_pool_reind', '=', 'items_pool_reind', '[', '~', 'nan_ix', ']', 'msg', '=', '"There were "', '+', '(', '"%d"', '%', 'int', '(', 'nan_ix', '.', 'sum', '(', ')', ')', ')', '+', '" entries from \'item_pool\'"', 'msg', '+=', '"that were not in the training data and will be exluded."', 'warnings', '.', 'warn', '(', 'msg', ')', 'del', 'nan_ix', 'if', 'items_pool_reind', '.', 'shape', '[', '0', ']', '==', '0', ':', 'raise', 'ValueError', '(', '"No items to recommend."', ')', 'elif', 'items_pool_reind', '.', 'shape', '[', '0', ']', '==', '1', ':', 'raise', 'ValueError', '(', '"Only 1 item to recommend."', ')', 'else', ':', 'pass', 'else', ':', 'raise', 'ValueError', '(', '"\'items_pool\' must be an array."', ')', 'if', 'self', '.', 'reindex', ':', 'allpreds', '=', '-', 'self', '.', 'Theta', '[', 'user', ']', '.', 'dot', '(', 'self', '.', 'Beta', '[', 'items_pool_reind', ']', '.', 'T', ')', 'else', ':', 'allpreds', '=', '-', 'self', '.', 'Theta', '[', 'user', ']', '.', 'dot', '(', 'self', '.', 'Beta', '[', 'items_pool', ']', '.', 'T', ')', 'n', '=', 'np', '.', 'min', '(', '[', 'n', ',', 'items_pool', '.', 'shape', '[', '0', ']', ']', ')', 'if', 'exclude_seen', ':', 'n_ext', '=', 'np', '.', 'min', '(', '[', 'n', '+', 'self', '.', '_n_seen_by_user', '[', 'user', ']', ',', 'items_pool', '.', 'shape', '[', '0', ']', ']', ')', 'rec', '=', 'np', '.', 'argpartition', '(', 'allpreds', ',', 'n_ext', '-', '1', ')', '[', ':', 'n_ext', ']', 'seen', '=', 'self', '.', 'seen', '[', 'self', '.', '_st_ix_user', '[', 'user', ']', ':', 'self', '.', '_st_ix_user', '[', 'user', ']', '+', 'self', '.', '_n_seen_by_user', '[', 'user', ']', ']', 'if', 'self', '.', 'reindex', ':', 'rec', '=', 'np', '.', 'setdiff1d', '(', 'items_pool_reind', '[', 'rec', ']', ',', 'seen', ')', 'allpreds', '=', '-', 'self', '.', 'Theta', '[', 'user', ']', '.', 'dot', '(', 'self', '.', 'Beta', '[', 'rec', ']', '.', 'T', ')', 'return', 'self', '.', 'item_mapping_', '[', 'rec', '[', 'np', '.', 'argsort', '(', 'allpreds', ')', '[', ':', 'n', ']', ']', ']', 'else', ':', 'rec', '=', 'np', '.', 'setdiff1d', '(', 'items_pool', '[', 'rec', ']', ',', 'seen', ')', 'allpreds', '=', '-', 'self', '.', 'Theta', '[', 'user', ']', '.', 'dot', '(', 'self', '.', 'Beta', '[', 'rec', ']', '.', 'T', ')', 'return', 'rec', '[', 'np', '.', 'argsort', '(', 'allpreds', ')', '[', ':', 'n', ']', ']', 'else', ':', 'rec', '=', 'np', '.', 'argpartition', '(', 'allpreds', ',', 'n', '-', '1', ')', '[', ':', 'n', ']', 'return', 'items_pool', '[', 'rec', '[', 'np', '.', 'argsort', '(', 'allpreds', '[', 'rec', ']', ')', ']', ']']
Recommend Top-N items for a user Outputs the Top-N items according to score predicted by the model. Can exclude the items for the user that were associated to her in the training set, and can also recommend from only a subset of user-provided items. Parameters ---------- user : obj User for which to recommend. n : int Number of top items to recommend. exclude_seen: bool Whether to exclude items that were associated to the user in the training set. items_pool: None or array Items to consider for recommending to the user. Returns ------- rec : array (n,) Top-N recommended items.
['Recommend', 'Top', '-', 'N', 'items', 'for', 'a', 'user']
train
https://github.com/david-cortes/hpfrec/blob/cf0b18aa03e189f822b582d59c23f7862593289e/hpfrec/__init__.py#L1265-L1372
3,543
v1k45/django-notify-x
notify/views.py
mark_all
def mark_all(request): """ Marks notifications as either read or unread depending of POST parameters. Takes ``action`` as POST data, it can either be ``read`` or ``unread``. :param request: HTTP Request context. :return: Response to mark_all action. """ action = request.POST.get('action', None) success = True if action == 'read': request.user.notifications.read_all() msg = _("Marked all notifications as read") elif action == 'unread': request.user.notifications.unread_all() msg = _("Marked all notifications as unread") else: msg = _("Invalid mark action") success = False ctx = {'msg': msg, 'success': success, 'action': action} return notification_redirect(request, ctx)
python
def mark_all(request): """ Marks notifications as either read or unread depending of POST parameters. Takes ``action`` as POST data, it can either be ``read`` or ``unread``. :param request: HTTP Request context. :return: Response to mark_all action. """ action = request.POST.get('action', None) success = True if action == 'read': request.user.notifications.read_all() msg = _("Marked all notifications as read") elif action == 'unread': request.user.notifications.unread_all() msg = _("Marked all notifications as unread") else: msg = _("Invalid mark action") success = False ctx = {'msg': msg, 'success': success, 'action': action} return notification_redirect(request, ctx)
['def', 'mark_all', '(', 'request', ')', ':', 'action', '=', 'request', '.', 'POST', '.', 'get', '(', "'action'", ',', 'None', ')', 'success', '=', 'True', 'if', 'action', '==', "'read'", ':', 'request', '.', 'user', '.', 'notifications', '.', 'read_all', '(', ')', 'msg', '=', '_', '(', '"Marked all notifications as read"', ')', 'elif', 'action', '==', "'unread'", ':', 'request', '.', 'user', '.', 'notifications', '.', 'unread_all', '(', ')', 'msg', '=', '_', '(', '"Marked all notifications as unread"', ')', 'else', ':', 'msg', '=', '_', '(', '"Invalid mark action"', ')', 'success', '=', 'False', 'ctx', '=', '{', "'msg'", ':', 'msg', ',', "'success'", ':', 'success', ',', "'action'", ':', 'action', '}', 'return', 'notification_redirect', '(', 'request', ',', 'ctx', ')']
Marks notifications as either read or unread depending of POST parameters. Takes ``action`` as POST data, it can either be ``read`` or ``unread``. :param request: HTTP Request context. :return: Response to mark_all action.
['Marks', 'notifications', 'as', 'either', 'read', 'or', 'unread', 'depending', 'of', 'POST', 'parameters', '.', 'Takes', 'action', 'as', 'POST', 'data', 'it', 'can', 'either', 'be', 'read', 'or', 'unread', '.']
train
https://github.com/v1k45/django-notify-x/blob/b4aa03039759126889666a59117e83dcd4cdb374/notify/views.py#L101-L125
3,544
theelous3/multio
multio/__init__.py
SocketWrapper.wrap
def wrap(cls, meth): ''' Wraps a connection opening method in this class. ''' async def inner(*args, **kwargs): sock = await meth(*args, **kwargs) return cls(sock) return inner
python
def wrap(cls, meth): ''' Wraps a connection opening method in this class. ''' async def inner(*args, **kwargs): sock = await meth(*args, **kwargs) return cls(sock) return inner
['def', 'wrap', '(', 'cls', ',', 'meth', ')', ':', 'async', 'def', 'inner', '(', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'sock', '=', 'await', 'meth', '(', '*', 'args', ',', '*', '*', 'kwargs', ')', 'return', 'cls', '(', 'sock', ')', 'return', 'inner']
Wraps a connection opening method in this class.
['Wraps', 'a', 'connection', 'opening', 'method', 'in', 'this', 'class', '.']
train
https://github.com/theelous3/multio/blob/018e4a9f78d5f4e78608a1a1537000b5fd778bbe/multio/__init__.py#L112-L121
3,545
tensorflow/tensor2tensor
tensor2tensor/utils/bleu_hook.py
bleu_tokenize
def bleu_tokenize(string): r"""Tokenize a string following the official BLEU implementation. See https://github.com/moses-smt/mosesdecoder/" "blob/master/scripts/generic/mteval-v14.pl#L954-L983 In our case, the input string is expected to be just one line and no HTML entities de-escaping is needed. So we just tokenize on punctuation and symbols, except when a punctuation is preceded and followed by a digit (e.g. a comma/dot as a thousand/decimal separator). Note that a number (e.g. a year) followed by a dot at the end of sentence is NOT tokenized, i.e. the dot stays with the number because `s/(\p{P})(\P{N})/ $1 $2/g` does not match this case (unless we add a space after each sentence). However, this error is already in the original mteval-v14.pl and we want to be consistent with it. Args: string: the input string Returns: a list of tokens """ string = uregex.nondigit_punct_re.sub(r"\1 \2 ", string) string = uregex.punct_nondigit_re.sub(r" \1 \2", string) string = uregex.symbol_re.sub(r" \1 ", string) return string.split()
python
def bleu_tokenize(string): r"""Tokenize a string following the official BLEU implementation. See https://github.com/moses-smt/mosesdecoder/" "blob/master/scripts/generic/mteval-v14.pl#L954-L983 In our case, the input string is expected to be just one line and no HTML entities de-escaping is needed. So we just tokenize on punctuation and symbols, except when a punctuation is preceded and followed by a digit (e.g. a comma/dot as a thousand/decimal separator). Note that a number (e.g. a year) followed by a dot at the end of sentence is NOT tokenized, i.e. the dot stays with the number because `s/(\p{P})(\P{N})/ $1 $2/g` does not match this case (unless we add a space after each sentence). However, this error is already in the original mteval-v14.pl and we want to be consistent with it. Args: string: the input string Returns: a list of tokens """ string = uregex.nondigit_punct_re.sub(r"\1 \2 ", string) string = uregex.punct_nondigit_re.sub(r" \1 \2", string) string = uregex.symbol_re.sub(r" \1 ", string) return string.split()
['def', 'bleu_tokenize', '(', 'string', ')', ':', 'string', '=', 'uregex', '.', 'nondigit_punct_re', '.', 'sub', '(', 'r"\\1 \\2 "', ',', 'string', ')', 'string', '=', 'uregex', '.', 'punct_nondigit_re', '.', 'sub', '(', 'r" \\1 \\2"', ',', 'string', ')', 'string', '=', 'uregex', '.', 'symbol_re', '.', 'sub', '(', 'r" \\1 "', ',', 'string', ')', 'return', 'string', '.', 'split', '(', ')']
r"""Tokenize a string following the official BLEU implementation. See https://github.com/moses-smt/mosesdecoder/" "blob/master/scripts/generic/mteval-v14.pl#L954-L983 In our case, the input string is expected to be just one line and no HTML entities de-escaping is needed. So we just tokenize on punctuation and symbols, except when a punctuation is preceded and followed by a digit (e.g. a comma/dot as a thousand/decimal separator). Note that a number (e.g. a year) followed by a dot at the end of sentence is NOT tokenized, i.e. the dot stays with the number because `s/(\p{P})(\P{N})/ $1 $2/g` does not match this case (unless we add a space after each sentence). However, this error is already in the original mteval-v14.pl and we want to be consistent with it. Args: string: the input string Returns: a list of tokens
['r', 'Tokenize', 'a', 'string', 'following', 'the', 'official', 'BLEU', 'implementation', '.']
train
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/bleu_hook.py#L172-L199
3,546
taskcluster/taskcluster-client.py
taskcluster/auth.py
Auth.azureTables
def azureTables(self, *args, **kwargs): """ List Tables in an Account Managed by Auth Retrieve a list of all tables in an account. This method gives output: ``v1/azure-table-list-response.json#`` This method is ``stable`` """ return self._makeApiCall(self.funcinfo["azureTables"], *args, **kwargs)
python
def azureTables(self, *args, **kwargs): """ List Tables in an Account Managed by Auth Retrieve a list of all tables in an account. This method gives output: ``v1/azure-table-list-response.json#`` This method is ``stable`` """ return self._makeApiCall(self.funcinfo["azureTables"], *args, **kwargs)
['def', 'azureTables', '(', 'self', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'return', 'self', '.', '_makeApiCall', '(', 'self', '.', 'funcinfo', '[', '"azureTables"', ']', ',', '*', 'args', ',', '*', '*', 'kwargs', ')']
List Tables in an Account Managed by Auth Retrieve a list of all tables in an account. This method gives output: ``v1/azure-table-list-response.json#`` This method is ``stable``
['List', 'Tables', 'in', 'an', 'Account', 'Managed', 'by', 'Auth']
train
https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/auth.py#L470-L481
3,547
tensorflow/cleverhans
examples/nips17_adversarial_competition/dev_toolkit/sample_attacks/noop/attack_noop.py
load_images
def load_images(input_dir, batch_shape): """Read png images from input directory in batches. Args: input_dir: input directory batch_shape: shape of minibatch array, i.e. [batch_size, height, width, 3] Yields: filenames: list file names without path of each image Length of this list could be less than batch_size, in this case only first few images of the result are elements of the minibatch. images: array with all images from this batch """ images = np.zeros(batch_shape) filenames = [] idx = 0 batch_size = batch_shape[0] for filepath in tf.gfile.Glob(os.path.join(input_dir, '*.png')): with tf.gfile.Open(filepath) as f: images[idx, :, :, :] = imread(f, mode='RGB').astype(np.float) / 255.0 filenames.append(os.path.basename(filepath)) idx += 1 if idx == batch_size: yield filenames, images filenames = [] images = np.zeros(batch_shape) idx = 0 if idx > 0: yield filenames, images
python
def load_images(input_dir, batch_shape): """Read png images from input directory in batches. Args: input_dir: input directory batch_shape: shape of minibatch array, i.e. [batch_size, height, width, 3] Yields: filenames: list file names without path of each image Length of this list could be less than batch_size, in this case only first few images of the result are elements of the minibatch. images: array with all images from this batch """ images = np.zeros(batch_shape) filenames = [] idx = 0 batch_size = batch_shape[0] for filepath in tf.gfile.Glob(os.path.join(input_dir, '*.png')): with tf.gfile.Open(filepath) as f: images[idx, :, :, :] = imread(f, mode='RGB').astype(np.float) / 255.0 filenames.append(os.path.basename(filepath)) idx += 1 if idx == batch_size: yield filenames, images filenames = [] images = np.zeros(batch_shape) idx = 0 if idx > 0: yield filenames, images
['def', 'load_images', '(', 'input_dir', ',', 'batch_shape', ')', ':', 'images', '=', 'np', '.', 'zeros', '(', 'batch_shape', ')', 'filenames', '=', '[', ']', 'idx', '=', '0', 'batch_size', '=', 'batch_shape', '[', '0', ']', 'for', 'filepath', 'in', 'tf', '.', 'gfile', '.', 'Glob', '(', 'os', '.', 'path', '.', 'join', '(', 'input_dir', ',', "'*.png'", ')', ')', ':', 'with', 'tf', '.', 'gfile', '.', 'Open', '(', 'filepath', ')', 'as', 'f', ':', 'images', '[', 'idx', ',', ':', ',', ':', ',', ':', ']', '=', 'imread', '(', 'f', ',', 'mode', '=', "'RGB'", ')', '.', 'astype', '(', 'np', '.', 'float', ')', '/', '255.0', 'filenames', '.', 'append', '(', 'os', '.', 'path', '.', 'basename', '(', 'filepath', ')', ')', 'idx', '+=', '1', 'if', 'idx', '==', 'batch_size', ':', 'yield', 'filenames', ',', 'images', 'filenames', '=', '[', ']', 'images', '=', 'np', '.', 'zeros', '(', 'batch_shape', ')', 'idx', '=', '0', 'if', 'idx', '>', '0', ':', 'yield', 'filenames', ',', 'images']
Read png images from input directory in batches. Args: input_dir: input directory batch_shape: shape of minibatch array, i.e. [batch_size, height, width, 3] Yields: filenames: list file names without path of each image Length of this list could be less than batch_size, in this case only first few images of the result are elements of the minibatch. images: array with all images from this batch
['Read', 'png', 'images', 'from', 'input', 'directory', 'in', 'batches', '.']
train
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/examples/nips17_adversarial_competition/dev_toolkit/sample_attacks/noop/attack_noop.py#L40-L68
3,548
StackStorm/pybind
pybind/slxos/v17s_1_02/protocol/cfm/domain_name/__init__.py
domain_name._set_domain_name
def _set_domain_name(self, v, load=False): """ Setter method for domain_name, mapped from YANG variable /protocol/cfm/domain_name/domain_name (string) If this variable is read-only (config: false) in the source YANG file, then _set_domain_name is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_domain_name() directly. """ parent = getattr(self, "_parent", None) if parent is not None and load is False: raise AttributeError("Cannot set keys directly when" + " within an instantiated list") if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'length': [u'1..21']}), is_leaf=True, yang_name="domain-name", rest_name="domain-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'String length 21 char', u'cli-run-template': u'$(.?:)', u'cli-incomplete-command': None, u'cli-hide-in-submode': None}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-dot1ag', defining_module='brocade-dot1ag', yang_type='string', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """domain_name must be of a type compatible with string""", 'defined-type': "string", 'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'length': [u'1..21']}), is_leaf=True, yang_name="domain-name", rest_name="domain-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'String length 21 char', u'cli-run-template': u'$(.?:)', u'cli-incomplete-command': None, u'cli-hide-in-submode': None}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-dot1ag', defining_module='brocade-dot1ag', yang_type='string', is_config=True)""", }) self.__domain_name = t if hasattr(self, '_set'): self._set()
python
def _set_domain_name(self, v, load=False): """ Setter method for domain_name, mapped from YANG variable /protocol/cfm/domain_name/domain_name (string) If this variable is read-only (config: false) in the source YANG file, then _set_domain_name is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_domain_name() directly. """ parent = getattr(self, "_parent", None) if parent is not None and load is False: raise AttributeError("Cannot set keys directly when" + " within an instantiated list") if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'length': [u'1..21']}), is_leaf=True, yang_name="domain-name", rest_name="domain-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'String length 21 char', u'cli-run-template': u'$(.?:)', u'cli-incomplete-command': None, u'cli-hide-in-submode': None}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-dot1ag', defining_module='brocade-dot1ag', yang_type='string', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """domain_name must be of a type compatible with string""", 'defined-type': "string", 'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'length': [u'1..21']}), is_leaf=True, yang_name="domain-name", rest_name="domain-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'String length 21 char', u'cli-run-template': u'$(.?:)', u'cli-incomplete-command': None, u'cli-hide-in-submode': None}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-dot1ag', defining_module='brocade-dot1ag', yang_type='string', is_config=True)""", }) self.__domain_name = t if hasattr(self, '_set'): self._set()
['def', '_set_domain_name', '(', 'self', ',', 'v', ',', 'load', '=', 'False', ')', ':', 'parent', '=', 'getattr', '(', 'self', ',', '"_parent"', ',', 'None', ')', 'if', 'parent', 'is', 'not', 'None', 'and', 'load', 'is', 'False', ':', 'raise', 'AttributeError', '(', '"Cannot set keys directly when"', '+', '" within an instantiated list"', ')', 'if', 'hasattr', '(', 'v', ',', '"_utype"', ')', ':', 'v', '=', 'v', '.', '_utype', '(', 'v', ')', 'try', ':', 't', '=', 'YANGDynClass', '(', 'v', ',', 'base', '=', 'RestrictedClassType', '(', 'base_type', '=', 'unicode', ',', 'restriction_dict', '=', '{', "'length'", ':', '[', "u'1..21'", ']', '}', ')', ',', 'is_leaf', '=', 'True', ',', 'yang_name', '=', '"domain-name"', ',', 'rest_name', '=', '"domain-name"', ',', 'parent', '=', 'self', ',', 'path_helper', '=', 'self', '.', '_path_helper', ',', 'extmethods', '=', 'self', '.', '_extmethods', ',', 'register_paths', '=', 'True', ',', 'extensions', '=', '{', "u'tailf-common'", ':', '{', "u'info'", ':', "u'String length 21 char'", ',', "u'cli-run-template'", ':', "u'$(.?:)'", ',', "u'cli-incomplete-command'", ':', 'None', ',', "u'cli-hide-in-submode'", ':', 'None', '}', '}', ',', 'is_keyval', '=', 'True', ',', 'namespace', '=', "'urn:brocade.com:mgmt:brocade-dot1ag'", ',', 'defining_module', '=', "'brocade-dot1ag'", ',', 'yang_type', '=', "'string'", ',', 'is_config', '=', 'True', ')', 'except', '(', 'TypeError', ',', 'ValueError', ')', ':', 'raise', 'ValueError', '(', '{', "'error-string'", ':', '"""domain_name must be of a type compatible with string"""', ',', "'defined-type'", ':', '"string"', ',', "'generated-type'", ':', '"""YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={\'length\': [u\'1..21\']}), is_leaf=True, yang_name="domain-name", rest_name="domain-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u\'tailf-common\': {u\'info\': u\'String length 21 char\', u\'cli-run-template\': u\'$(.?:)\', u\'cli-incomplete-command\': None, u\'cli-hide-in-submode\': None}}, is_keyval=True, namespace=\'urn:brocade.com:mgmt:brocade-dot1ag\', defining_module=\'brocade-dot1ag\', yang_type=\'string\', is_config=True)"""', ',', '}', ')', 'self', '.', '__domain_name', '=', 't', 'if', 'hasattr', '(', 'self', ',', "'_set'", ')', ':', 'self', '.', '_set', '(', ')']
Setter method for domain_name, mapped from YANG variable /protocol/cfm/domain_name/domain_name (string) If this variable is read-only (config: false) in the source YANG file, then _set_domain_name is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_domain_name() directly.
['Setter', 'method', 'for', 'domain_name', 'mapped', 'from', 'YANG', 'variable', '/', 'protocol', '/', 'cfm', '/', 'domain_name', '/', 'domain_name', '(', 'string', ')', 'If', 'this', 'variable', 'is', 'read', '-', 'only', '(', 'config', ':', 'false', ')', 'in', 'the', 'source', 'YANG', 'file', 'then', '_set_domain_name', 'is', 'considered', 'as', 'a', 'private', 'method', '.', 'Backends', 'looking', 'to', 'populate', 'this', 'variable', 'should', 'do', 'so', 'via', 'calling', 'thisObj', '.', '_set_domain_name', '()', 'directly', '.']
train
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/slxos/v17s_1_02/protocol/cfm/domain_name/__init__.py#L94-L120
3,549
gem/oq-engine
openquake/hazardlib/calc/stochastic.py
sample_cluster
def sample_cluster(sources, srcfilter, num_ses, param): """ Yields ruptures generated by a cluster of sources. :param sources: A sequence of sources of the same group :param num_ses: Number of stochastic event sets :param param: a dictionary of additional parameters including ses_per_logic_tree_path :yields: dictionaries with keys rup_array, calc_times, eff_ruptures """ eb_ruptures = [] numpy.random.seed(sources[0].serial) [grp_id] = set(src.src_group_id for src in sources) # AccumDict of arrays with 3 elements weight, nsites, calc_time calc_times = AccumDict(accum=numpy.zeros(3, numpy.float32)) # Set the parameters required to compute the number of occurrences # of the group of sources # assert param['oqparam'].number_of_logic_tree_samples > 0 samples = getattr(sources[0], 'samples', 1) tom = getattr(sources, 'temporal_occurrence_model') rate = tom.occurrence_rate time_span = tom.time_span # Note that using a single time interval corresponding to the product # of the investigation time and the number of realisations as we do # here is admitted only in the case of a time-independent model grp_num_occ = numpy.random.poisson(rate * time_span * samples * num_ses) # Now we process the sources included in the group. Possible cases: # * The group is a cluster. In this case we choose one rupture per each # source; uncertainty in the ruptures can be handled in this case # using mutually exclusive ruptures (note that this is admitted # only for nons-parametric sources). # * The group contains mutually exclusive sources. In this case we # choose one source and then one rupture from this source. rup_counter = {} rup_data = {} eff_ruptures = 0 for rlz_num in range(grp_num_occ): if sources.cluster: for src, _sites in srcfilter(sources): # Sum Ruptures if rlz_num == 0: eff_ruptures += src.num_ruptures # Track calculation time t0 = time.time() rup = src.get_one_rupture() # The problem here is that we do not know a-priori the # number of occurrences of a given rupture. if src.id not in rup_counter: rup_counter[src.id] = {} rup_data[src.id] = {} if rup.idx not in rup_counter[src.id]: rup_counter[src.id][rup.idx] = 1 rup_data[src.id][rup.idx] = [rup, src.id, grp_id] else: rup_counter[src.id][rup.idx] += 1 # Store info dt = time.time() - t0 calc_times[src.id] += numpy.array([len(rup_data[src.id]), src.nsites, dt]) elif param['src_interdep'] == 'mutex': print('Not yet implemented') exit(0) # Create event based ruptures for src_key in rup_data: for rup_key in rup_data[src_key]: dat = rup_data[src_key][rup_key] cnt = rup_counter[src_key][rup_key] ebr = EBRupture(dat[0], dat[1], dat[2], cnt, samples) eb_ruptures.append(ebr) return eb_ruptures, calc_times, eff_ruptures, grp_id
python
def sample_cluster(sources, srcfilter, num_ses, param): """ Yields ruptures generated by a cluster of sources. :param sources: A sequence of sources of the same group :param num_ses: Number of stochastic event sets :param param: a dictionary of additional parameters including ses_per_logic_tree_path :yields: dictionaries with keys rup_array, calc_times, eff_ruptures """ eb_ruptures = [] numpy.random.seed(sources[0].serial) [grp_id] = set(src.src_group_id for src in sources) # AccumDict of arrays with 3 elements weight, nsites, calc_time calc_times = AccumDict(accum=numpy.zeros(3, numpy.float32)) # Set the parameters required to compute the number of occurrences # of the group of sources # assert param['oqparam'].number_of_logic_tree_samples > 0 samples = getattr(sources[0], 'samples', 1) tom = getattr(sources, 'temporal_occurrence_model') rate = tom.occurrence_rate time_span = tom.time_span # Note that using a single time interval corresponding to the product # of the investigation time and the number of realisations as we do # here is admitted only in the case of a time-independent model grp_num_occ = numpy.random.poisson(rate * time_span * samples * num_ses) # Now we process the sources included in the group. Possible cases: # * The group is a cluster. In this case we choose one rupture per each # source; uncertainty in the ruptures can be handled in this case # using mutually exclusive ruptures (note that this is admitted # only for nons-parametric sources). # * The group contains mutually exclusive sources. In this case we # choose one source and then one rupture from this source. rup_counter = {} rup_data = {} eff_ruptures = 0 for rlz_num in range(grp_num_occ): if sources.cluster: for src, _sites in srcfilter(sources): # Sum Ruptures if rlz_num == 0: eff_ruptures += src.num_ruptures # Track calculation time t0 = time.time() rup = src.get_one_rupture() # The problem here is that we do not know a-priori the # number of occurrences of a given rupture. if src.id not in rup_counter: rup_counter[src.id] = {} rup_data[src.id] = {} if rup.idx not in rup_counter[src.id]: rup_counter[src.id][rup.idx] = 1 rup_data[src.id][rup.idx] = [rup, src.id, grp_id] else: rup_counter[src.id][rup.idx] += 1 # Store info dt = time.time() - t0 calc_times[src.id] += numpy.array([len(rup_data[src.id]), src.nsites, dt]) elif param['src_interdep'] == 'mutex': print('Not yet implemented') exit(0) # Create event based ruptures for src_key in rup_data: for rup_key in rup_data[src_key]: dat = rup_data[src_key][rup_key] cnt = rup_counter[src_key][rup_key] ebr = EBRupture(dat[0], dat[1], dat[2], cnt, samples) eb_ruptures.append(ebr) return eb_ruptures, calc_times, eff_ruptures, grp_id
['def', 'sample_cluster', '(', 'sources', ',', 'srcfilter', ',', 'num_ses', ',', 'param', ')', ':', 'eb_ruptures', '=', '[', ']', 'numpy', '.', 'random', '.', 'seed', '(', 'sources', '[', '0', ']', '.', 'serial', ')', '[', 'grp_id', ']', '=', 'set', '(', 'src', '.', 'src_group_id', 'for', 'src', 'in', 'sources', ')', '# AccumDict of arrays with 3 elements weight, nsites, calc_time', 'calc_times', '=', 'AccumDict', '(', 'accum', '=', 'numpy', '.', 'zeros', '(', '3', ',', 'numpy', '.', 'float32', ')', ')', '# Set the parameters required to compute the number of occurrences', '# of the group of sources', "# assert param['oqparam'].number_of_logic_tree_samples > 0", 'samples', '=', 'getattr', '(', 'sources', '[', '0', ']', ',', "'samples'", ',', '1', ')', 'tom', '=', 'getattr', '(', 'sources', ',', "'temporal_occurrence_model'", ')', 'rate', '=', 'tom', '.', 'occurrence_rate', 'time_span', '=', 'tom', '.', 'time_span', '# Note that using a single time interval corresponding to the product', '# of the investigation time and the number of realisations as we do', '# here is admitted only in the case of a time-independent model', 'grp_num_occ', '=', 'numpy', '.', 'random', '.', 'poisson', '(', 'rate', '*', 'time_span', '*', 'samples', '*', 'num_ses', ')', '# Now we process the sources included in the group. Possible cases:', '# * The group is a cluster. In this case we choose one rupture per each', '# source; uncertainty in the ruptures can be handled in this case', '# using mutually exclusive ruptures (note that this is admitted', '# only for nons-parametric sources).', '# * The group contains mutually exclusive sources. In this case we', '# choose one source and then one rupture from this source.', 'rup_counter', '=', '{', '}', 'rup_data', '=', '{', '}', 'eff_ruptures', '=', '0', 'for', 'rlz_num', 'in', 'range', '(', 'grp_num_occ', ')', ':', 'if', 'sources', '.', 'cluster', ':', 'for', 'src', ',', '_sites', 'in', 'srcfilter', '(', 'sources', ')', ':', '# Sum Ruptures', 'if', 'rlz_num', '==', '0', ':', 'eff_ruptures', '+=', 'src', '.', 'num_ruptures', '# Track calculation time', 't0', '=', 'time', '.', 'time', '(', ')', 'rup', '=', 'src', '.', 'get_one_rupture', '(', ')', '# The problem here is that we do not know a-priori the', '# number of occurrences of a given rupture.', 'if', 'src', '.', 'id', 'not', 'in', 'rup_counter', ':', 'rup_counter', '[', 'src', '.', 'id', ']', '=', '{', '}', 'rup_data', '[', 'src', '.', 'id', ']', '=', '{', '}', 'if', 'rup', '.', 'idx', 'not', 'in', 'rup_counter', '[', 'src', '.', 'id', ']', ':', 'rup_counter', '[', 'src', '.', 'id', ']', '[', 'rup', '.', 'idx', ']', '=', '1', 'rup_data', '[', 'src', '.', 'id', ']', '[', 'rup', '.', 'idx', ']', '=', '[', 'rup', ',', 'src', '.', 'id', ',', 'grp_id', ']', 'else', ':', 'rup_counter', '[', 'src', '.', 'id', ']', '[', 'rup', '.', 'idx', ']', '+=', '1', '# Store info', 'dt', '=', 'time', '.', 'time', '(', ')', '-', 't0', 'calc_times', '[', 'src', '.', 'id', ']', '+=', 'numpy', '.', 'array', '(', '[', 'len', '(', 'rup_data', '[', 'src', '.', 'id', ']', ')', ',', 'src', '.', 'nsites', ',', 'dt', ']', ')', 'elif', 'param', '[', "'src_interdep'", ']', '==', "'mutex'", ':', 'print', '(', "'Not yet implemented'", ')', 'exit', '(', '0', ')', '# Create event based ruptures', 'for', 'src_key', 'in', 'rup_data', ':', 'for', 'rup_key', 'in', 'rup_data', '[', 'src_key', ']', ':', 'dat', '=', 'rup_data', '[', 'src_key', ']', '[', 'rup_key', ']', 'cnt', '=', 'rup_counter', '[', 'src_key', ']', '[', 'rup_key', ']', 'ebr', '=', 'EBRupture', '(', 'dat', '[', '0', ']', ',', 'dat', '[', '1', ']', ',', 'dat', '[', '2', ']', ',', 'cnt', ',', 'samples', ')', 'eb_ruptures', '.', 'append', '(', 'ebr', ')', 'return', 'eb_ruptures', ',', 'calc_times', ',', 'eff_ruptures', ',', 'grp_id']
Yields ruptures generated by a cluster of sources. :param sources: A sequence of sources of the same group :param num_ses: Number of stochastic event sets :param param: a dictionary of additional parameters including ses_per_logic_tree_path :yields: dictionaries with keys rup_array, calc_times, eff_ruptures
['Yields', 'ruptures', 'generated', 'by', 'a', 'cluster', 'of', 'sources', '.']
train
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hazardlib/calc/stochastic.py#L141-L216
3,550
priestc/giotto
giotto/controllers/irc_.py
listen
def listen(manifest, config, model_mock=False): """ IRC listening process. """ config['manifest'] = manifest config['model_mock'] = model_mock IRC = IrcBot(config) try: IRC.start() except KeyboardInterrupt: pass
python
def listen(manifest, config, model_mock=False): """ IRC listening process. """ config['manifest'] = manifest config['model_mock'] = model_mock IRC = IrcBot(config) try: IRC.start() except KeyboardInterrupt: pass
['def', 'listen', '(', 'manifest', ',', 'config', ',', 'model_mock', '=', 'False', ')', ':', 'config', '[', "'manifest'", ']', '=', 'manifest', 'config', '[', "'model_mock'", ']', '=', 'model_mock', 'IRC', '=', 'IrcBot', '(', 'config', ')', 'try', ':', 'IRC', '.', 'start', '(', ')', 'except', 'KeyboardInterrupt', ':', 'pass']
IRC listening process.
['IRC', 'listening', 'process', '.']
train
https://github.com/priestc/giotto/blob/d4c26380caefa7745bb27135e315de830f7254d3/giotto/controllers/irc_.py#L182-L192
3,551
PonteIneptique/flask-github-proxy
flask_github_proxy/__init__.py
GithubProxy.make_ref
def make_ref(self, branch): """ Make a branch on github :param branch: Name of the branch to create :return: Sha of the branch or self.ProxyError """ master_sha = self.get_ref(self.master_upstream) if not isinstance(master_sha, str): return self.ProxyError( 404, "The default branch from which to checkout is either not available or does not exist", step="make_ref" ) params = { "ref": "refs/heads/{branch}".format(branch=branch), "sha": master_sha } uri = "{api}/repos/{origin}/git/refs".format( api=self.github_api_url, origin=self.origin ) data = self.request("POST", uri, data=params) if data.status_code == 201: data = json.loads(data.content.decode("utf-8")) return data["object"]["sha"] else: decoded_data = json.loads(data.content.decode("utf-8")) return self.ProxyError( data.status_code, (decoded_data, "message"), step="make_ref", context={ "uri": uri, "params": params } )
python
def make_ref(self, branch): """ Make a branch on github :param branch: Name of the branch to create :return: Sha of the branch or self.ProxyError """ master_sha = self.get_ref(self.master_upstream) if not isinstance(master_sha, str): return self.ProxyError( 404, "The default branch from which to checkout is either not available or does not exist", step="make_ref" ) params = { "ref": "refs/heads/{branch}".format(branch=branch), "sha": master_sha } uri = "{api}/repos/{origin}/git/refs".format( api=self.github_api_url, origin=self.origin ) data = self.request("POST", uri, data=params) if data.status_code == 201: data = json.loads(data.content.decode("utf-8")) return data["object"]["sha"] else: decoded_data = json.loads(data.content.decode("utf-8")) return self.ProxyError( data.status_code, (decoded_data, "message"), step="make_ref", context={ "uri": uri, "params": params } )
['def', 'make_ref', '(', 'self', ',', 'branch', ')', ':', 'master_sha', '=', 'self', '.', 'get_ref', '(', 'self', '.', 'master_upstream', ')', 'if', 'not', 'isinstance', '(', 'master_sha', ',', 'str', ')', ':', 'return', 'self', '.', 'ProxyError', '(', '404', ',', '"The default branch from which to checkout is either not available or does not exist"', ',', 'step', '=', '"make_ref"', ')', 'params', '=', '{', '"ref"', ':', '"refs/heads/{branch}"', '.', 'format', '(', 'branch', '=', 'branch', ')', ',', '"sha"', ':', 'master_sha', '}', 'uri', '=', '"{api}/repos/{origin}/git/refs"', '.', 'format', '(', 'api', '=', 'self', '.', 'github_api_url', ',', 'origin', '=', 'self', '.', 'origin', ')', 'data', '=', 'self', '.', 'request', '(', '"POST"', ',', 'uri', ',', 'data', '=', 'params', ')', 'if', 'data', '.', 'status_code', '==', '201', ':', 'data', '=', 'json', '.', 'loads', '(', 'data', '.', 'content', '.', 'decode', '(', '"utf-8"', ')', ')', 'return', 'data', '[', '"object"', ']', '[', '"sha"', ']', 'else', ':', 'decoded_data', '=', 'json', '.', 'loads', '(', 'data', '.', 'content', '.', 'decode', '(', '"utf-8"', ')', ')', 'return', 'self', '.', 'ProxyError', '(', 'data', '.', 'status_code', ',', '(', 'decoded_data', ',', '"message"', ')', ',', 'step', '=', '"make_ref"', ',', 'context', '=', '{', '"uri"', ':', 'uri', ',', '"params"', ':', 'params', '}', ')']
Make a branch on github :param branch: Name of the branch to create :return: Sha of the branch or self.ProxyError
['Make', 'a', 'branch', 'on', 'github']
train
https://github.com/PonteIneptique/flask-github-proxy/blob/f0a60639342f7c0834360dc12a099bfc3a06d939/flask_github_proxy/__init__.py#L370-L405
3,552
razor-x/scipy-data_fitting
scipy_data_fitting/data.py
Data.load_data
def load_data(self): """ Loads data from `scipy_data_fitting.Data.path` using [`numpy.genfromtxt`][1] and returns a [`numpy.ndarray`][2]. Data is scaled according to `scipy_data_fitting.Data.scale`. Arguments to [`numpy.genfromtxt`][1] are controlled by `scipy_data_fitting.Data.genfromtxt_args`. [1]: http://docs.scipy.org/doc/numpy/reference/generated/numpy.genfromtxt.html [2]: http://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.html """ array = numpy.genfromtxt(self.path, **self.genfromtxt_args) for n, scale in enumerate(self.scale): array[n,:] *= self.scale[n] return array
python
def load_data(self): """ Loads data from `scipy_data_fitting.Data.path` using [`numpy.genfromtxt`][1] and returns a [`numpy.ndarray`][2]. Data is scaled according to `scipy_data_fitting.Data.scale`. Arguments to [`numpy.genfromtxt`][1] are controlled by `scipy_data_fitting.Data.genfromtxt_args`. [1]: http://docs.scipy.org/doc/numpy/reference/generated/numpy.genfromtxt.html [2]: http://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.html """ array = numpy.genfromtxt(self.path, **self.genfromtxt_args) for n, scale in enumerate(self.scale): array[n,:] *= self.scale[n] return array
['def', 'load_data', '(', 'self', ')', ':', 'array', '=', 'numpy', '.', 'genfromtxt', '(', 'self', '.', 'path', ',', '*', '*', 'self', '.', 'genfromtxt_args', ')', 'for', 'n', ',', 'scale', 'in', 'enumerate', '(', 'self', '.', 'scale', ')', ':', 'array', '[', 'n', ',', ':', ']', '*=', 'self', '.', 'scale', '[', 'n', ']', 'return', 'array']
Loads data from `scipy_data_fitting.Data.path` using [`numpy.genfromtxt`][1] and returns a [`numpy.ndarray`][2]. Data is scaled according to `scipy_data_fitting.Data.scale`. Arguments to [`numpy.genfromtxt`][1] are controlled by `scipy_data_fitting.Data.genfromtxt_args`. [1]: http://docs.scipy.org/doc/numpy/reference/generated/numpy.genfromtxt.html [2]: http://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.html
['Loads', 'data', 'from', 'scipy_data_fitting', '.', 'Data', '.', 'path', 'using', '[', 'numpy', '.', 'genfromtxt', ']', '[', '1', ']', 'and', 'returns', 'a', '[', 'numpy', '.', 'ndarray', ']', '[', '2', ']', '.']
train
https://github.com/razor-x/scipy-data_fitting/blob/c756a645da8629699b3f22244bfb7d5d4d88b179/scipy_data_fitting/data.py#L236-L251
3,553
christophertbrown/bioscripts
ctbBio/rp16.py
find_databases
def find_databases(databases): """ define ribosomal proteins and location of curated databases """ # 16 ribosomal proteins in their expected order proteins = ['L15', 'L18', 'L6', 'S8', 'L5', 'L24', 'L14', 'S17', 'L16', 'S3', 'L22', 'S19', 'L2', 'L4', 'L3', 'S10'] # curated databases protein_databases = { 'L14': 'rpL14_JGI_MDM.filtered.faa', 'L15': 'rpL15_JGI_MDM.filtered.faa', 'L16': 'rpL16_JGI_MDM.filtered.faa', 'L18': 'rpL18_JGI_MDM.filtered.faa', 'L22': 'rpL22_JGI_MDM.filtered.faa', 'L24': 'rpL24_JGI_MDM.filtered.faa', 'L2': 'rpL2_JGI_MDM.filtered.faa', 'L3': 'rpL3_JGI_MDM.filtered.faa', 'L4': 'rpL4_JGI_MDM.filtered.faa', 'L5': 'rpL5_JGI_MDM.filtered.faa', 'L6': 'rpL6_JGI_MDM.filtered.faa', 'S10': 'rpS10_JGI_MDM.filtered.faa', 'S17': 'rpS17_JGI_MDM.filtered.faa', 'S19': 'rpS19_JGI_MDM.filtered.faa', 'S3': 'rpS3_JGI_MDM.filtered.faa', 'S8': 'rpS8_JGI_MDM.filtered.faa'} protein_databases = {key: '%s/%s' % (databases, database) \ for key, database in list(protein_databases.items())} return proteins, protein_databases
python
def find_databases(databases): """ define ribosomal proteins and location of curated databases """ # 16 ribosomal proteins in their expected order proteins = ['L15', 'L18', 'L6', 'S8', 'L5', 'L24', 'L14', 'S17', 'L16', 'S3', 'L22', 'S19', 'L2', 'L4', 'L3', 'S10'] # curated databases protein_databases = { 'L14': 'rpL14_JGI_MDM.filtered.faa', 'L15': 'rpL15_JGI_MDM.filtered.faa', 'L16': 'rpL16_JGI_MDM.filtered.faa', 'L18': 'rpL18_JGI_MDM.filtered.faa', 'L22': 'rpL22_JGI_MDM.filtered.faa', 'L24': 'rpL24_JGI_MDM.filtered.faa', 'L2': 'rpL2_JGI_MDM.filtered.faa', 'L3': 'rpL3_JGI_MDM.filtered.faa', 'L4': 'rpL4_JGI_MDM.filtered.faa', 'L5': 'rpL5_JGI_MDM.filtered.faa', 'L6': 'rpL6_JGI_MDM.filtered.faa', 'S10': 'rpS10_JGI_MDM.filtered.faa', 'S17': 'rpS17_JGI_MDM.filtered.faa', 'S19': 'rpS19_JGI_MDM.filtered.faa', 'S3': 'rpS3_JGI_MDM.filtered.faa', 'S8': 'rpS8_JGI_MDM.filtered.faa'} protein_databases = {key: '%s/%s' % (databases, database) \ for key, database in list(protein_databases.items())} return proteins, protein_databases
['def', 'find_databases', '(', 'databases', ')', ':', '# 16 ribosomal proteins in their expected order', 'proteins', '=', '[', "'L15'", ',', "'L18'", ',', "'L6'", ',', "'S8'", ',', "'L5'", ',', "'L24'", ',', "'L14'", ',', "'S17'", ',', "'L16'", ',', "'S3'", ',', "'L22'", ',', "'S19'", ',', "'L2'", ',', "'L4'", ',', "'L3'", ',', "'S10'", ']', '# curated databases', 'protein_databases', '=', '{', "'L14'", ':', "'rpL14_JGI_MDM.filtered.faa'", ',', "'L15'", ':', "'rpL15_JGI_MDM.filtered.faa'", ',', "'L16'", ':', "'rpL16_JGI_MDM.filtered.faa'", ',', "'L18'", ':', "'rpL18_JGI_MDM.filtered.faa'", ',', "'L22'", ':', "'rpL22_JGI_MDM.filtered.faa'", ',', "'L24'", ':', "'rpL24_JGI_MDM.filtered.faa'", ',', "'L2'", ':', "'rpL2_JGI_MDM.filtered.faa'", ',', "'L3'", ':', "'rpL3_JGI_MDM.filtered.faa'", ',', "'L4'", ':', "'rpL4_JGI_MDM.filtered.faa'", ',', "'L5'", ':', "'rpL5_JGI_MDM.filtered.faa'", ',', "'L6'", ':', "'rpL6_JGI_MDM.filtered.faa'", ',', "'S10'", ':', "'rpS10_JGI_MDM.filtered.faa'", ',', "'S17'", ':', "'rpS17_JGI_MDM.filtered.faa'", ',', "'S19'", ':', "'rpS19_JGI_MDM.filtered.faa'", ',', "'S3'", ':', "'rpS3_JGI_MDM.filtered.faa'", ',', "'S8'", ':', "'rpS8_JGI_MDM.filtered.faa'", '}', 'protein_databases', '=', '{', 'key', ':', "'%s/%s'", '%', '(', 'databases', ',', 'database', ')', 'for', 'key', ',', 'database', 'in', 'list', '(', 'protein_databases', '.', 'items', '(', ')', ')', '}', 'return', 'proteins', ',', 'protein_databases']
define ribosomal proteins and location of curated databases
['define', 'ribosomal', 'proteins', 'and', 'location', 'of', 'curated', 'databases']
train
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/rp16.py#L21-L48
3,554
pycontribs/pyrax
pyrax/object_storage.py
Container.store_object
def store_object(self, obj_name, data, content_type=None, etag=None, content_encoding=None, ttl=None, return_none=False, headers=None, extra_info=None): """ Creates a new object in this container, and populates it with the given data. A StorageObject reference to the uploaded file will be returned, unless 'return_none' is set to True. The 'extra_info' parameter is included for backwards compatibility. It is no longer used at all, and will not be modified with swiftclient info, since swiftclient is not used any more. """ return self.create(obj_name=obj_name, data=data, content_type=content_type, etag=etag, content_encoding=content_encoding, ttl=ttl, return_none=return_none, headers=headers)
python
def store_object(self, obj_name, data, content_type=None, etag=None, content_encoding=None, ttl=None, return_none=False, headers=None, extra_info=None): """ Creates a new object in this container, and populates it with the given data. A StorageObject reference to the uploaded file will be returned, unless 'return_none' is set to True. The 'extra_info' parameter is included for backwards compatibility. It is no longer used at all, and will not be modified with swiftclient info, since swiftclient is not used any more. """ return self.create(obj_name=obj_name, data=data, content_type=content_type, etag=etag, content_encoding=content_encoding, ttl=ttl, return_none=return_none, headers=headers)
['def', 'store_object', '(', 'self', ',', 'obj_name', ',', 'data', ',', 'content_type', '=', 'None', ',', 'etag', '=', 'None', ',', 'content_encoding', '=', 'None', ',', 'ttl', '=', 'None', ',', 'return_none', '=', 'False', ',', 'headers', '=', 'None', ',', 'extra_info', '=', 'None', ')', ':', 'return', 'self', '.', 'create', '(', 'obj_name', '=', 'obj_name', ',', 'data', '=', 'data', ',', 'content_type', '=', 'content_type', ',', 'etag', '=', 'etag', ',', 'content_encoding', '=', 'content_encoding', ',', 'ttl', '=', 'ttl', ',', 'return_none', '=', 'return_none', ',', 'headers', '=', 'headers', ')']
Creates a new object in this container, and populates it with the given data. A StorageObject reference to the uploaded file will be returned, unless 'return_none' is set to True. The 'extra_info' parameter is included for backwards compatibility. It is no longer used at all, and will not be modified with swiftclient info, since swiftclient is not used any more.
['Creates', 'a', 'new', 'object', 'in', 'this', 'container', 'and', 'populates', 'it', 'with', 'the', 'given', 'data', '.', 'A', 'StorageObject', 'reference', 'to', 'the', 'uploaded', 'file', 'will', 'be', 'returned', 'unless', 'return_none', 'is', 'set', 'to', 'True', '.']
train
https://github.com/pycontribs/pyrax/blob/9ddfd5064b3a292d7337906f3b2d5dce95b50b99/pyrax/object_storage.py#L433-L448
3,555
gawel/irc3
irc3/plugins/command.py
print_help_page
def print_help_page(bot, file=sys.stdout): """print help page""" def p(text): print(text, file=file) plugin = bot.get_plugin(Commands) title = "Available Commands for {nick} at {host}".format(**bot.config) p("=" * len(title)) p(title) p("=" * len(title)) p('') p('.. contents::') p('') modules = {} for name, (predicates, callback) in plugin.items(): commands = modules.setdefault(callback.__module__, []) commands.append((name, callback, predicates)) for module in sorted(modules): p(module) p('=' * len(module)) p('') for name, callback, predicates in sorted(modules[module]): p(name) p('-' * len(name)) p('') doc = callback.__doc__ doc = doc.replace('%%', bot.config.cmd) for line in doc.split('\n'): line = line.strip() if line.startswith(bot.config.cmd): line = ' ``{}``'.format(line) p(line) if 'permission' in predicates: p('*Require {0[permission]} permission.*'.format(predicates)) if predicates.get('public', True) is False: p('*Only available in private.*') p('')
python
def print_help_page(bot, file=sys.stdout): """print help page""" def p(text): print(text, file=file) plugin = bot.get_plugin(Commands) title = "Available Commands for {nick} at {host}".format(**bot.config) p("=" * len(title)) p(title) p("=" * len(title)) p('') p('.. contents::') p('') modules = {} for name, (predicates, callback) in plugin.items(): commands = modules.setdefault(callback.__module__, []) commands.append((name, callback, predicates)) for module in sorted(modules): p(module) p('=' * len(module)) p('') for name, callback, predicates in sorted(modules[module]): p(name) p('-' * len(name)) p('') doc = callback.__doc__ doc = doc.replace('%%', bot.config.cmd) for line in doc.split('\n'): line = line.strip() if line.startswith(bot.config.cmd): line = ' ``{}``'.format(line) p(line) if 'permission' in predicates: p('*Require {0[permission]} permission.*'.format(predicates)) if predicates.get('public', True) is False: p('*Only available in private.*') p('')
['def', 'print_help_page', '(', 'bot', ',', 'file', '=', 'sys', '.', 'stdout', ')', ':', 'def', 'p', '(', 'text', ')', ':', 'print', '(', 'text', ',', 'file', '=', 'file', ')', 'plugin', '=', 'bot', '.', 'get_plugin', '(', 'Commands', ')', 'title', '=', '"Available Commands for {nick} at {host}"', '.', 'format', '(', '*', '*', 'bot', '.', 'config', ')', 'p', '(', '"="', '*', 'len', '(', 'title', ')', ')', 'p', '(', 'title', ')', 'p', '(', '"="', '*', 'len', '(', 'title', ')', ')', 'p', '(', "''", ')', 'p', '(', "'.. contents::'", ')', 'p', '(', "''", ')', 'modules', '=', '{', '}', 'for', 'name', ',', '(', 'predicates', ',', 'callback', ')', 'in', 'plugin', '.', 'items', '(', ')', ':', 'commands', '=', 'modules', '.', 'setdefault', '(', 'callback', '.', '__module__', ',', '[', ']', ')', 'commands', '.', 'append', '(', '(', 'name', ',', 'callback', ',', 'predicates', ')', ')', 'for', 'module', 'in', 'sorted', '(', 'modules', ')', ':', 'p', '(', 'module', ')', 'p', '(', "'='", '*', 'len', '(', 'module', ')', ')', 'p', '(', "''", ')', 'for', 'name', ',', 'callback', ',', 'predicates', 'in', 'sorted', '(', 'modules', '[', 'module', ']', ')', ':', 'p', '(', 'name', ')', 'p', '(', "'-'", '*', 'len', '(', 'name', ')', ')', 'p', '(', "''", ')', 'doc', '=', 'callback', '.', '__doc__', 'doc', '=', 'doc', '.', 'replace', '(', "'%%'", ',', 'bot', '.', 'config', '.', 'cmd', ')', 'for', 'line', 'in', 'doc', '.', 'split', '(', "'\\n'", ')', ':', 'line', '=', 'line', '.', 'strip', '(', ')', 'if', 'line', '.', 'startswith', '(', 'bot', '.', 'config', '.', 'cmd', ')', ':', 'line', '=', "' ``{}``'", '.', 'format', '(', 'line', ')', 'p', '(', 'line', ')', 'if', "'permission'", 'in', 'predicates', ':', 'p', '(', "'*Require {0[permission]} permission.*'", '.', 'format', '(', 'predicates', ')', ')', 'if', 'predicates', '.', 'get', '(', "'public'", ',', 'True', ')', 'is', 'False', ':', 'p', '(', "'*Only available in private.*'", ')', 'p', '(', "''", ')']
print help page
['print', 'help', 'page']
train
https://github.com/gawel/irc3/blob/cd27840a5809a1f803dc620860fe75d83d2a2ec8/irc3/plugins/command.py#L483-L519
3,556
sebdah/dynamic-dynamodb
dynamic_dynamodb/core/circuit_breaker.py
is_open
def is_open(table_name=None, table_key=None, gsi_name=None, gsi_key=None): """ Checks whether the circuit breaker is open :param table_name: Name of the table being checked :param table_key: Configuration key for table :param gsi_name: Name of the GSI being checked :param gsi_key: Configuration key for the GSI :returns: bool -- True if the circuit is open """ logger.debug('Checking circuit breaker status') # Parse the URL to make sure it is OK pattern = re.compile( r'^(?P<scheme>http(s)?://)' r'((?P<username>.+):(?P<password>.+)@){0,1}' r'(?P<url>.*)$' ) url = timeout = None if gsi_name: url = get_gsi_option(table_key, gsi_key, 'circuit_breaker_url') timeout = get_gsi_option(table_key, gsi_key, 'circuit_breaker_timeout') elif table_name: url = get_table_option(table_key, 'circuit_breaker_url') timeout = get_table_option(table_key, 'circuit_breaker_timeout') if not url: url = get_global_option('circuit_breaker_url') timeout = get_global_option('circuit_breaker_timeout') match = pattern.match(url) if not match: logger.error('Malformatted URL: {0}'.format(url)) sys.exit(1) use_basic_auth = False if match.group('username') and match.group('password'): use_basic_auth = True # Make the actual URL to call auth = () if use_basic_auth: url = '{scheme}{url}'.format( scheme=match.group('scheme'), url=match.group('url')) auth = (match.group('username'), match.group('password')) headers = {} if table_name: headers["x-table-name"] = table_name if gsi_name: headers["x-gsi-name"] = gsi_name # Make the actual request try: response = requests.get( url, auth=auth, timeout=timeout / 1000.00, headers=headers) if int(response.status_code) >= 200 and int(response.status_code) < 300: logger.info('Circuit breaker is closed') return False else: logger.warning( 'Circuit breaker returned with status code {0:d}'.format( response.status_code)) except requests.exceptions.SSLError as error: logger.warning('Circuit breaker: {0}'.format(error)) except requests.exceptions.Timeout as error: logger.warning('Circuit breaker: {0}'.format(error)) except requests.exceptions.ConnectionError as error: logger.warning('Circuit breaker: {0}'.format(error)) except requests.exceptions.HTTPError as error: logger.warning('Circuit breaker: {0}'.format(error)) except requests.exceptions.TooManyRedirects as error: logger.warning('Circuit breaker: {0}'.format(error)) except Exception as error: logger.error('Unhandled exception: {0}'.format(error)) logger.error( 'Please file a bug at ' 'https://github.com/sebdah/dynamic-dynamodb/issues') return True
python
def is_open(table_name=None, table_key=None, gsi_name=None, gsi_key=None): """ Checks whether the circuit breaker is open :param table_name: Name of the table being checked :param table_key: Configuration key for table :param gsi_name: Name of the GSI being checked :param gsi_key: Configuration key for the GSI :returns: bool -- True if the circuit is open """ logger.debug('Checking circuit breaker status') # Parse the URL to make sure it is OK pattern = re.compile( r'^(?P<scheme>http(s)?://)' r'((?P<username>.+):(?P<password>.+)@){0,1}' r'(?P<url>.*)$' ) url = timeout = None if gsi_name: url = get_gsi_option(table_key, gsi_key, 'circuit_breaker_url') timeout = get_gsi_option(table_key, gsi_key, 'circuit_breaker_timeout') elif table_name: url = get_table_option(table_key, 'circuit_breaker_url') timeout = get_table_option(table_key, 'circuit_breaker_timeout') if not url: url = get_global_option('circuit_breaker_url') timeout = get_global_option('circuit_breaker_timeout') match = pattern.match(url) if not match: logger.error('Malformatted URL: {0}'.format(url)) sys.exit(1) use_basic_auth = False if match.group('username') and match.group('password'): use_basic_auth = True # Make the actual URL to call auth = () if use_basic_auth: url = '{scheme}{url}'.format( scheme=match.group('scheme'), url=match.group('url')) auth = (match.group('username'), match.group('password')) headers = {} if table_name: headers["x-table-name"] = table_name if gsi_name: headers["x-gsi-name"] = gsi_name # Make the actual request try: response = requests.get( url, auth=auth, timeout=timeout / 1000.00, headers=headers) if int(response.status_code) >= 200 and int(response.status_code) < 300: logger.info('Circuit breaker is closed') return False else: logger.warning( 'Circuit breaker returned with status code {0:d}'.format( response.status_code)) except requests.exceptions.SSLError as error: logger.warning('Circuit breaker: {0}'.format(error)) except requests.exceptions.Timeout as error: logger.warning('Circuit breaker: {0}'.format(error)) except requests.exceptions.ConnectionError as error: logger.warning('Circuit breaker: {0}'.format(error)) except requests.exceptions.HTTPError as error: logger.warning('Circuit breaker: {0}'.format(error)) except requests.exceptions.TooManyRedirects as error: logger.warning('Circuit breaker: {0}'.format(error)) except Exception as error: logger.error('Unhandled exception: {0}'.format(error)) logger.error( 'Please file a bug at ' 'https://github.com/sebdah/dynamic-dynamodb/issues') return True
['def', 'is_open', '(', 'table_name', '=', 'None', ',', 'table_key', '=', 'None', ',', 'gsi_name', '=', 'None', ',', 'gsi_key', '=', 'None', ')', ':', 'logger', '.', 'debug', '(', "'Checking circuit breaker status'", ')', '# Parse the URL to make sure it is OK', 'pattern', '=', 're', '.', 'compile', '(', "r'^(?P<scheme>http(s)?://)'", "r'((?P<username>.+):(?P<password>.+)@){0,1}'", "r'(?P<url>.*)$'", ')', 'url', '=', 'timeout', '=', 'None', 'if', 'gsi_name', ':', 'url', '=', 'get_gsi_option', '(', 'table_key', ',', 'gsi_key', ',', "'circuit_breaker_url'", ')', 'timeout', '=', 'get_gsi_option', '(', 'table_key', ',', 'gsi_key', ',', "'circuit_breaker_timeout'", ')', 'elif', 'table_name', ':', 'url', '=', 'get_table_option', '(', 'table_key', ',', "'circuit_breaker_url'", ')', 'timeout', '=', 'get_table_option', '(', 'table_key', ',', "'circuit_breaker_timeout'", ')', 'if', 'not', 'url', ':', 'url', '=', 'get_global_option', '(', "'circuit_breaker_url'", ')', 'timeout', '=', 'get_global_option', '(', "'circuit_breaker_timeout'", ')', 'match', '=', 'pattern', '.', 'match', '(', 'url', ')', 'if', 'not', 'match', ':', 'logger', '.', 'error', '(', "'Malformatted URL: {0}'", '.', 'format', '(', 'url', ')', ')', 'sys', '.', 'exit', '(', '1', ')', 'use_basic_auth', '=', 'False', 'if', 'match', '.', 'group', '(', "'username'", ')', 'and', 'match', '.', 'group', '(', "'password'", ')', ':', 'use_basic_auth', '=', 'True', '# Make the actual URL to call', 'auth', '=', '(', ')', 'if', 'use_basic_auth', ':', 'url', '=', "'{scheme}{url}'", '.', 'format', '(', 'scheme', '=', 'match', '.', 'group', '(', "'scheme'", ')', ',', 'url', '=', 'match', '.', 'group', '(', "'url'", ')', ')', 'auth', '=', '(', 'match', '.', 'group', '(', "'username'", ')', ',', 'match', '.', 'group', '(', "'password'", ')', ')', 'headers', '=', '{', '}', 'if', 'table_name', ':', 'headers', '[', '"x-table-name"', ']', '=', 'table_name', 'if', 'gsi_name', ':', 'headers', '[', '"x-gsi-name"', ']', '=', 'gsi_name', '# Make the actual request', 'try', ':', 'response', '=', 'requests', '.', 'get', '(', 'url', ',', 'auth', '=', 'auth', ',', 'timeout', '=', 'timeout', '/', '1000.00', ',', 'headers', '=', 'headers', ')', 'if', 'int', '(', 'response', '.', 'status_code', ')', '>=', '200', 'and', 'int', '(', 'response', '.', 'status_code', ')', '<', '300', ':', 'logger', '.', 'info', '(', "'Circuit breaker is closed'", ')', 'return', 'False', 'else', ':', 'logger', '.', 'warning', '(', "'Circuit breaker returned with status code {0:d}'", '.', 'format', '(', 'response', '.', 'status_code', ')', ')', 'except', 'requests', '.', 'exceptions', '.', 'SSLError', 'as', 'error', ':', 'logger', '.', 'warning', '(', "'Circuit breaker: {0}'", '.', 'format', '(', 'error', ')', ')', 'except', 'requests', '.', 'exceptions', '.', 'Timeout', 'as', 'error', ':', 'logger', '.', 'warning', '(', "'Circuit breaker: {0}'", '.', 'format', '(', 'error', ')', ')', 'except', 'requests', '.', 'exceptions', '.', 'ConnectionError', 'as', 'error', ':', 'logger', '.', 'warning', '(', "'Circuit breaker: {0}'", '.', 'format', '(', 'error', ')', ')', 'except', 'requests', '.', 'exceptions', '.', 'HTTPError', 'as', 'error', ':', 'logger', '.', 'warning', '(', "'Circuit breaker: {0}'", '.', 'format', '(', 'error', ')', ')', 'except', 'requests', '.', 'exceptions', '.', 'TooManyRedirects', 'as', 'error', ':', 'logger', '.', 'warning', '(', "'Circuit breaker: {0}'", '.', 'format', '(', 'error', ')', ')', 'except', 'Exception', 'as', 'error', ':', 'logger', '.', 'error', '(', "'Unhandled exception: {0}'", '.', 'format', '(', 'error', ')', ')', 'logger', '.', 'error', '(', "'Please file a bug at '", "'https://github.com/sebdah/dynamic-dynamodb/issues'", ')', 'return', 'True']
Checks whether the circuit breaker is open :param table_name: Name of the table being checked :param table_key: Configuration key for table :param gsi_name: Name of the GSI being checked :param gsi_key: Configuration key for the GSI :returns: bool -- True if the circuit is open
['Checks', 'whether', 'the', 'circuit', 'breaker', 'is', 'open']
train
https://github.com/sebdah/dynamic-dynamodb/blob/bfd0ca806b1c3301e724696de90ef0f973410493/dynamic_dynamodb/core/circuit_breaker.py#L13-L97
3,557
rackerlabs/fleece
fleece/raxauth.py
validate
def validate(token): """Validate token and return auth context.""" token_url = TOKEN_URL_FMT.format(token=token) headers = { 'x-auth-token': token, 'accept': 'application/json', } resp = requests.get(token_url, headers=headers) if not resp.status_code == 200: raise HTTPError(status=401) return resp.json()
python
def validate(token): """Validate token and return auth context.""" token_url = TOKEN_URL_FMT.format(token=token) headers = { 'x-auth-token': token, 'accept': 'application/json', } resp = requests.get(token_url, headers=headers) if not resp.status_code == 200: raise HTTPError(status=401) return resp.json()
['def', 'validate', '(', 'token', ')', ':', 'token_url', '=', 'TOKEN_URL_FMT', '.', 'format', '(', 'token', '=', 'token', ')', 'headers', '=', '{', "'x-auth-token'", ':', 'token', ',', "'accept'", ':', "'application/json'", ',', '}', 'resp', '=', 'requests', '.', 'get', '(', 'token_url', ',', 'headers', '=', 'headers', ')', 'if', 'not', 'resp', '.', 'status_code', '==', '200', ':', 'raise', 'HTTPError', '(', 'status', '=', '401', ')', 'return', 'resp', '.', 'json', '(', ')']
Validate token and return auth context.
['Validate', 'token', 'and', 'return', 'auth', 'context', '.']
train
https://github.com/rackerlabs/fleece/blob/42d79dfa0777e99dbb09bc46105449a9be5dbaa9/fleece/raxauth.py#L23-L34
3,558
mitsei/dlkit
dlkit/services/cataloging.py
Catalog._set_catalog_view
def _set_catalog_view(self, session): """Sets the underlying catalog view to match current view""" if self._catalog_view == FEDERATED: try: session.use_federated_catalog_view() except AttributeError: pass else: try: session.use_isolated_catalog_view() except AttributeError: pass
python
def _set_catalog_view(self, session): """Sets the underlying catalog view to match current view""" if self._catalog_view == FEDERATED: try: session.use_federated_catalog_view() except AttributeError: pass else: try: session.use_isolated_catalog_view() except AttributeError: pass
['def', '_set_catalog_view', '(', 'self', ',', 'session', ')', ':', 'if', 'self', '.', '_catalog_view', '==', 'FEDERATED', ':', 'try', ':', 'session', '.', 'use_federated_catalog_view', '(', ')', 'except', 'AttributeError', ':', 'pass', 'else', ':', 'try', ':', 'session', '.', 'use_isolated_catalog_view', '(', ')', 'except', 'AttributeError', ':', 'pass']
Sets the underlying catalog view to match current view
['Sets', 'the', 'underlying', 'catalog', 'view', 'to', 'match', 'current', 'view']
train
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/services/cataloging.py#L687-L698
3,559
dropbox/stone
stone/backend.py
CodeBackend.filter_out_none_valued_keys
def filter_out_none_valued_keys(self, d): # type: (typing.Dict[K, V]) -> typing.Dict[K, V] """Given a dict, returns a new dict with all the same key/values except for keys that had values of None.""" new_d = {} for k, v in d.items(): if v is not None: new_d[k] = v return new_d
python
def filter_out_none_valued_keys(self, d): # type: (typing.Dict[K, V]) -> typing.Dict[K, V] """Given a dict, returns a new dict with all the same key/values except for keys that had values of None.""" new_d = {} for k, v in d.items(): if v is not None: new_d[k] = v return new_d
['def', 'filter_out_none_valued_keys', '(', 'self', ',', 'd', ')', ':', '# type: (typing.Dict[K, V]) -> typing.Dict[K, V]', 'new_d', '=', '{', '}', 'for', 'k', ',', 'v', 'in', 'd', '.', 'items', '(', ')', ':', 'if', 'v', 'is', 'not', 'None', ':', 'new_d', '[', 'k', ']', '=', 'v', 'return', 'new_d']
Given a dict, returns a new dict with all the same key/values except for keys that had values of None.
['Given', 'a', 'dict', 'returns', 'a', 'new', 'dict', 'with', 'all', 'the', 'same', 'key', '/', 'values', 'except', 'for', 'keys', 'that', 'had', 'values', 'of', 'None', '.']
train
https://github.com/dropbox/stone/blob/2e95cbcd1c48e05cca68c919fd8d24adec6b0f58/stone/backend.py#L371-L379
3,560
awacha/sastool
sastool/misc/utils.py
flatten_hierarchical_dict
def flatten_hierarchical_dict(original_dict, separator='.', max_recursion_depth=None): """Flatten a dict. Inputs ------ original_dict: dict the dictionary to flatten separator: string, optional the separator item in the keys of the flattened dictionary max_recursion_depth: positive integer, optional the number of recursions to be done. None is infinte. Output ------ the flattened dictionary Notes ----- Each element of `original_dict` which is not an instance of `dict` (or of a subclass of it) is kept as is. The others are treated as follows. If ``original_dict['key_dict']`` is an instance of `dict` (or of a subclass of `dict`), a corresponding key of the form ``key_dict<separator><key_in_key_dict>`` will be created in ``original_dict`` with the value of ``original_dict['key_dict']['key_in_key_dict']``. If that value is a subclass of `dict` as well, the same procedure is repeated until the maximum recursion depth is reached. Only string keys are supported. """ if max_recursion_depth is not None and max_recursion_depth <= 0: # we reached the maximum recursion depth, refuse to go further return original_dict if max_recursion_depth is None: next_recursion_depth = None else: next_recursion_depth = max_recursion_depth - 1 dict1 = {} for k in original_dict: if not isinstance(original_dict[k], dict): dict1[k] = original_dict[k] else: dict_recursed = flatten_hierarchical_dict( original_dict[k], separator, next_recursion_depth) dict1.update( dict([(k + separator + x, dict_recursed[x]) for x in dict_recursed])) return dict1
python
def flatten_hierarchical_dict(original_dict, separator='.', max_recursion_depth=None): """Flatten a dict. Inputs ------ original_dict: dict the dictionary to flatten separator: string, optional the separator item in the keys of the flattened dictionary max_recursion_depth: positive integer, optional the number of recursions to be done. None is infinte. Output ------ the flattened dictionary Notes ----- Each element of `original_dict` which is not an instance of `dict` (or of a subclass of it) is kept as is. The others are treated as follows. If ``original_dict['key_dict']`` is an instance of `dict` (or of a subclass of `dict`), a corresponding key of the form ``key_dict<separator><key_in_key_dict>`` will be created in ``original_dict`` with the value of ``original_dict['key_dict']['key_in_key_dict']``. If that value is a subclass of `dict` as well, the same procedure is repeated until the maximum recursion depth is reached. Only string keys are supported. """ if max_recursion_depth is not None and max_recursion_depth <= 0: # we reached the maximum recursion depth, refuse to go further return original_dict if max_recursion_depth is None: next_recursion_depth = None else: next_recursion_depth = max_recursion_depth - 1 dict1 = {} for k in original_dict: if not isinstance(original_dict[k], dict): dict1[k] = original_dict[k] else: dict_recursed = flatten_hierarchical_dict( original_dict[k], separator, next_recursion_depth) dict1.update( dict([(k + separator + x, dict_recursed[x]) for x in dict_recursed])) return dict1
['def', 'flatten_hierarchical_dict', '(', 'original_dict', ',', 'separator', '=', "'.'", ',', 'max_recursion_depth', '=', 'None', ')', ':', 'if', 'max_recursion_depth', 'is', 'not', 'None', 'and', 'max_recursion_depth', '<=', '0', ':', '# we reached the maximum recursion depth, refuse to go further', 'return', 'original_dict', 'if', 'max_recursion_depth', 'is', 'None', ':', 'next_recursion_depth', '=', 'None', 'else', ':', 'next_recursion_depth', '=', 'max_recursion_depth', '-', '1', 'dict1', '=', '{', '}', 'for', 'k', 'in', 'original_dict', ':', 'if', 'not', 'isinstance', '(', 'original_dict', '[', 'k', ']', ',', 'dict', ')', ':', 'dict1', '[', 'k', ']', '=', 'original_dict', '[', 'k', ']', 'else', ':', 'dict_recursed', '=', 'flatten_hierarchical_dict', '(', 'original_dict', '[', 'k', ']', ',', 'separator', ',', 'next_recursion_depth', ')', 'dict1', '.', 'update', '(', 'dict', '(', '[', '(', 'k', '+', 'separator', '+', 'x', ',', 'dict_recursed', '[', 'x', ']', ')', 'for', 'x', 'in', 'dict_recursed', ']', ')', ')', 'return', 'dict1']
Flatten a dict. Inputs ------ original_dict: dict the dictionary to flatten separator: string, optional the separator item in the keys of the flattened dictionary max_recursion_depth: positive integer, optional the number of recursions to be done. None is infinte. Output ------ the flattened dictionary Notes ----- Each element of `original_dict` which is not an instance of `dict` (or of a subclass of it) is kept as is. The others are treated as follows. If ``original_dict['key_dict']`` is an instance of `dict` (or of a subclass of `dict`), a corresponding key of the form ``key_dict<separator><key_in_key_dict>`` will be created in ``original_dict`` with the value of ``original_dict['key_dict']['key_in_key_dict']``. If that value is a subclass of `dict` as well, the same procedure is repeated until the maximum recursion depth is reached. Only string keys are supported.
['Flatten', 'a', 'dict', '.']
train
https://github.com/awacha/sastool/blob/deaddfa3002f3f6818697e36139633b7e30427a3/sastool/misc/utils.py#L61-L107
3,561
softlayer/softlayer-python
SoftLayer/managers/vs_placement.py
PlacementManager.get_rule_id_from_name
def get_rule_id_from_name(self, name): """Finds the rule that matches name. SoftLayer_Virtual_PlacementGroup_Rule.getAllObjects doesn't support objectFilters. """ results = self.client.call('SoftLayer_Virtual_PlacementGroup_Rule', 'getAllObjects') return [result['id'] for result in results if result['keyName'] == name.upper()]
python
def get_rule_id_from_name(self, name): """Finds the rule that matches name. SoftLayer_Virtual_PlacementGroup_Rule.getAllObjects doesn't support objectFilters. """ results = self.client.call('SoftLayer_Virtual_PlacementGroup_Rule', 'getAllObjects') return [result['id'] for result in results if result['keyName'] == name.upper()]
['def', 'get_rule_id_from_name', '(', 'self', ',', 'name', ')', ':', 'results', '=', 'self', '.', 'client', '.', 'call', '(', "'SoftLayer_Virtual_PlacementGroup_Rule'", ',', "'getAllObjects'", ')', 'return', '[', 'result', '[', "'id'", ']', 'for', 'result', 'in', 'results', 'if', 'result', '[', "'keyName'", ']', '==', 'name', '.', 'upper', '(', ')', ']']
Finds the rule that matches name. SoftLayer_Virtual_PlacementGroup_Rule.getAllObjects doesn't support objectFilters.
['Finds', 'the', 'rule', 'that', 'matches', 'name', '.']
train
https://github.com/softlayer/softlayer-python/blob/9f181be08cc3668353b05a6de0cb324f52cff6fa/SoftLayer/managers/vs_placement.py#L94-L100
3,562
pinterest/pymemcache
pymemcache/client/base.py
Client.decr
def decr(self, key, value, noreply=False): """ The memcached "decr" command. Args: key: str, see class docs for details. value: int, the amount by which to increment the value. noreply: optional bool, False to wait for the reply (the default). Returns: If noreply is True, always returns None. Otherwise returns the new value of the key, or None if the key wasn't found. """ key = self.check_key(key) cmd = b'decr ' + key + b' ' + six.text_type(value).encode('ascii') if noreply: cmd += b' noreply' cmd += b'\r\n' results = self._misc_cmd([cmd], b'decr', noreply) if noreply: return None if results[0] == b'NOT_FOUND': return None return int(results[0])
python
def decr(self, key, value, noreply=False): """ The memcached "decr" command. Args: key: str, see class docs for details. value: int, the amount by which to increment the value. noreply: optional bool, False to wait for the reply (the default). Returns: If noreply is True, always returns None. Otherwise returns the new value of the key, or None if the key wasn't found. """ key = self.check_key(key) cmd = b'decr ' + key + b' ' + six.text_type(value).encode('ascii') if noreply: cmd += b' noreply' cmd += b'\r\n' results = self._misc_cmd([cmd], b'decr', noreply) if noreply: return None if results[0] == b'NOT_FOUND': return None return int(results[0])
['def', 'decr', '(', 'self', ',', 'key', ',', 'value', ',', 'noreply', '=', 'False', ')', ':', 'key', '=', 'self', '.', 'check_key', '(', 'key', ')', 'cmd', '=', "b'decr '", '+', 'key', '+', "b' '", '+', 'six', '.', 'text_type', '(', 'value', ')', '.', 'encode', '(', "'ascii'", ')', 'if', 'noreply', ':', 'cmd', '+=', "b' noreply'", 'cmd', '+=', "b'\\r\\n'", 'results', '=', 'self', '.', '_misc_cmd', '(', '[', 'cmd', ']', ',', "b'decr'", ',', 'noreply', ')', 'if', 'noreply', ':', 'return', 'None', 'if', 'results', '[', '0', ']', '==', "b'NOT_FOUND'", ':', 'return', 'None', 'return', 'int', '(', 'results', '[', '0', ']', ')']
The memcached "decr" command. Args: key: str, see class docs for details. value: int, the amount by which to increment the value. noreply: optional bool, False to wait for the reply (the default). Returns: If noreply is True, always returns None. Otherwise returns the new value of the key, or None if the key wasn't found.
['The', 'memcached', 'decr', 'command', '.']
train
https://github.com/pinterest/pymemcache/blob/f3a348f4ce2248cce8b398e93e08d984fb9100e5/pymemcache/client/base.py#L585-L608
3,563
apple/turicreate
deps/src/libxml2-2.9.1/python/libxml2.py
xmlDoc.setTreeDoc
def setTreeDoc(self, tree): """update all nodes under the tree to point to the right document """ if tree is None: tree__o = None else: tree__o = tree._o libxml2mod.xmlSetTreeDoc(tree__o, self._o)
python
def setTreeDoc(self, tree): """update all nodes under the tree to point to the right document """ if tree is None: tree__o = None else: tree__o = tree._o libxml2mod.xmlSetTreeDoc(tree__o, self._o)
['def', 'setTreeDoc', '(', 'self', ',', 'tree', ')', ':', 'if', 'tree', 'is', 'None', ':', 'tree__o', '=', 'None', 'else', ':', 'tree__o', '=', 'tree', '.', '_o', 'libxml2mod', '.', 'xmlSetTreeDoc', '(', 'tree__o', ',', 'self', '.', '_o', ')']
update all nodes under the tree to point to the right document
['update', 'all', 'nodes', 'under', 'the', 'tree', 'to', 'point', 'to', 'the', 'right', 'document']
train
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/libxml2-2.9.1/python/libxml2.py#L4566-L4571
3,564
pyQode/pyqode.core
pyqode/core/widgets/tabs.py
TabWidget._rename_duplicate_tabs
def _rename_duplicate_tabs(self, current, name, path): """ Rename tabs whose title is the same as the name """ for i in range(self.count()): if self.widget(i)._tab_name == name and self.widget(i) != current: file_path = self.widget(i).file.path if file_path: parent_dir = os.path.split(os.path.abspath( os.path.join(file_path, os.pardir)))[1] new_name = os.path.join(parent_dir, name) self.setTabText(i, new_name) self.widget(i)._tab_name = new_name break if path: parent_dir = os.path.split(os.path.abspath( os.path.join(path, os.pardir)))[1] return os.path.join(parent_dir, name) else: return name
python
def _rename_duplicate_tabs(self, current, name, path): """ Rename tabs whose title is the same as the name """ for i in range(self.count()): if self.widget(i)._tab_name == name and self.widget(i) != current: file_path = self.widget(i).file.path if file_path: parent_dir = os.path.split(os.path.abspath( os.path.join(file_path, os.pardir)))[1] new_name = os.path.join(parent_dir, name) self.setTabText(i, new_name) self.widget(i)._tab_name = new_name break if path: parent_dir = os.path.split(os.path.abspath( os.path.join(path, os.pardir)))[1] return os.path.join(parent_dir, name) else: return name
['def', '_rename_duplicate_tabs', '(', 'self', ',', 'current', ',', 'name', ',', 'path', ')', ':', 'for', 'i', 'in', 'range', '(', 'self', '.', 'count', '(', ')', ')', ':', 'if', 'self', '.', 'widget', '(', 'i', ')', '.', '_tab_name', '==', 'name', 'and', 'self', '.', 'widget', '(', 'i', ')', '!=', 'current', ':', 'file_path', '=', 'self', '.', 'widget', '(', 'i', ')', '.', 'file', '.', 'path', 'if', 'file_path', ':', 'parent_dir', '=', 'os', '.', 'path', '.', 'split', '(', 'os', '.', 'path', '.', 'abspath', '(', 'os', '.', 'path', '.', 'join', '(', 'file_path', ',', 'os', '.', 'pardir', ')', ')', ')', '[', '1', ']', 'new_name', '=', 'os', '.', 'path', '.', 'join', '(', 'parent_dir', ',', 'name', ')', 'self', '.', 'setTabText', '(', 'i', ',', 'new_name', ')', 'self', '.', 'widget', '(', 'i', ')', '.', '_tab_name', '=', 'new_name', 'break', 'if', 'path', ':', 'parent_dir', '=', 'os', '.', 'path', '.', 'split', '(', 'os', '.', 'path', '.', 'abspath', '(', 'os', '.', 'path', '.', 'join', '(', 'path', ',', 'os', '.', 'pardir', ')', ')', ')', '[', '1', ']', 'return', 'os', '.', 'path', '.', 'join', '(', 'parent_dir', ',', 'name', ')', 'else', ':', 'return', 'name']
Rename tabs whose title is the same as the name
['Rename', 'tabs', 'whose', 'title', 'is', 'the', 'same', 'as', 'the', 'name']
train
https://github.com/pyQode/pyqode.core/blob/a99ec6cd22d519394f613309412f8329dc4e90cb/pyqode/core/widgets/tabs.py#L310-L329
3,565
saltstack/salt
salt/modules/solaris_user.py
chhome
def chhome(name, home, persist=False): ''' Set a new home directory for an existing user name Username to modify home New home directory to set persist : False Set to ``True`` to prevent configuration files in the new home directory from being overwritten by the files from the skeleton directory. CLI Example: .. code-block:: bash salt '*' user.chhome foo /home/users/foo True ''' pre_info = info(name) if not pre_info: raise CommandExecutionError( 'User \'{0}\' does not exist'.format(name) ) if home == pre_info['home']: return True cmd = ['usermod', '-d', home] if persist: cmd.append('-m') cmd.append(name) __salt__['cmd.run'](cmd, python_shell=False) return info(name).get('home') == home
python
def chhome(name, home, persist=False): ''' Set a new home directory for an existing user name Username to modify home New home directory to set persist : False Set to ``True`` to prevent configuration files in the new home directory from being overwritten by the files from the skeleton directory. CLI Example: .. code-block:: bash salt '*' user.chhome foo /home/users/foo True ''' pre_info = info(name) if not pre_info: raise CommandExecutionError( 'User \'{0}\' does not exist'.format(name) ) if home == pre_info['home']: return True cmd = ['usermod', '-d', home] if persist: cmd.append('-m') cmd.append(name) __salt__['cmd.run'](cmd, python_shell=False) return info(name).get('home') == home
['def', 'chhome', '(', 'name', ',', 'home', ',', 'persist', '=', 'False', ')', ':', 'pre_info', '=', 'info', '(', 'name', ')', 'if', 'not', 'pre_info', ':', 'raise', 'CommandExecutionError', '(', "'User \\'{0}\\' does not exist'", '.', 'format', '(', 'name', ')', ')', 'if', 'home', '==', 'pre_info', '[', "'home'", ']', ':', 'return', 'True', 'cmd', '=', '[', "'usermod'", ',', "'-d'", ',', 'home', ']', 'if', 'persist', ':', 'cmd', '.', 'append', '(', "'-m'", ')', 'cmd', '.', 'append', '(', 'name', ')', '__salt__', '[', "'cmd.run'", ']', '(', 'cmd', ',', 'python_shell', '=', 'False', ')', 'return', 'info', '(', 'name', ')', '.', 'get', '(', "'home'", ')', '==', 'home']
Set a new home directory for an existing user name Username to modify home New home directory to set persist : False Set to ``True`` to prevent configuration files in the new home directory from being overwritten by the files from the skeleton directory. CLI Example: .. code-block:: bash salt '*' user.chhome foo /home/users/foo True
['Set', 'a', 'new', 'home', 'directory', 'for', 'an', 'existing', 'user']
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/solaris_user.py#L270-L303
3,566
SBRG/ssbio
ssbio/protein/sequence/utils/fasta.py
load_fasta_file_as_dict_of_seqs
def load_fasta_file_as_dict_of_seqs(filename): """Load a FASTA file and return the sequences as a dict of {ID: sequence string} Args: filename (str): Path to the FASTA file to load Returns: dict: Dictionary of IDs to their sequence strings """ results = {} records = load_fasta_file(filename) for r in records: results[r.id] = str(r.seq) return results
python
def load_fasta_file_as_dict_of_seqs(filename): """Load a FASTA file and return the sequences as a dict of {ID: sequence string} Args: filename (str): Path to the FASTA file to load Returns: dict: Dictionary of IDs to their sequence strings """ results = {} records = load_fasta_file(filename) for r in records: results[r.id] = str(r.seq) return results
['def', 'load_fasta_file_as_dict_of_seqs', '(', 'filename', ')', ':', 'results', '=', '{', '}', 'records', '=', 'load_fasta_file', '(', 'filename', ')', 'for', 'r', 'in', 'records', ':', 'results', '[', 'r', '.', 'id', ']', '=', 'str', '(', 'r', '.', 'seq', ')', 'return', 'results']
Load a FASTA file and return the sequences as a dict of {ID: sequence string} Args: filename (str): Path to the FASTA file to load Returns: dict: Dictionary of IDs to their sequence strings
['Load', 'a', 'FASTA', 'file', 'and', 'return', 'the', 'sequences', 'as', 'a', 'dict', 'of', '{', 'ID', ':', 'sequence', 'string', '}']
train
https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/protein/sequence/utils/fasta.py#L93-L109
3,567
manns/pyspread
pyspread/src/gui/_toolbars.py
AttributesToolbar.OnUpdate
def OnUpdate(self, event): """Updates the toolbar states""" attributes = event.attr self._update_font(attributes["textfont"]) self._update_pointsize(attributes["pointsize"]) self._update_font_weight(attributes["fontweight"]) self._update_font_style(attributes["fontstyle"]) self._update_frozencell(attributes["frozen"]) self._update_lockedcell(attributes["locked"]) self._update_markupcell(attributes["markup"]) self._update_underline(attributes["underline"]) self._update_strikethrough(attributes["strikethrough"]) self._update_justification(attributes["justification"]) self._update_alignment(attributes["vertical_align"]) self._update_fontcolor(attributes["textcolor"]) self._update_merge(attributes["merge_area"] is not None) self._update_textrotation(attributes["angle"]) self._update_bgbrush(attributes["bgcolor"]) self._update_bordercolor(attributes["bordercolor_bottom"]) self._update_borderwidth(attributes["borderwidth_bottom"]) self.Refresh() event.Skip()
python
def OnUpdate(self, event): """Updates the toolbar states""" attributes = event.attr self._update_font(attributes["textfont"]) self._update_pointsize(attributes["pointsize"]) self._update_font_weight(attributes["fontweight"]) self._update_font_style(attributes["fontstyle"]) self._update_frozencell(attributes["frozen"]) self._update_lockedcell(attributes["locked"]) self._update_markupcell(attributes["markup"]) self._update_underline(attributes["underline"]) self._update_strikethrough(attributes["strikethrough"]) self._update_justification(attributes["justification"]) self._update_alignment(attributes["vertical_align"]) self._update_fontcolor(attributes["textcolor"]) self._update_merge(attributes["merge_area"] is not None) self._update_textrotation(attributes["angle"]) self._update_bgbrush(attributes["bgcolor"]) self._update_bordercolor(attributes["bordercolor_bottom"]) self._update_borderwidth(attributes["borderwidth_bottom"]) self.Refresh() event.Skip()
['def', 'OnUpdate', '(', 'self', ',', 'event', ')', ':', 'attributes', '=', 'event', '.', 'attr', 'self', '.', '_update_font', '(', 'attributes', '[', '"textfont"', ']', ')', 'self', '.', '_update_pointsize', '(', 'attributes', '[', '"pointsize"', ']', ')', 'self', '.', '_update_font_weight', '(', 'attributes', '[', '"fontweight"', ']', ')', 'self', '.', '_update_font_style', '(', 'attributes', '[', '"fontstyle"', ']', ')', 'self', '.', '_update_frozencell', '(', 'attributes', '[', '"frozen"', ']', ')', 'self', '.', '_update_lockedcell', '(', 'attributes', '[', '"locked"', ']', ')', 'self', '.', '_update_markupcell', '(', 'attributes', '[', '"markup"', ']', ')', 'self', '.', '_update_underline', '(', 'attributes', '[', '"underline"', ']', ')', 'self', '.', '_update_strikethrough', '(', 'attributes', '[', '"strikethrough"', ']', ')', 'self', '.', '_update_justification', '(', 'attributes', '[', '"justification"', ']', ')', 'self', '.', '_update_alignment', '(', 'attributes', '[', '"vertical_align"', ']', ')', 'self', '.', '_update_fontcolor', '(', 'attributes', '[', '"textcolor"', ']', ')', 'self', '.', '_update_merge', '(', 'attributes', '[', '"merge_area"', ']', 'is', 'not', 'None', ')', 'self', '.', '_update_textrotation', '(', 'attributes', '[', '"angle"', ']', ')', 'self', '.', '_update_bgbrush', '(', 'attributes', '[', '"bgcolor"', ']', ')', 'self', '.', '_update_bordercolor', '(', 'attributes', '[', '"bordercolor_bottom"', ']', ')', 'self', '.', '_update_borderwidth', '(', 'attributes', '[', '"borderwidth_bottom"', ']', ')', 'self', '.', 'Refresh', '(', ')', 'event', '.', 'Skip', '(', ')']
Updates the toolbar states
['Updates', 'the', 'toolbar', 'states']
train
https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/gui/_toolbars.py#L948-L973
3,568
odlgroup/odl
odl/discr/discr_ops.py
ResizingOperator.adjoint
def adjoint(self): """Adjoint of this operator.""" if not self.is_linear: raise NotImplementedError('this operator is not linear and ' 'thus has no adjoint') forward_op = self class ResizingOperatorAdjoint(ResizingOperatorBase): """Adjoint of `ResizingOperator`. See `the online documentation <https://odlgroup.github.io/odl/math/resizing_ops.html>`_ on resizing operators for mathematical details. """ def _call(self, x, out): """Implement ``self(x, out)``.""" with writable_array(out) as out_arr: resize_array(x.asarray(), self.range.shape, offset=self.offset, pad_mode=self.pad_mode, pad_const=0, direction='adjoint', out=out_arr) @property def adjoint(self): """Adjoint of the adjoint, i.e. the original operator.""" return forward_op @property def inverse(self): """(Pseudo-)Inverse of this operator. Note that in axes where ``self`` extends, the returned operator acts as a proper inverse, while in restriction axes, the operation is not invertible. """ return ResizingOperatorAdjoint( domain=self.range, range=self.domain, pad_mode=self.pad_mode) return ResizingOperatorAdjoint(domain=self.range, range=self.domain, pad_mode=self.pad_mode)
python
def adjoint(self): """Adjoint of this operator.""" if not self.is_linear: raise NotImplementedError('this operator is not linear and ' 'thus has no adjoint') forward_op = self class ResizingOperatorAdjoint(ResizingOperatorBase): """Adjoint of `ResizingOperator`. See `the online documentation <https://odlgroup.github.io/odl/math/resizing_ops.html>`_ on resizing operators for mathematical details. """ def _call(self, x, out): """Implement ``self(x, out)``.""" with writable_array(out) as out_arr: resize_array(x.asarray(), self.range.shape, offset=self.offset, pad_mode=self.pad_mode, pad_const=0, direction='adjoint', out=out_arr) @property def adjoint(self): """Adjoint of the adjoint, i.e. the original operator.""" return forward_op @property def inverse(self): """(Pseudo-)Inverse of this operator. Note that in axes where ``self`` extends, the returned operator acts as a proper inverse, while in restriction axes, the operation is not invertible. """ return ResizingOperatorAdjoint( domain=self.range, range=self.domain, pad_mode=self.pad_mode) return ResizingOperatorAdjoint(domain=self.range, range=self.domain, pad_mode=self.pad_mode)
['def', 'adjoint', '(', 'self', ')', ':', 'if', 'not', 'self', '.', 'is_linear', ':', 'raise', 'NotImplementedError', '(', "'this operator is not linear and '", "'thus has no adjoint'", ')', 'forward_op', '=', 'self', 'class', 'ResizingOperatorAdjoint', '(', 'ResizingOperatorBase', ')', ':', '"""Adjoint of `ResizingOperator`.\n\n See `the online documentation\n <https://odlgroup.github.io/odl/math/resizing_ops.html>`_\n on resizing operators for mathematical details.\n """', 'def', '_call', '(', 'self', ',', 'x', ',', 'out', ')', ':', '"""Implement ``self(x, out)``."""', 'with', 'writable_array', '(', 'out', ')', 'as', 'out_arr', ':', 'resize_array', '(', 'x', '.', 'asarray', '(', ')', ',', 'self', '.', 'range', '.', 'shape', ',', 'offset', '=', 'self', '.', 'offset', ',', 'pad_mode', '=', 'self', '.', 'pad_mode', ',', 'pad_const', '=', '0', ',', 'direction', '=', "'adjoint'", ',', 'out', '=', 'out_arr', ')', '@', 'property', 'def', 'adjoint', '(', 'self', ')', ':', '"""Adjoint of the adjoint, i.e. the original operator."""', 'return', 'forward_op', '@', 'property', 'def', 'inverse', '(', 'self', ')', ':', '"""(Pseudo-)Inverse of this operator.\n\n Note that in axes where ``self`` extends, the returned operator\n acts as a proper inverse, while in restriction axes, the\n operation is not invertible.\n """', 'return', 'ResizingOperatorAdjoint', '(', 'domain', '=', 'self', '.', 'range', ',', 'range', '=', 'self', '.', 'domain', ',', 'pad_mode', '=', 'self', '.', 'pad_mode', ')', 'return', 'ResizingOperatorAdjoint', '(', 'domain', '=', 'self', '.', 'range', ',', 'range', '=', 'self', '.', 'domain', ',', 'pad_mode', '=', 'self', '.', 'pad_mode', ')']
Adjoint of this operator.
['Adjoint', 'of', 'this', 'operator', '.']
train
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/discr/discr_ops.py#L383-L426
3,569
welbornprod/colr
colr/__main__.py
try_int
def try_int(s, default=None, minimum=None): """ Try parsing a string into an integer. If None is passed, default is returned. On failure, InvalidNumber is raised. """ if not s: return default try: val = int(s) except (TypeError, ValueError): raise InvalidNumber(s) if (minimum is not None) and (val < minimum): val = minimum return val
python
def try_int(s, default=None, minimum=None): """ Try parsing a string into an integer. If None is passed, default is returned. On failure, InvalidNumber is raised. """ if not s: return default try: val = int(s) except (TypeError, ValueError): raise InvalidNumber(s) if (minimum is not None) and (val < minimum): val = minimum return val
['def', 'try_int', '(', 's', ',', 'default', '=', 'None', ',', 'minimum', '=', 'None', ')', ':', 'if', 'not', 's', ':', 'return', 'default', 'try', ':', 'val', '=', 'int', '(', 's', ')', 'except', '(', 'TypeError', ',', 'ValueError', ')', ':', 'raise', 'InvalidNumber', '(', 's', ')', 'if', '(', 'minimum', 'is', 'not', 'None', ')', 'and', '(', 'val', '<', 'minimum', ')', ':', 'val', '=', 'minimum', 'return', 'val']
Try parsing a string into an integer. If None is passed, default is returned. On failure, InvalidNumber is raised.
['Try', 'parsing', 'a', 'string', 'into', 'an', 'integer', '.', 'If', 'None', 'is', 'passed', 'default', 'is', 'returned', '.', 'On', 'failure', 'InvalidNumber', 'is', 'raised', '.']
train
https://github.com/welbornprod/colr/blob/417117fdbddbc53142096685ac2af006b2bd0220/colr/__main__.py#L512-L525
3,570
LandRegistry/lr-utils
lrutils/errorhandler/errorhandler_utils.py
setup_errors
def setup_errors(app, error_template="error.html"): """Add a handler for each of the available HTTP error responses.""" def error_handler(error): if isinstance(error, HTTPException): description = error.get_description(request.environ) code = error.code name = error.name else: description = error code = 500 name = "Internal Server Error" return render_template(error_template, error=error, code=code, name=Markup(name), description=Markup(description)), code for exception in default_exceptions: app.register_error_handler(exception, error_handler)
python
def setup_errors(app, error_template="error.html"): """Add a handler for each of the available HTTP error responses.""" def error_handler(error): if isinstance(error, HTTPException): description = error.get_description(request.environ) code = error.code name = error.name else: description = error code = 500 name = "Internal Server Error" return render_template(error_template, error=error, code=code, name=Markup(name), description=Markup(description)), code for exception in default_exceptions: app.register_error_handler(exception, error_handler)
['def', 'setup_errors', '(', 'app', ',', 'error_template', '=', '"error.html"', ')', ':', 'def', 'error_handler', '(', 'error', ')', ':', 'if', 'isinstance', '(', 'error', ',', 'HTTPException', ')', ':', 'description', '=', 'error', '.', 'get_description', '(', 'request', '.', 'environ', ')', 'code', '=', 'error', '.', 'code', 'name', '=', 'error', '.', 'name', 'else', ':', 'description', '=', 'error', 'code', '=', '500', 'name', '=', '"Internal Server Error"', 'return', 'render_template', '(', 'error_template', ',', 'error', '=', 'error', ',', 'code', '=', 'code', ',', 'name', '=', 'Markup', '(', 'name', ')', ',', 'description', '=', 'Markup', '(', 'description', ')', ')', ',', 'code', 'for', 'exception', 'in', 'default_exceptions', ':', 'app', '.', 'register_error_handler', '(', 'exception', ',', 'error_handler', ')']
Add a handler for each of the available HTTP error responses.
['Add', 'a', 'handler', 'for', 'each', 'of', 'the', 'available', 'HTTP', 'error', 'responses', '.']
train
https://github.com/LandRegistry/lr-utils/blob/811c9e5c11678a04ee203fa55a7c75080f4f9d89/lrutils/errorhandler/errorhandler_utils.py#L40-L58
3,571
OnroerendErfgoed/crabpy_pyramid
crabpy_pyramid/renderers/crab.py
item_wegobject_adapter
def item_wegobject_adapter(obj, request): """ Adapter for rendering a list of :class:`crabpy.gateway.Wegobject` to json. """ return { 'id': obj.id, 'aard': { 'id': obj.aard.id, 'naam': obj.aard.naam, 'definitie': obj.aard.definitie }, 'centroid': obj.centroid, 'bounding_box': obj.bounding_box, 'metadata': { 'begin_tijd': obj.metadata.begin_tijd, 'begin_datum': obj.metadata.begin_datum, 'begin_bewerking': { 'id': obj.metadata.begin_bewerking.id, 'naam': obj.metadata.begin_bewerking.naam, 'definitie': obj.metadata.begin_bewerking.definitie }, 'begin_organisatie': { 'id': obj.metadata.begin_organisatie.id, 'naam': obj.metadata.begin_organisatie.naam, 'definitie': obj.metadata.begin_organisatie.definitie } } }
python
def item_wegobject_adapter(obj, request): """ Adapter for rendering a list of :class:`crabpy.gateway.Wegobject` to json. """ return { 'id': obj.id, 'aard': { 'id': obj.aard.id, 'naam': obj.aard.naam, 'definitie': obj.aard.definitie }, 'centroid': obj.centroid, 'bounding_box': obj.bounding_box, 'metadata': { 'begin_tijd': obj.metadata.begin_tijd, 'begin_datum': obj.metadata.begin_datum, 'begin_bewerking': { 'id': obj.metadata.begin_bewerking.id, 'naam': obj.metadata.begin_bewerking.naam, 'definitie': obj.metadata.begin_bewerking.definitie }, 'begin_organisatie': { 'id': obj.metadata.begin_organisatie.id, 'naam': obj.metadata.begin_organisatie.naam, 'definitie': obj.metadata.begin_organisatie.definitie } } }
['def', 'item_wegobject_adapter', '(', 'obj', ',', 'request', ')', ':', 'return', '{', "'id'", ':', 'obj', '.', 'id', ',', "'aard'", ':', '{', "'id'", ':', 'obj', '.', 'aard', '.', 'id', ',', "'naam'", ':', 'obj', '.', 'aard', '.', 'naam', ',', "'definitie'", ':', 'obj', '.', 'aard', '.', 'definitie', '}', ',', "'centroid'", ':', 'obj', '.', 'centroid', ',', "'bounding_box'", ':', 'obj', '.', 'bounding_box', ',', "'metadata'", ':', '{', "'begin_tijd'", ':', 'obj', '.', 'metadata', '.', 'begin_tijd', ',', "'begin_datum'", ':', 'obj', '.', 'metadata', '.', 'begin_datum', ',', "'begin_bewerking'", ':', '{', "'id'", ':', 'obj', '.', 'metadata', '.', 'begin_bewerking', '.', 'id', ',', "'naam'", ':', 'obj', '.', 'metadata', '.', 'begin_bewerking', '.', 'naam', ',', "'definitie'", ':', 'obj', '.', 'metadata', '.', 'begin_bewerking', '.', 'definitie', '}', ',', "'begin_organisatie'", ':', '{', "'id'", ':', 'obj', '.', 'metadata', '.', 'begin_organisatie', '.', 'id', ',', "'naam'", ':', 'obj', '.', 'metadata', '.', 'begin_organisatie', '.', 'naam', ',', "'definitie'", ':', 'obj', '.', 'metadata', '.', 'begin_organisatie', '.', 'definitie', '}', '}', '}']
Adapter for rendering a list of :class:`crabpy.gateway.Wegobject` to json.
['Adapter', 'for', 'rendering', 'a', 'list', 'of', ':', 'class', ':', 'crabpy', '.', 'gateway', '.', 'Wegobject', 'to', 'json', '.']
train
https://github.com/OnroerendErfgoed/crabpy_pyramid/blob/b727ea55838d71575db96e987b536a0bac9f6a7a/crabpy_pyramid/renderers/crab.py#L402-L430
3,572
googleapis/google-cloud-python
bigquery/noxfile.py
default
def default(session): """Default unit test session. This is intended to be run **without** an interpreter set, so that the current ``python`` (on the ``PATH``) or the version of Python corresponding to the ``nox`` binary the ``PATH`` can run the tests. """ # Install all test dependencies, then install local packages in-place. session.install("mock", "pytest", "pytest-cov") for local_dep in LOCAL_DEPS: session.install("-e", local_dep) # Pyarrow does not support Python 3.7 dev_install = ".[all]" session.install("-e", dev_install) # IPython does not support Python 2 after version 5.x if session.python == "2.7": session.install("ipython==5.5") else: session.install("ipython") # Run py.test against the unit tests. session.run( "py.test", "--quiet", "--cov=google.cloud.bigquery", "--cov=tests.unit", "--cov-append", "--cov-config=.coveragerc", "--cov-report=", "--cov-fail-under=97", os.path.join("tests", "unit"), *session.posargs )
python
def default(session): """Default unit test session. This is intended to be run **without** an interpreter set, so that the current ``python`` (on the ``PATH``) or the version of Python corresponding to the ``nox`` binary the ``PATH`` can run the tests. """ # Install all test dependencies, then install local packages in-place. session.install("mock", "pytest", "pytest-cov") for local_dep in LOCAL_DEPS: session.install("-e", local_dep) # Pyarrow does not support Python 3.7 dev_install = ".[all]" session.install("-e", dev_install) # IPython does not support Python 2 after version 5.x if session.python == "2.7": session.install("ipython==5.5") else: session.install("ipython") # Run py.test against the unit tests. session.run( "py.test", "--quiet", "--cov=google.cloud.bigquery", "--cov=tests.unit", "--cov-append", "--cov-config=.coveragerc", "--cov-report=", "--cov-fail-under=97", os.path.join("tests", "unit"), *session.posargs )
['def', 'default', '(', 'session', ')', ':', '# Install all test dependencies, then install local packages in-place.', 'session', '.', 'install', '(', '"mock"', ',', '"pytest"', ',', '"pytest-cov"', ')', 'for', 'local_dep', 'in', 'LOCAL_DEPS', ':', 'session', '.', 'install', '(', '"-e"', ',', 'local_dep', ')', '# Pyarrow does not support Python 3.7', 'dev_install', '=', '".[all]"', 'session', '.', 'install', '(', '"-e"', ',', 'dev_install', ')', '# IPython does not support Python 2 after version 5.x', 'if', 'session', '.', 'python', '==', '"2.7"', ':', 'session', '.', 'install', '(', '"ipython==5.5"', ')', 'else', ':', 'session', '.', 'install', '(', '"ipython"', ')', '# Run py.test against the unit tests.', 'session', '.', 'run', '(', '"py.test"', ',', '"--quiet"', ',', '"--cov=google.cloud.bigquery"', ',', '"--cov=tests.unit"', ',', '"--cov-append"', ',', '"--cov-config=.coveragerc"', ',', '"--cov-report="', ',', '"--cov-fail-under=97"', ',', 'os', '.', 'path', '.', 'join', '(', '"tests"', ',', '"unit"', ')', ',', '*', 'session', '.', 'posargs', ')']
Default unit test session. This is intended to be run **without** an interpreter set, so that the current ``python`` (on the ``PATH``) or the version of Python corresponding to the ``nox`` binary the ``PATH`` can run the tests.
['Default', 'unit', 'test', 'session', '.']
train
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/bigquery/noxfile.py#L28-L63
3,573
heitzmann/gdspy
gdspy/__init__.py
Cell.get_labels
def get_labels(self, depth=None): """ Returns a list with a copy of the labels in this cell. Parameters ---------- depth : integer or ``None`` If not ``None``, defines from how many reference levels to retrieve labels from. Returns ------- out : list of ``Label`` List containing the labels in this cell and its references. """ labels = libCopy.deepcopy(self.labels) if depth is None or depth > 0: for element in self.elements: if isinstance(element, CellReference): labels.extend( element.get_labels(None if depth is None else depth - 1)) elif isinstance(element, CellArray): labels.extend( element.get_labels(None if depth is None else depth - 1)) return labels
python
def get_labels(self, depth=None): """ Returns a list with a copy of the labels in this cell. Parameters ---------- depth : integer or ``None`` If not ``None``, defines from how many reference levels to retrieve labels from. Returns ------- out : list of ``Label`` List containing the labels in this cell and its references. """ labels = libCopy.deepcopy(self.labels) if depth is None or depth > 0: for element in self.elements: if isinstance(element, CellReference): labels.extend( element.get_labels(None if depth is None else depth - 1)) elif isinstance(element, CellArray): labels.extend( element.get_labels(None if depth is None else depth - 1)) return labels
['def', 'get_labels', '(', 'self', ',', 'depth', '=', 'None', ')', ':', 'labels', '=', 'libCopy', '.', 'deepcopy', '(', 'self', '.', 'labels', ')', 'if', 'depth', 'is', 'None', 'or', 'depth', '>', '0', ':', 'for', 'element', 'in', 'self', '.', 'elements', ':', 'if', 'isinstance', '(', 'element', ',', 'CellReference', ')', ':', 'labels', '.', 'extend', '(', 'element', '.', 'get_labels', '(', 'None', 'if', 'depth', 'is', 'None', 'else', 'depth', '-', '1', ')', ')', 'elif', 'isinstance', '(', 'element', ',', 'CellArray', ')', ':', 'labels', '.', 'extend', '(', 'element', '.', 'get_labels', '(', 'None', 'if', 'depth', 'is', 'None', 'else', 'depth', '-', '1', ')', ')', 'return', 'labels']
Returns a list with a copy of the labels in this cell. Parameters ---------- depth : integer or ``None`` If not ``None``, defines from how many reference levels to retrieve labels from. Returns ------- out : list of ``Label`` List containing the labels in this cell and its references.
['Returns', 'a', 'list', 'with', 'a', 'copy', 'of', 'the', 'labels', 'in', 'this', 'cell', '.']
train
https://github.com/heitzmann/gdspy/blob/2c8d1313248c544e2066d19095b7ad7158c79bc9/gdspy/__init__.py#L2499-L2525
3,574
cmollet/sridentify
sridentify/__init__.py
Sridentify.from_epsg
def from_epsg(self, epsg_code): """ Loads self.prj by epsg_code. If prjtext not found returns False. """ self.epsg_code = epsg_code assert isinstance(self.epsg_code, int) cur = self.conn.cursor() cur.execute("SELECT prjtext FROM prj_epsg WHERE epsg_code = ?", (self.epsg_code,)) result = cur.fetchone() if result is not None: self.prj = result[0] return True return False
python
def from_epsg(self, epsg_code): """ Loads self.prj by epsg_code. If prjtext not found returns False. """ self.epsg_code = epsg_code assert isinstance(self.epsg_code, int) cur = self.conn.cursor() cur.execute("SELECT prjtext FROM prj_epsg WHERE epsg_code = ?", (self.epsg_code,)) result = cur.fetchone() if result is not None: self.prj = result[0] return True return False
['def', 'from_epsg', '(', 'self', ',', 'epsg_code', ')', ':', 'self', '.', 'epsg_code', '=', 'epsg_code', 'assert', 'isinstance', '(', 'self', '.', 'epsg_code', ',', 'int', ')', 'cur', '=', 'self', '.', 'conn', '.', 'cursor', '(', ')', 'cur', '.', 'execute', '(', '"SELECT prjtext FROM prj_epsg WHERE epsg_code = ?"', ',', '(', 'self', '.', 'epsg_code', ',', ')', ')', 'result', '=', 'cur', '.', 'fetchone', '(', ')', 'if', 'result', 'is', 'not', 'None', ':', 'self', '.', 'prj', '=', 'result', '[', '0', ']', 'return', 'True', 'return', 'False']
Loads self.prj by epsg_code. If prjtext not found returns False.
['Loads', 'self', '.', 'prj', 'by', 'epsg_code', '.', 'If', 'prjtext', 'not', 'found', 'returns', 'False', '.']
train
https://github.com/cmollet/sridentify/blob/77248bd1e474f014ac8951dacd196fd3417c452c/sridentify/__init__.py#L172-L186
3,575
dpa-newslab/livebridge
livebridge/controller.py
Controller.stop_bridges
async def stop_bridges(self): """Stop all sleep tasks to allow bridges to end.""" for task in self.sleep_tasks: task.cancel() for bridge in self.bridges: bridge.stop()
python
async def stop_bridges(self): """Stop all sleep tasks to allow bridges to end.""" for task in self.sleep_tasks: task.cancel() for bridge in self.bridges: bridge.stop()
['async', 'def', 'stop_bridges', '(', 'self', ')', ':', 'for', 'task', 'in', 'self', '.', 'sleep_tasks', ':', 'task', '.', 'cancel', '(', ')', 'for', 'bridge', 'in', 'self', '.', 'bridges', ':', 'bridge', '.', 'stop', '(', ')']
Stop all sleep tasks to allow bridges to end.
['Stop', 'all', 'sleep', 'tasks', 'to', 'allow', 'bridges', 'to', 'end', '.']
train
https://github.com/dpa-newslab/livebridge/blob/d930e887faa2f882d15b574f0f1fe4a580d7c5fa/livebridge/controller.py#L50-L55
3,576
PSU-OIT-ARC/django-local-settings
local_settings/strategy.py
get_strategy_types
def get_strategy_types(): """Get a list of all :class:`Strategy` subclasses.""" def get_subtypes(type_): subtypes = type_.__subclasses__() for subtype in subtypes: subtypes.extend(get_subtypes(subtype)) return subtypes return get_subtypes(Strategy)
python
def get_strategy_types(): """Get a list of all :class:`Strategy` subclasses.""" def get_subtypes(type_): subtypes = type_.__subclasses__() for subtype in subtypes: subtypes.extend(get_subtypes(subtype)) return subtypes return get_subtypes(Strategy)
['def', 'get_strategy_types', '(', ')', ':', 'def', 'get_subtypes', '(', 'type_', ')', ':', 'subtypes', '=', 'type_', '.', '__subclasses__', '(', ')', 'for', 'subtype', 'in', 'subtypes', ':', 'subtypes', '.', 'extend', '(', 'get_subtypes', '(', 'subtype', ')', ')', 'return', 'subtypes', 'return', 'get_subtypes', '(', 'Strategy', ')']
Get a list of all :class:`Strategy` subclasses.
['Get', 'a', 'list', 'of', 'all', ':', 'class', ':', 'Strategy', 'subclasses', '.']
train
https://github.com/PSU-OIT-ARC/django-local-settings/blob/758810fbd9411c2046a187afcac6532155cac694/local_settings/strategy.py#L209-L216
3,577
IndicoDataSolutions/IndicoIo-python
indicoio/text/personas.py
personas
def personas(text, cloud=None, batch=False, api_key=None, version=None, **kwargs): """ Given input text, returns the authors likelihood of being 16 different personality types in a dict. Example usage: .. code-block:: python >>> text = "I love going out with my friends" >>> entities = indicoio.personas(text) {'architect': 0.2191890478134155, 'logician': 0.0158474326133728, 'commander': 0.07654544115066528 ...} :param text: The text to be analyzed. :type text: str or unicode :rtype: The authors 'Extraversion', 'Conscientiousness', 'Openness', and 'Agreeableness' score (a float between 0 and 1) in a dictionary. """ url_params = {"batch": batch, "api_key": api_key, "version": version} kwargs['persona'] = True return api_handler(text, cloud=cloud, api="personality", url_params=url_params, **kwargs)
python
def personas(text, cloud=None, batch=False, api_key=None, version=None, **kwargs): """ Given input text, returns the authors likelihood of being 16 different personality types in a dict. Example usage: .. code-block:: python >>> text = "I love going out with my friends" >>> entities = indicoio.personas(text) {'architect': 0.2191890478134155, 'logician': 0.0158474326133728, 'commander': 0.07654544115066528 ...} :param text: The text to be analyzed. :type text: str or unicode :rtype: The authors 'Extraversion', 'Conscientiousness', 'Openness', and 'Agreeableness' score (a float between 0 and 1) in a dictionary. """ url_params = {"batch": batch, "api_key": api_key, "version": version} kwargs['persona'] = True return api_handler(text, cloud=cloud, api="personality", url_params=url_params, **kwargs)
['def', 'personas', '(', 'text', ',', 'cloud', '=', 'None', ',', 'batch', '=', 'False', ',', 'api_key', '=', 'None', ',', 'version', '=', 'None', ',', '*', '*', 'kwargs', ')', ':', 'url_params', '=', '{', '"batch"', ':', 'batch', ',', '"api_key"', ':', 'api_key', ',', '"version"', ':', 'version', '}', 'kwargs', '[', "'persona'", ']', '=', 'True', 'return', 'api_handler', '(', 'text', ',', 'cloud', '=', 'cloud', ',', 'api', '=', '"personality"', ',', 'url_params', '=', 'url_params', ',', '*', '*', 'kwargs', ')']
Given input text, returns the authors likelihood of being 16 different personality types in a dict. Example usage: .. code-block:: python >>> text = "I love going out with my friends" >>> entities = indicoio.personas(text) {'architect': 0.2191890478134155, 'logician': 0.0158474326133728, 'commander': 0.07654544115066528 ...} :param text: The text to be analyzed. :type text: str or unicode :rtype: The authors 'Extraversion', 'Conscientiousness', 'Openness', and 'Agreeableness' score (a float between 0 and 1) in a dictionary.
['Given', 'input', 'text', 'returns', 'the', 'authors', 'likelihood', 'of', 'being', '16', 'different', 'personality', 'types', 'in', 'a', 'dict', '.']
train
https://github.com/IndicoDataSolutions/IndicoIo-python/blob/6f262a23f09d76fede63d1ccb87f9f7cf2cfc8aa/indicoio/text/personas.py#L6-L27
3,578
praekeltfoundation/seaworthy
seaworthy/stream/matchers.py
OrderedMatcher.args_str
def args_str(self): """ Return an args string for the repr. """ matched = [str(m) for m in self._matchers[:self._position]] unmatched = [str(m) for m in self._matchers[self._position:]] return 'matched=[{}], unmatched=[{}]'.format( ', '.join(matched), ', '.join(unmatched))
python
def args_str(self): """ Return an args string for the repr. """ matched = [str(m) for m in self._matchers[:self._position]] unmatched = [str(m) for m in self._matchers[self._position:]] return 'matched=[{}], unmatched=[{}]'.format( ', '.join(matched), ', '.join(unmatched))
['def', 'args_str', '(', 'self', ')', ':', 'matched', '=', '[', 'str', '(', 'm', ')', 'for', 'm', 'in', 'self', '.', '_matchers', '[', ':', 'self', '.', '_position', ']', ']', 'unmatched', '=', '[', 'str', '(', 'm', ')', 'for', 'm', 'in', 'self', '.', '_matchers', '[', 'self', '.', '_position', ':', ']', ']', 'return', "'matched=[{}], unmatched=[{}]'", '.', 'format', '(', "', '", '.', 'join', '(', 'matched', ')', ',', "', '", '.', 'join', '(', 'unmatched', ')', ')']
Return an args string for the repr.
['Return', 'an', 'args', 'string', 'for', 'the', 'repr', '.']
train
https://github.com/praekeltfoundation/seaworthy/blob/6f10a19b45d4ea1dc3bd0553cc4d0438696c079c/seaworthy/stream/matchers.py#L99-L106
3,579
facebook/pyre-check
client/buck.py
SimpleBuckBuilder.build
def build(self, targets: Iterable[str]) -> Iterable[str]: """ Shell out to buck to build the targets, then yield the paths to the link trees. """ return generate_source_directories( targets, build=self._build, prompt=self._prompt )
python
def build(self, targets: Iterable[str]) -> Iterable[str]: """ Shell out to buck to build the targets, then yield the paths to the link trees. """ return generate_source_directories( targets, build=self._build, prompt=self._prompt )
['def', 'build', '(', 'self', ',', 'targets', ':', 'Iterable', '[', 'str', ']', ')', '->', 'Iterable', '[', 'str', ']', ':', 'return', 'generate_source_directories', '(', 'targets', ',', 'build', '=', 'self', '.', '_build', ',', 'prompt', '=', 'self', '.', '_prompt', ')']
Shell out to buck to build the targets, then yield the paths to the link trees.
['Shell', 'out', 'to', 'buck', 'to', 'build', 'the', 'targets', 'then', 'yield', 'the', 'paths', 'to', 'the', 'link', 'trees', '.']
train
https://github.com/facebook/pyre-check/blob/4a9604d943d28ef20238505a51acfb1f666328d7/client/buck.py#L32-L39
3,580
THLO/map
map/mapper.py
MapInputHandler.createList
def createList(self,args): """ This is an internal method to create the list of input files (or directories) contained in the provided directory or directories. """ resultList = [] if len(args.path) == 1 and os.path.isdir(args.path[0]): resultList = [os.path.join(args.path[0], f) for f in os.listdir(args.path[0])] else: # If there are multiple items, wildcard expansion has already created the list of files resultList = args.path return list(set(resultList))
python
def createList(self,args): """ This is an internal method to create the list of input files (or directories) contained in the provided directory or directories. """ resultList = [] if len(args.path) == 1 and os.path.isdir(args.path[0]): resultList = [os.path.join(args.path[0], f) for f in os.listdir(args.path[0])] else: # If there are multiple items, wildcard expansion has already created the list of files resultList = args.path return list(set(resultList))
['def', 'createList', '(', 'self', ',', 'args', ')', ':', 'resultList', '=', '[', ']', 'if', 'len', '(', 'args', '.', 'path', ')', '==', '1', 'and', 'os', '.', 'path', '.', 'isdir', '(', 'args', '.', 'path', '[', '0', ']', ')', ':', 'resultList', '=', '[', 'os', '.', 'path', '.', 'join', '(', 'args', '.', 'path', '[', '0', ']', ',', 'f', ')', 'for', 'f', 'in', 'os', '.', 'listdir', '(', 'args', '.', 'path', '[', '0', ']', ')', ']', 'else', ':', '# If there are multiple items, wildcard expansion has already created the list of files', 'resultList', '=', 'args', '.', 'path', 'return', 'list', '(', 'set', '(', 'resultList', ')', ')']
This is an internal method to create the list of input files (or directories) contained in the provided directory or directories.
['This', 'is', 'an', 'internal', 'method', 'to', 'create', 'the', 'list', 'of', 'input', 'files', '(', 'or', 'directories', ')', 'contained', 'in', 'the', 'provided', 'directory', 'or', 'directories', '.']
train
https://github.com/THLO/map/blob/6c1571187662bbf2e66faaf96b11a3e151ed4c87/map/mapper.py#L81-L91
3,581
mitsei/dlkit
dlkit/json_/assessment/objects.py
AssessmentForm.set_rubric
def set_rubric(self, assessment_id): """Sets the rubric expressed as another assessment. arg: assessment_id (osid.id.Id): the assessment ``Id`` raise: InvalidArgument - ``assessment_id`` is invalid raise: NoAccess - ``Metadata.isReadOnly()`` is ``true`` raise: NullArgument - ``assessment_id`` is ``null`` *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for osid.resource.ResourceForm.set_avatar_template if self.get_rubric_metadata().is_read_only(): raise errors.NoAccess() if not self._is_valid_id(assessment_id): raise errors.InvalidArgument() self._my_map['rubricId'] = str(assessment_id)
python
def set_rubric(self, assessment_id): """Sets the rubric expressed as another assessment. arg: assessment_id (osid.id.Id): the assessment ``Id`` raise: InvalidArgument - ``assessment_id`` is invalid raise: NoAccess - ``Metadata.isReadOnly()`` is ``true`` raise: NullArgument - ``assessment_id`` is ``null`` *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for osid.resource.ResourceForm.set_avatar_template if self.get_rubric_metadata().is_read_only(): raise errors.NoAccess() if not self._is_valid_id(assessment_id): raise errors.InvalidArgument() self._my_map['rubricId'] = str(assessment_id)
['def', 'set_rubric', '(', 'self', ',', 'assessment_id', ')', ':', '# Implemented from template for osid.resource.ResourceForm.set_avatar_template', 'if', 'self', '.', 'get_rubric_metadata', '(', ')', '.', 'is_read_only', '(', ')', ':', 'raise', 'errors', '.', 'NoAccess', '(', ')', 'if', 'not', 'self', '.', '_is_valid_id', '(', 'assessment_id', ')', ':', 'raise', 'errors', '.', 'InvalidArgument', '(', ')', 'self', '.', '_my_map', '[', "'rubricId'", ']', '=', 'str', '(', 'assessment_id', ')']
Sets the rubric expressed as another assessment. arg: assessment_id (osid.id.Id): the assessment ``Id`` raise: InvalidArgument - ``assessment_id`` is invalid raise: NoAccess - ``Metadata.isReadOnly()`` is ``true`` raise: NullArgument - ``assessment_id`` is ``null`` *compliance: mandatory -- This method must be implemented.*
['Sets', 'the', 'rubric', 'expressed', 'as', 'another', 'assessment', '.']
train
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/assessment/objects.py#L1097-L1112
3,582
samghelms/mathviz
mathviz_hopper/src/bottle.py
Route.get_undecorated_callback
def get_undecorated_callback(self): """ Return the callback. If the callback is a decorated function, try to recover the original function. """ func = self.callback func = getattr(func, '__func__' if py3k else 'im_func', func) closure_attr = '__closure__' if py3k else 'func_closure' while hasattr(func, closure_attr) and getattr(func, closure_attr): attributes = getattr(func, closure_attr) func = attributes[0].cell_contents # in case of decorators with multiple arguments if not isinstance(func, FunctionType): # pick first FunctionType instance from multiple arguments func = filter(lambda x: isinstance(x, FunctionType), map(lambda x: x.cell_contents, attributes)) func = list(func)[0] # py3 support return func
python
def get_undecorated_callback(self): """ Return the callback. If the callback is a decorated function, try to recover the original function. """ func = self.callback func = getattr(func, '__func__' if py3k else 'im_func', func) closure_attr = '__closure__' if py3k else 'func_closure' while hasattr(func, closure_attr) and getattr(func, closure_attr): attributes = getattr(func, closure_attr) func = attributes[0].cell_contents # in case of decorators with multiple arguments if not isinstance(func, FunctionType): # pick first FunctionType instance from multiple arguments func = filter(lambda x: isinstance(x, FunctionType), map(lambda x: x.cell_contents, attributes)) func = list(func)[0] # py3 support return func
['def', 'get_undecorated_callback', '(', 'self', ')', ':', 'func', '=', 'self', '.', 'callback', 'func', '=', 'getattr', '(', 'func', ',', "'__func__'", 'if', 'py3k', 'else', "'im_func'", ',', 'func', ')', 'closure_attr', '=', "'__closure__'", 'if', 'py3k', 'else', "'func_closure'", 'while', 'hasattr', '(', 'func', ',', 'closure_attr', ')', 'and', 'getattr', '(', 'func', ',', 'closure_attr', ')', ':', 'attributes', '=', 'getattr', '(', 'func', ',', 'closure_attr', ')', 'func', '=', 'attributes', '[', '0', ']', '.', 'cell_contents', '# in case of decorators with multiple arguments', 'if', 'not', 'isinstance', '(', 'func', ',', 'FunctionType', ')', ':', '# pick first FunctionType instance from multiple arguments', 'func', '=', 'filter', '(', 'lambda', 'x', ':', 'isinstance', '(', 'x', ',', 'FunctionType', ')', ',', 'map', '(', 'lambda', 'x', ':', 'x', '.', 'cell_contents', ',', 'attributes', ')', ')', 'func', '=', 'list', '(', 'func', ')', '[', '0', ']', '# py3 support', 'return', 'func']
Return the callback. If the callback is a decorated function, try to recover the original function.
['Return', 'the', 'callback', '.', 'If', 'the', 'callback', 'is', 'a', 'decorated', 'function', 'try', 'to', 'recover', 'the', 'original', 'function', '.']
train
https://github.com/samghelms/mathviz/blob/30fe89537379faea4de8c8b568ac6e52e4d15353/mathviz_hopper/src/bottle.py#L581-L597
3,583
proycon/pynlpl
pynlpl/formats/folia.py
AbstractSpanAnnotation.xml
def xml(self, attribs = None,elements = None, skipchildren = False): """See :meth:`AbstractElement.xml`""" if not attribs: attribs = {} E = ElementMaker(namespace="http://ilk.uvt.nl/folia",nsmap={None: "http://ilk.uvt.nl/folia", 'xml' : "http://www.w3.org/XML/1998/namespace"}) e = super(AbstractSpanAnnotation,self).xml(attribs, elements, True) for child in self: if isinstance(child, (Word, Morpheme, Phoneme)): #Include REFERENCES to word items instead of word items themselves attribs['{' + NSFOLIA + '}id'] = child.id if child.PRINTABLE and child.hastext(self.textclass): attribs['{' + NSFOLIA + '}t'] = child.text(self.textclass) e.append( E.wref(**attribs) ) elif not (isinstance(child, Feature) and child.SUBSET): #Don't add pre-defined features, they are already added as attributes e.append( child.xml() ) return e
python
def xml(self, attribs = None,elements = None, skipchildren = False): """See :meth:`AbstractElement.xml`""" if not attribs: attribs = {} E = ElementMaker(namespace="http://ilk.uvt.nl/folia",nsmap={None: "http://ilk.uvt.nl/folia", 'xml' : "http://www.w3.org/XML/1998/namespace"}) e = super(AbstractSpanAnnotation,self).xml(attribs, elements, True) for child in self: if isinstance(child, (Word, Morpheme, Phoneme)): #Include REFERENCES to word items instead of word items themselves attribs['{' + NSFOLIA + '}id'] = child.id if child.PRINTABLE and child.hastext(self.textclass): attribs['{' + NSFOLIA + '}t'] = child.text(self.textclass) e.append( E.wref(**attribs) ) elif not (isinstance(child, Feature) and child.SUBSET): #Don't add pre-defined features, they are already added as attributes e.append( child.xml() ) return e
['def', 'xml', '(', 'self', ',', 'attribs', '=', 'None', ',', 'elements', '=', 'None', ',', 'skipchildren', '=', 'False', ')', ':', 'if', 'not', 'attribs', ':', 'attribs', '=', '{', '}', 'E', '=', 'ElementMaker', '(', 'namespace', '=', '"http://ilk.uvt.nl/folia"', ',', 'nsmap', '=', '{', 'None', ':', '"http://ilk.uvt.nl/folia"', ',', "'xml'", ':', '"http://www.w3.org/XML/1998/namespace"', '}', ')', 'e', '=', 'super', '(', 'AbstractSpanAnnotation', ',', 'self', ')', '.', 'xml', '(', 'attribs', ',', 'elements', ',', 'True', ')', 'for', 'child', 'in', 'self', ':', 'if', 'isinstance', '(', 'child', ',', '(', 'Word', ',', 'Morpheme', ',', 'Phoneme', ')', ')', ':', '#Include REFERENCES to word items instead of word items themselves', 'attribs', '[', "'{'", '+', 'NSFOLIA', '+', "'}id'", ']', '=', 'child', '.', 'id', 'if', 'child', '.', 'PRINTABLE', 'and', 'child', '.', 'hastext', '(', 'self', '.', 'textclass', ')', ':', 'attribs', '[', "'{'", '+', 'NSFOLIA', '+', "'}t'", ']', '=', 'child', '.', 'text', '(', 'self', '.', 'textclass', ')', 'e', '.', 'append', '(', 'E', '.', 'wref', '(', '*', '*', 'attribs', ')', ')', 'elif', 'not', '(', 'isinstance', '(', 'child', ',', 'Feature', ')', 'and', 'child', '.', 'SUBSET', ')', ':', "#Don't add pre-defined features, they are already added as attributes", 'e', '.', 'append', '(', 'child', '.', 'xml', '(', ')', ')', 'return', 'e']
See :meth:`AbstractElement.xml`
['See', ':', 'meth', ':', 'AbstractElement', '.', 'xml']
train
https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/formats/folia.py#L4320-L4334
3,584
michael-lazar/rtv
rtv/theme.py
ThemeList._step
def _step(self, theme, direction): """ Traverse the list in the given direction and return the next theme """ if not self.themes: self.reload() # Try to find the starting index key = (theme.source, theme.name) for i, val in enumerate(self.themes): if (val.source, val.name) == key: index = i break else: # If the theme was set from a custom source it might # not be a part of the list returned by list_themes(). self.themes.insert(0, theme) index = 0 index = (index + direction) % len(self.themes) new_theme = self.themes[index] return new_theme
python
def _step(self, theme, direction): """ Traverse the list in the given direction and return the next theme """ if not self.themes: self.reload() # Try to find the starting index key = (theme.source, theme.name) for i, val in enumerate(self.themes): if (val.source, val.name) == key: index = i break else: # If the theme was set from a custom source it might # not be a part of the list returned by list_themes(). self.themes.insert(0, theme) index = 0 index = (index + direction) % len(self.themes) new_theme = self.themes[index] return new_theme
['def', '_step', '(', 'self', ',', 'theme', ',', 'direction', ')', ':', 'if', 'not', 'self', '.', 'themes', ':', 'self', '.', 'reload', '(', ')', '# Try to find the starting index', 'key', '=', '(', 'theme', '.', 'source', ',', 'theme', '.', 'name', ')', 'for', 'i', ',', 'val', 'in', 'enumerate', '(', 'self', '.', 'themes', ')', ':', 'if', '(', 'val', '.', 'source', ',', 'val', '.', 'name', ')', '==', 'key', ':', 'index', '=', 'i', 'break', 'else', ':', '# If the theme was set from a custom source it might', '# not be a part of the list returned by list_themes().', 'self', '.', 'themes', '.', 'insert', '(', '0', ',', 'theme', ')', 'index', '=', '0', 'index', '=', '(', 'index', '+', 'direction', ')', '%', 'len', '(', 'self', '.', 'themes', ')', 'new_theme', '=', 'self', '.', 'themes', '[', 'index', ']', 'return', 'new_theme']
Traverse the list in the given direction and return the next theme
['Traverse', 'the', 'list', 'in', 'the', 'given', 'direction', 'and', 'return', 'the', 'next', 'theme']
train
https://github.com/michael-lazar/rtv/blob/ccef2af042566ad384977028cf0bde01bc524dda/rtv/theme.py#L540-L561
3,585
davidhuser/dhis2.py
dhis2/api.py
Api.post
def post(self, endpoint, json=None, params=None, **kwargs): """POST to DHIS2 :param endpoint: DHIS2 API endpoint :param json: HTTP payload :param params: HTTP parameters :return: requests.Response object """ json = kwargs['data'] if 'data' in kwargs else json return self._make_request('post', endpoint, data=json, params=params)
python
def post(self, endpoint, json=None, params=None, **kwargs): """POST to DHIS2 :param endpoint: DHIS2 API endpoint :param json: HTTP payload :param params: HTTP parameters :return: requests.Response object """ json = kwargs['data'] if 'data' in kwargs else json return self._make_request('post', endpoint, data=json, params=params)
['def', 'post', '(', 'self', ',', 'endpoint', ',', 'json', '=', 'None', ',', 'params', '=', 'None', ',', '*', '*', 'kwargs', ')', ':', 'json', '=', 'kwargs', '[', "'data'", ']', 'if', "'data'", 'in', 'kwargs', 'else', 'json', 'return', 'self', '.', '_make_request', '(', "'post'", ',', 'endpoint', ',', 'data', '=', 'json', ',', 'params', '=', 'params', ')']
POST to DHIS2 :param endpoint: DHIS2 API endpoint :param json: HTTP payload :param params: HTTP parameters :return: requests.Response object
['POST', 'to', 'DHIS2', ':', 'param', 'endpoint', ':', 'DHIS2', 'API', 'endpoint', ':', 'param', 'json', ':', 'HTTP', 'payload', ':', 'param', 'params', ':', 'HTTP', 'parameters', ':', 'return', ':', 'requests', '.', 'Response', 'object']
train
https://github.com/davidhuser/dhis2.py/blob/78cbf1985506db21acdfa0f2e624bc397e455c82/dhis2/api.py#L253-L261
3,586
sffjunkie/astral
src/astral.py
Location.tz
def tz(self): """Time zone information.""" if self.timezone is None: return None try: tz = pytz.timezone(self.timezone) return tz except pytz.UnknownTimeZoneError: raise AstralError("Unknown timezone '%s'" % self.timezone)
python
def tz(self): """Time zone information.""" if self.timezone is None: return None try: tz = pytz.timezone(self.timezone) return tz except pytz.UnknownTimeZoneError: raise AstralError("Unknown timezone '%s'" % self.timezone)
['def', 'tz', '(', 'self', ')', ':', 'if', 'self', '.', 'timezone', 'is', 'None', ':', 'return', 'None', 'try', ':', 'tz', '=', 'pytz', '.', 'timezone', '(', 'self', '.', 'timezone', ')', 'return', 'tz', 'except', 'pytz', '.', 'UnknownTimeZoneError', ':', 'raise', 'AstralError', '(', '"Unknown timezone \'%s\'"', '%', 'self', '.', 'timezone', ')']
Time zone information.
['Time', 'zone', 'information', '.']
train
https://github.com/sffjunkie/astral/blob/b0aa63fce692357cd33c2bf36c69ed5b6582440c/src/astral.py#L716-L726
3,587
cmbruns/pyopenvr
src/samples/sdl/NOTWORKING_hellovr_opengl_sdl.py
CMainApplication.compileGLShader
def compileGLShader(self, pchShaderName, pchVertexShader, pchFragmentShader): """ Purpose: Compiles a GL shader program and returns the handle. Returns 0 if the shader couldn't be compiled for some reason. """ unProgramID = glCreateProgram() nSceneVertexShader = glCreateShader(GL_VERTEX_SHADER) glShaderSource( nSceneVertexShader, pchVertexShader) glCompileShader( nSceneVertexShader ) vShaderCompiled = glGetShaderiv( nSceneVertexShader, GL_COMPILE_STATUS) if not vShaderCompiled: dprintf("%s - Unable to compile vertex shader %d!\n" % (pchShaderName, nSceneVertexShader) ) glDeleteProgram( unProgramID ) glDeleteShader( nSceneVertexShader ) return 0 glAttachShader( unProgramID, nSceneVertexShader) glDeleteShader( nSceneVertexShader ) # the program hangs onto this once it's attached nSceneFragmentShader = glCreateShader(GL_FRAGMENT_SHADER) glShaderSource( nSceneFragmentShader, pchFragmentShader) glCompileShader( nSceneFragmentShader ) fShaderCompiled = glGetShaderiv( nSceneFragmentShader, GL_COMPILE_STATUS) if not fShaderCompiled: dprintf("%s - Unable to compile fragment shader %d!\n" % ( pchShaderName, nSceneFragmentShader) ) glDeleteProgram( unProgramID ) glDeleteShader( nSceneFragmentShader ) return 0 glAttachShader( unProgramID, nSceneFragmentShader ) glDeleteShader( nSceneFragmentShader ) # the program hangs onto this once it's attached glLinkProgram( unProgramID ) programSuccess = glGetProgramiv( unProgramID, GL_LINK_STATUS) if not programSuccess: dprintf("%s - Error linking program %d!\n" % (pchShaderName, unProgramID) ) glDeleteProgram( unProgramID ) return 0 glUseProgram( unProgramID ) glUseProgram( 0 ) return unProgramID
python
def compileGLShader(self, pchShaderName, pchVertexShader, pchFragmentShader): """ Purpose: Compiles a GL shader program and returns the handle. Returns 0 if the shader couldn't be compiled for some reason. """ unProgramID = glCreateProgram() nSceneVertexShader = glCreateShader(GL_VERTEX_SHADER) glShaderSource( nSceneVertexShader, pchVertexShader) glCompileShader( nSceneVertexShader ) vShaderCompiled = glGetShaderiv( nSceneVertexShader, GL_COMPILE_STATUS) if not vShaderCompiled: dprintf("%s - Unable to compile vertex shader %d!\n" % (pchShaderName, nSceneVertexShader) ) glDeleteProgram( unProgramID ) glDeleteShader( nSceneVertexShader ) return 0 glAttachShader( unProgramID, nSceneVertexShader) glDeleteShader( nSceneVertexShader ) # the program hangs onto this once it's attached nSceneFragmentShader = glCreateShader(GL_FRAGMENT_SHADER) glShaderSource( nSceneFragmentShader, pchFragmentShader) glCompileShader( nSceneFragmentShader ) fShaderCompiled = glGetShaderiv( nSceneFragmentShader, GL_COMPILE_STATUS) if not fShaderCompiled: dprintf("%s - Unable to compile fragment shader %d!\n" % ( pchShaderName, nSceneFragmentShader) ) glDeleteProgram( unProgramID ) glDeleteShader( nSceneFragmentShader ) return 0 glAttachShader( unProgramID, nSceneFragmentShader ) glDeleteShader( nSceneFragmentShader ) # the program hangs onto this once it's attached glLinkProgram( unProgramID ) programSuccess = glGetProgramiv( unProgramID, GL_LINK_STATUS) if not programSuccess: dprintf("%s - Error linking program %d!\n" % (pchShaderName, unProgramID) ) glDeleteProgram( unProgramID ) return 0 glUseProgram( unProgramID ) glUseProgram( 0 ) return unProgramID
['def', 'compileGLShader', '(', 'self', ',', 'pchShaderName', ',', 'pchVertexShader', ',', 'pchFragmentShader', ')', ':', 'unProgramID', '=', 'glCreateProgram', '(', ')', 'nSceneVertexShader', '=', 'glCreateShader', '(', 'GL_VERTEX_SHADER', ')', 'glShaderSource', '(', 'nSceneVertexShader', ',', 'pchVertexShader', ')', 'glCompileShader', '(', 'nSceneVertexShader', ')', 'vShaderCompiled', '=', 'glGetShaderiv', '(', 'nSceneVertexShader', ',', 'GL_COMPILE_STATUS', ')', 'if', 'not', 'vShaderCompiled', ':', 'dprintf', '(', '"%s - Unable to compile vertex shader %d!\\n"', '%', '(', 'pchShaderName', ',', 'nSceneVertexShader', ')', ')', 'glDeleteProgram', '(', 'unProgramID', ')', 'glDeleteShader', '(', 'nSceneVertexShader', ')', 'return', '0', 'glAttachShader', '(', 'unProgramID', ',', 'nSceneVertexShader', ')', 'glDeleteShader', '(', 'nSceneVertexShader', ')', "# the program hangs onto this once it's attached\r", 'nSceneFragmentShader', '=', 'glCreateShader', '(', 'GL_FRAGMENT_SHADER', ')', 'glShaderSource', '(', 'nSceneFragmentShader', ',', 'pchFragmentShader', ')', 'glCompileShader', '(', 'nSceneFragmentShader', ')', 'fShaderCompiled', '=', 'glGetShaderiv', '(', 'nSceneFragmentShader', ',', 'GL_COMPILE_STATUS', ')', 'if', 'not', 'fShaderCompiled', ':', 'dprintf', '(', '"%s - Unable to compile fragment shader %d!\\n"', '%', '(', 'pchShaderName', ',', 'nSceneFragmentShader', ')', ')', 'glDeleteProgram', '(', 'unProgramID', ')', 'glDeleteShader', '(', 'nSceneFragmentShader', ')', 'return', '0', 'glAttachShader', '(', 'unProgramID', ',', 'nSceneFragmentShader', ')', 'glDeleteShader', '(', 'nSceneFragmentShader', ')', "# the program hangs onto this once it's attached\r", 'glLinkProgram', '(', 'unProgramID', ')', 'programSuccess', '=', 'glGetProgramiv', '(', 'unProgramID', ',', 'GL_LINK_STATUS', ')', 'if', 'not', 'programSuccess', ':', 'dprintf', '(', '"%s - Error linking program %d!\\n"', '%', '(', 'pchShaderName', ',', 'unProgramID', ')', ')', 'glDeleteProgram', '(', 'unProgramID', ')', 'return', '0', 'glUseProgram', '(', 'unProgramID', ')', 'glUseProgram', '(', '0', ')', 'return', 'unProgramID']
Purpose: Compiles a GL shader program and returns the handle. Returns 0 if the shader couldn't be compiled for some reason.
['Purpose', ':', 'Compiles', 'a', 'GL', 'shader', 'program', 'and', 'returns', 'the', 'handle', '.', 'Returns', '0', 'if', 'the', 'shader', 'couldn', 't', 'be', 'compiled', 'for', 'some', 'reason', '.']
train
https://github.com/cmbruns/pyopenvr/blob/68395d26bb3df6ab1f0f059c38d441f962938be6/src/samples/sdl/NOTWORKING_hellovr_opengl_sdl.py#L1024-L1060
3,588
apple/turicreate
deps/src/boost_1_68_0/tools/build/src/build/property.py
change
def change (properties, feature, value = None): """ Returns a modified version of properties with all values of the given feature replaced by the given value. If 'value' is None the feature will be removed. """ assert is_iterable_typed(properties, basestring) assert isinstance(feature, basestring) assert isinstance(value, (basestring, type(None))) result = [] feature = add_grist (feature) for p in properties: if get_grist (p) == feature: if value: result.append (replace_grist (value, feature)) else: result.append (p) return result
python
def change (properties, feature, value = None): """ Returns a modified version of properties with all values of the given feature replaced by the given value. If 'value' is None the feature will be removed. """ assert is_iterable_typed(properties, basestring) assert isinstance(feature, basestring) assert isinstance(value, (basestring, type(None))) result = [] feature = add_grist (feature) for p in properties: if get_grist (p) == feature: if value: result.append (replace_grist (value, feature)) else: result.append (p) return result
['def', 'change', '(', 'properties', ',', 'feature', ',', 'value', '=', 'None', ')', ':', 'assert', 'is_iterable_typed', '(', 'properties', ',', 'basestring', ')', 'assert', 'isinstance', '(', 'feature', ',', 'basestring', ')', 'assert', 'isinstance', '(', 'value', ',', '(', 'basestring', ',', 'type', '(', 'None', ')', ')', ')', 'result', '=', '[', ']', 'feature', '=', 'add_grist', '(', 'feature', ')', 'for', 'p', 'in', 'properties', ':', 'if', 'get_grist', '(', 'p', ')', '==', 'feature', ':', 'if', 'value', ':', 'result', '.', 'append', '(', 'replace_grist', '(', 'value', ',', 'feature', ')', ')', 'else', ':', 'result', '.', 'append', '(', 'p', ')', 'return', 'result']
Returns a modified version of properties with all values of the given feature replaced by the given value. If 'value' is None the feature will be removed.
['Returns', 'a', 'modified', 'version', 'of', 'properties', 'with', 'all', 'values', 'of', 'the', 'given', 'feature', 'replaced', 'by', 'the', 'given', 'value', '.', 'If', 'value', 'is', 'None', 'the', 'feature', 'will', 'be', 'removed', '.']
train
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/property.py#L457-L477
3,589
Shizmob/pydle
pydle/features/rfc1459/client.py
RFC1459Support.on_raw_433
async def on_raw_433(self, message): """ Nickname in use. """ if not self.registered: self._registration_attempts += 1 # Attempt to set new nickname. if self._attempt_nicknames: await self.set_nickname(self._attempt_nicknames.pop(0)) else: await self.set_nickname( self._nicknames[0] + '_' * (self._registration_attempts - len(self._nicknames)))
python
async def on_raw_433(self, message): """ Nickname in use. """ if not self.registered: self._registration_attempts += 1 # Attempt to set new nickname. if self._attempt_nicknames: await self.set_nickname(self._attempt_nicknames.pop(0)) else: await self.set_nickname( self._nicknames[0] + '_' * (self._registration_attempts - len(self._nicknames)))
['async', 'def', 'on_raw_433', '(', 'self', ',', 'message', ')', ':', 'if', 'not', 'self', '.', 'registered', ':', 'self', '.', '_registration_attempts', '+=', '1', '# Attempt to set new nickname.', 'if', 'self', '.', '_attempt_nicknames', ':', 'await', 'self', '.', 'set_nickname', '(', 'self', '.', '_attempt_nicknames', '.', 'pop', '(', '0', ')', ')', 'else', ':', 'await', 'self', '.', 'set_nickname', '(', 'self', '.', '_nicknames', '[', '0', ']', '+', "'_'", '*', '(', 'self', '.', '_registration_attempts', '-', 'len', '(', 'self', '.', '_nicknames', ')', ')', ')']
Nickname in use.
['Nickname', 'in', 'use', '.']
train
https://github.com/Shizmob/pydle/blob/7ec7d65d097318ed0bcdc5d8401470287d8c7cf7/pydle/features/rfc1459/client.py#L990-L999
3,590
guma44/GEOparse
GEOparse/GEOTypes.py
SimpleGEO._get_table_as_string
def _get_table_as_string(self): """Get table as SOFT formated string.""" tablelist = [] tablelist.append("!%s_table_begin" % self.geotype.lower()) tablelist.append("\t".join(self.table.columns)) for idx, row in self.table.iterrows(): tablelist.append("\t".join(map(str, row))) tablelist.append("!%s_table_end" % self.geotype.lower()) return "\n".join(tablelist)
python
def _get_table_as_string(self): """Get table as SOFT formated string.""" tablelist = [] tablelist.append("!%s_table_begin" % self.geotype.lower()) tablelist.append("\t".join(self.table.columns)) for idx, row in self.table.iterrows(): tablelist.append("\t".join(map(str, row))) tablelist.append("!%s_table_end" % self.geotype.lower()) return "\n".join(tablelist)
['def', '_get_table_as_string', '(', 'self', ')', ':', 'tablelist', '=', '[', ']', 'tablelist', '.', 'append', '(', '"!%s_table_begin"', '%', 'self', '.', 'geotype', '.', 'lower', '(', ')', ')', 'tablelist', '.', 'append', '(', '"\\t"', '.', 'join', '(', 'self', '.', 'table', '.', 'columns', ')', ')', 'for', 'idx', ',', 'row', 'in', 'self', '.', 'table', '.', 'iterrows', '(', ')', ':', 'tablelist', '.', 'append', '(', '"\\t"', '.', 'join', '(', 'map', '(', 'str', ',', 'row', ')', ')', ')', 'tablelist', '.', 'append', '(', '"!%s_table_end"', '%', 'self', '.', 'geotype', '.', 'lower', '(', ')', ')', 'return', '"\\n"', '.', 'join', '(', 'tablelist', ')']
Get table as SOFT formated string.
['Get', 'table', 'as', 'SOFT', 'formated', 'string', '.']
train
https://github.com/guma44/GEOparse/blob/7ee8d5b8678d780382a6bf884afa69d2033f5ca0/GEOparse/GEOTypes.py#L308-L316
3,591
google/tangent
tangent/reverse_ad.py
ReverseAD.visit
def visit(self, node): """Visit a node. This method is largely modelled after the ast.NodeTransformer class. Args: node: The node to visit. Returns: A tuple of the primal and adjoint, each of which is a node or a list of nodes. """ method = 'visit_' + node.__class__.__name__ if not hasattr(self, method): raise ValueError('Unknown node type: %s' % node.__class__.__name__) visitor = getattr(self, method) # If this node is a statement, inform all child nodes what the active # variables in this statement are if anno.hasanno(node, 'active_in'): self.active_variables = anno.getanno(node, 'active_in') pri, adj = visitor(node) # Annotate primal and adjoint statements if isinstance(pri, gast.AST): anno.setdefaultanno(pri, 'adj', adj) else: for node in pri: anno.setdefaultanno(node, 'adj', adj) if isinstance(adj, gast.AST): anno.setdefaultanno(adj, 'pri', pri) else: for node in adj: anno.setdefaultanno(node, 'pri', pri) return pri, adj
python
def visit(self, node): """Visit a node. This method is largely modelled after the ast.NodeTransformer class. Args: node: The node to visit. Returns: A tuple of the primal and adjoint, each of which is a node or a list of nodes. """ method = 'visit_' + node.__class__.__name__ if not hasattr(self, method): raise ValueError('Unknown node type: %s' % node.__class__.__name__) visitor = getattr(self, method) # If this node is a statement, inform all child nodes what the active # variables in this statement are if anno.hasanno(node, 'active_in'): self.active_variables = anno.getanno(node, 'active_in') pri, adj = visitor(node) # Annotate primal and adjoint statements if isinstance(pri, gast.AST): anno.setdefaultanno(pri, 'adj', adj) else: for node in pri: anno.setdefaultanno(node, 'adj', adj) if isinstance(adj, gast.AST): anno.setdefaultanno(adj, 'pri', pri) else: for node in adj: anno.setdefaultanno(node, 'pri', pri) return pri, adj
['def', 'visit', '(', 'self', ',', 'node', ')', ':', 'method', '=', "'visit_'", '+', 'node', '.', '__class__', '.', '__name__', 'if', 'not', 'hasattr', '(', 'self', ',', 'method', ')', ':', 'raise', 'ValueError', '(', "'Unknown node type: %s'", '%', 'node', '.', '__class__', '.', '__name__', ')', 'visitor', '=', 'getattr', '(', 'self', ',', 'method', ')', '# If this node is a statement, inform all child nodes what the active', '# variables in this statement are', 'if', 'anno', '.', 'hasanno', '(', 'node', ',', "'active_in'", ')', ':', 'self', '.', 'active_variables', '=', 'anno', '.', 'getanno', '(', 'node', ',', "'active_in'", ')', 'pri', ',', 'adj', '=', 'visitor', '(', 'node', ')', '# Annotate primal and adjoint statements', 'if', 'isinstance', '(', 'pri', ',', 'gast', '.', 'AST', ')', ':', 'anno', '.', 'setdefaultanno', '(', 'pri', ',', "'adj'", ',', 'adj', ')', 'else', ':', 'for', 'node', 'in', 'pri', ':', 'anno', '.', 'setdefaultanno', '(', 'node', ',', "'adj'", ',', 'adj', ')', 'if', 'isinstance', '(', 'adj', ',', 'gast', '.', 'AST', ')', ':', 'anno', '.', 'setdefaultanno', '(', 'adj', ',', "'pri'", ',', 'pri', ')', 'else', ':', 'for', 'node', 'in', 'adj', ':', 'anno', '.', 'setdefaultanno', '(', 'node', ',', "'pri'", ',', 'pri', ')', 'return', 'pri', ',', 'adj']
Visit a node. This method is largely modelled after the ast.NodeTransformer class. Args: node: The node to visit. Returns: A tuple of the primal and adjoint, each of which is a node or a list of nodes.
['Visit', 'a', 'node', '.']
train
https://github.com/google/tangent/blob/6533e83af09de7345d1b438512679992f080dcc9/tangent/reverse_ad.py#L130-L165
3,592
HewlettPackard/python-hpOneView
hpOneView/resources/resource.py
ResourcePatchMixin.patch
def patch(self, operation, path, value, custom_headers=None, timeout=-1): """Uses the PATCH to update a resource. Only one operation can be performed in each PATCH call. Args operation: Patch operation path: Path value: Value timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation in OneView; it just stops waiting for its completion. custom_headers: Allows to add custom http headers. Returns: Updated resource. """ patch_request_body = [{'op': operation, 'path': path, 'value': value}] resource_uri = self.data['uri'] self.data = self.patch_request(resource_uri, body=patch_request_body, custom_headers=custom_headers, timeout=timeout) return self
python
def patch(self, operation, path, value, custom_headers=None, timeout=-1): """Uses the PATCH to update a resource. Only one operation can be performed in each PATCH call. Args operation: Patch operation path: Path value: Value timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation in OneView; it just stops waiting for its completion. custom_headers: Allows to add custom http headers. Returns: Updated resource. """ patch_request_body = [{'op': operation, 'path': path, 'value': value}] resource_uri = self.data['uri'] self.data = self.patch_request(resource_uri, body=patch_request_body, custom_headers=custom_headers, timeout=timeout) return self
['def', 'patch', '(', 'self', ',', 'operation', ',', 'path', ',', 'value', ',', 'custom_headers', '=', 'None', ',', 'timeout', '=', '-', '1', ')', ':', 'patch_request_body', '=', '[', '{', "'op'", ':', 'operation', ',', "'path'", ':', 'path', ',', "'value'", ':', 'value', '}', ']', 'resource_uri', '=', 'self', '.', 'data', '[', "'uri'", ']', 'self', '.', 'data', '=', 'self', '.', 'patch_request', '(', 'resource_uri', ',', 'body', '=', 'patch_request_body', ',', 'custom_headers', '=', 'custom_headers', ',', 'timeout', '=', 'timeout', ')', 'return', 'self']
Uses the PATCH to update a resource. Only one operation can be performed in each PATCH call. Args operation: Patch operation path: Path value: Value timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation in OneView; it just stops waiting for its completion. custom_headers: Allows to add custom http headers. Returns: Updated resource.
['Uses', 'the', 'PATCH', 'to', 'update', 'a', 'resource', '.']
train
https://github.com/HewlettPackard/python-hpOneView/blob/3c6219723ef25e6e0c83d44a89007f89bc325b89/hpOneView/resources/resource.py#L783-L806
3,593
tjcsl/cslbot
cslbot/commands/s.py
cmd
def cmd(send, msg, args): """Corrects a previous message. Syntax: {command}/<msg>/<replacement>/<ig|nick> """ if not msg: send("Invalid Syntax.") return char = msg[0] msg = [x.replace(r'\/', '/') for x in re.split(r'(?<!\\)\%s' % char, msg[1:], maxsplit=2)] # fix for people who forget a trailing slash if len(msg) == 2 and args['config']['feature'].getboolean('lazyregex'): msg.append('') # not a valid sed statement. if not msg or len(msg) < 3: send("Invalid Syntax.") return if args['type'] == 'privmsg': send("Don't worry, %s is not a grammar Nazi." % args['botnick']) return string = msg[0] replacement = msg[1] modifiers = get_modifiers(msg[2], args['nick'], args['config']['core']['nickregex']) if modifiers is None: send("Invalid modifiers.") return try: regex = re.compile(string, re.IGNORECASE) if modifiers['ignorecase'] else re.compile(string) log = get_log(args['db'], args['target'], modifiers['nick']) workers = args['handler'].workers result = workers.run_pool(do_replace, [log, args['config']['core'], char, regex, replacement]) try: msg = result.get(5) except multiprocessing.TimeoutError: workers.restart_pool() send("Sed regex timed out.") return if msg: send(msg) else: send("No match found.") except sre_constants.error as ex: raise CommandFailedException(ex)
python
def cmd(send, msg, args): """Corrects a previous message. Syntax: {command}/<msg>/<replacement>/<ig|nick> """ if not msg: send("Invalid Syntax.") return char = msg[0] msg = [x.replace(r'\/', '/') for x in re.split(r'(?<!\\)\%s' % char, msg[1:], maxsplit=2)] # fix for people who forget a trailing slash if len(msg) == 2 and args['config']['feature'].getboolean('lazyregex'): msg.append('') # not a valid sed statement. if not msg or len(msg) < 3: send("Invalid Syntax.") return if args['type'] == 'privmsg': send("Don't worry, %s is not a grammar Nazi." % args['botnick']) return string = msg[0] replacement = msg[1] modifiers = get_modifiers(msg[2], args['nick'], args['config']['core']['nickregex']) if modifiers is None: send("Invalid modifiers.") return try: regex = re.compile(string, re.IGNORECASE) if modifiers['ignorecase'] else re.compile(string) log = get_log(args['db'], args['target'], modifiers['nick']) workers = args['handler'].workers result = workers.run_pool(do_replace, [log, args['config']['core'], char, regex, replacement]) try: msg = result.get(5) except multiprocessing.TimeoutError: workers.restart_pool() send("Sed regex timed out.") return if msg: send(msg) else: send("No match found.") except sre_constants.error as ex: raise CommandFailedException(ex)
['def', 'cmd', '(', 'send', ',', 'msg', ',', 'args', ')', ':', 'if', 'not', 'msg', ':', 'send', '(', '"Invalid Syntax."', ')', 'return', 'char', '=', 'msg', '[', '0', ']', 'msg', '=', '[', 'x', '.', 'replace', '(', "r'\\/'", ',', "'/'", ')', 'for', 'x', 'in', 're', '.', 'split', '(', "r'(?<!\\\\)\\%s'", '%', 'char', ',', 'msg', '[', '1', ':', ']', ',', 'maxsplit', '=', '2', ')', ']', '# fix for people who forget a trailing slash', 'if', 'len', '(', 'msg', ')', '==', '2', 'and', 'args', '[', "'config'", ']', '[', "'feature'", ']', '.', 'getboolean', '(', "'lazyregex'", ')', ':', 'msg', '.', 'append', '(', "''", ')', '# not a valid sed statement.', 'if', 'not', 'msg', 'or', 'len', '(', 'msg', ')', '<', '3', ':', 'send', '(', '"Invalid Syntax."', ')', 'return', 'if', 'args', '[', "'type'", ']', '==', "'privmsg'", ':', 'send', '(', '"Don\'t worry, %s is not a grammar Nazi."', '%', 'args', '[', "'botnick'", ']', ')', 'return', 'string', '=', 'msg', '[', '0', ']', 'replacement', '=', 'msg', '[', '1', ']', 'modifiers', '=', 'get_modifiers', '(', 'msg', '[', '2', ']', ',', 'args', '[', "'nick'", ']', ',', 'args', '[', "'config'", ']', '[', "'core'", ']', '[', "'nickregex'", ']', ')', 'if', 'modifiers', 'is', 'None', ':', 'send', '(', '"Invalid modifiers."', ')', 'return', 'try', ':', 'regex', '=', 're', '.', 'compile', '(', 'string', ',', 're', '.', 'IGNORECASE', ')', 'if', 'modifiers', '[', "'ignorecase'", ']', 'else', 're', '.', 'compile', '(', 'string', ')', 'log', '=', 'get_log', '(', 'args', '[', "'db'", ']', ',', 'args', '[', "'target'", ']', ',', 'modifiers', '[', "'nick'", ']', ')', 'workers', '=', 'args', '[', "'handler'", ']', '.', 'workers', 'result', '=', 'workers', '.', 'run_pool', '(', 'do_replace', ',', '[', 'log', ',', 'args', '[', "'config'", ']', '[', "'core'", ']', ',', 'char', ',', 'regex', ',', 'replacement', ']', ')', 'try', ':', 'msg', '=', 'result', '.', 'get', '(', '5', ')', 'except', 'multiprocessing', '.', 'TimeoutError', ':', 'workers', '.', 'restart_pool', '(', ')', 'send', '(', '"Sed regex timed out."', ')', 'return', 'if', 'msg', ':', 'send', '(', 'msg', ')', 'else', ':', 'send', '(', '"No match found."', ')', 'except', 'sre_constants', '.', 'error', 'as', 'ex', ':', 'raise', 'CommandFailedException', '(', 'ex', ')']
Corrects a previous message. Syntax: {command}/<msg>/<replacement>/<ig|nick>
['Corrects', 'a', 'previous', 'message', '.']
train
https://github.com/tjcsl/cslbot/blob/aebe07be47141f61d7c180706bddfb707f19b2b5/cslbot/commands/s.py#L80-L124
3,594
brocade/pynos
pynos/versions/ver_7/ver_7_1_0/yang/brocade_fabric_service.py
brocade_fabric_service.show_linkinfo_output_show_link_info_linkinfo_isl_linkinfo_isllink_destdomain
def show_linkinfo_output_show_link_info_linkinfo_isl_linkinfo_isllink_destdomain(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") show_linkinfo = ET.Element("show_linkinfo") config = show_linkinfo output = ET.SubElement(show_linkinfo, "output") show_link_info = ET.SubElement(output, "show-link-info") linkinfo_rbridgeid_key = ET.SubElement(show_link_info, "linkinfo-rbridgeid") linkinfo_rbridgeid_key.text = kwargs.pop('linkinfo_rbridgeid') linkinfo_isl = ET.SubElement(show_link_info, "linkinfo-isl") linkinfo_isl_linknumber_key = ET.SubElement(linkinfo_isl, "linkinfo-isl-linknumber") linkinfo_isl_linknumber_key.text = kwargs.pop('linkinfo_isl_linknumber') linkinfo_isllink_destdomain = ET.SubElement(linkinfo_isl, "linkinfo-isllink-destdomain") linkinfo_isllink_destdomain.text = kwargs.pop('linkinfo_isllink_destdomain') callback = kwargs.pop('callback', self._callback) return callback(config)
python
def show_linkinfo_output_show_link_info_linkinfo_isl_linkinfo_isllink_destdomain(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") show_linkinfo = ET.Element("show_linkinfo") config = show_linkinfo output = ET.SubElement(show_linkinfo, "output") show_link_info = ET.SubElement(output, "show-link-info") linkinfo_rbridgeid_key = ET.SubElement(show_link_info, "linkinfo-rbridgeid") linkinfo_rbridgeid_key.text = kwargs.pop('linkinfo_rbridgeid') linkinfo_isl = ET.SubElement(show_link_info, "linkinfo-isl") linkinfo_isl_linknumber_key = ET.SubElement(linkinfo_isl, "linkinfo-isl-linknumber") linkinfo_isl_linknumber_key.text = kwargs.pop('linkinfo_isl_linknumber') linkinfo_isllink_destdomain = ET.SubElement(linkinfo_isl, "linkinfo-isllink-destdomain") linkinfo_isllink_destdomain.text = kwargs.pop('linkinfo_isllink_destdomain') callback = kwargs.pop('callback', self._callback) return callback(config)
['def', 'show_linkinfo_output_show_link_info_linkinfo_isl_linkinfo_isllink_destdomain', '(', 'self', ',', '*', '*', 'kwargs', ')', ':', 'config', '=', 'ET', '.', 'Element', '(', '"config"', ')', 'show_linkinfo', '=', 'ET', '.', 'Element', '(', '"show_linkinfo"', ')', 'config', '=', 'show_linkinfo', 'output', '=', 'ET', '.', 'SubElement', '(', 'show_linkinfo', ',', '"output"', ')', 'show_link_info', '=', 'ET', '.', 'SubElement', '(', 'output', ',', '"show-link-info"', ')', 'linkinfo_rbridgeid_key', '=', 'ET', '.', 'SubElement', '(', 'show_link_info', ',', '"linkinfo-rbridgeid"', ')', 'linkinfo_rbridgeid_key', '.', 'text', '=', 'kwargs', '.', 'pop', '(', "'linkinfo_rbridgeid'", ')', 'linkinfo_isl', '=', 'ET', '.', 'SubElement', '(', 'show_link_info', ',', '"linkinfo-isl"', ')', 'linkinfo_isl_linknumber_key', '=', 'ET', '.', 'SubElement', '(', 'linkinfo_isl', ',', '"linkinfo-isl-linknumber"', ')', 'linkinfo_isl_linknumber_key', '.', 'text', '=', 'kwargs', '.', 'pop', '(', "'linkinfo_isl_linknumber'", ')', 'linkinfo_isllink_destdomain', '=', 'ET', '.', 'SubElement', '(', 'linkinfo_isl', ',', '"linkinfo-isllink-destdomain"', ')', 'linkinfo_isllink_destdomain', '.', 'text', '=', 'kwargs', '.', 'pop', '(', "'linkinfo_isllink_destdomain'", ')', 'callback', '=', 'kwargs', '.', 'pop', '(', "'callback'", ',', 'self', '.', '_callback', ')', 'return', 'callback', '(', 'config', ')']
Auto Generated Code
['Auto', 'Generated', 'Code']
train
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_7/ver_7_1_0/yang/brocade_fabric_service.py#L103-L120
3,595
fuzeman/PyUPnP
pyupnp/lict.py
Lict.popvalue
def popvalue(self, k, d=None): """ D.popvalue(k[,d]) -> v, remove specified key and return the corresponding value. If key is not found, d is returned if given, otherwise KeyError is raised """ if k not in self._col_dict: return d value = self._col_dict.pop(k) self._col_list.remove(value) return value
python
def popvalue(self, k, d=None): """ D.popvalue(k[,d]) -> v, remove specified key and return the corresponding value. If key is not found, d is returned if given, otherwise KeyError is raised """ if k not in self._col_dict: return d value = self._col_dict.pop(k) self._col_list.remove(value) return value
['def', 'popvalue', '(', 'self', ',', 'k', ',', 'd', '=', 'None', ')', ':', 'if', 'k', 'not', 'in', 'self', '.', '_col_dict', ':', 'return', 'd', 'value', '=', 'self', '.', '_col_dict', '.', 'pop', '(', 'k', ')', 'self', '.', '_col_list', '.', 'remove', '(', 'value', ')', 'return', 'value']
D.popvalue(k[,d]) -> v, remove specified key and return the corresponding value. If key is not found, d is returned if given, otherwise KeyError is raised
['D', '.', 'popvalue', '(', 'k', '[', 'd', ']', ')', '-', '>', 'v', 'remove', 'specified', 'key', 'and', 'return', 'the', 'corresponding', 'value', '.', 'If', 'key', 'is', 'not', 'found', 'd', 'is', 'returned', 'if', 'given', 'otherwise', 'KeyError', 'is', 'raised']
train
https://github.com/fuzeman/PyUPnP/blob/6dea64be299952346a14300ab6cc7dac42736433/pyupnp/lict.py#L316-L325
3,596
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/runsnakerun/squaremap/squaremap.py
HotMapNavigator.lastNode
def lastNode(class_, hot_map): ''' Return the very last node (recursively) in the hot map. ''' children = hot_map[-1][2] if children: return class_.lastNode(children) else: return hot_map[-1][1]
python
def lastNode(class_, hot_map): ''' Return the very last node (recursively) in the hot map. ''' children = hot_map[-1][2] if children: return class_.lastNode(children) else: return hot_map[-1][1]
['def', 'lastNode', '(', 'class_', ',', 'hot_map', ')', ':', 'children', '=', 'hot_map', '[', '-', '1', ']', '[', '2', ']', 'if', 'children', ':', 'return', 'class_', '.', 'lastNode', '(', 'children', ')', 'else', ':', 'return', 'hot_map', '[', '-', '1', ']', '[', '1', ']']
Return the very last node (recursively) in the hot map.
['Return', 'the', 'very', 'last', 'node', '(', 'recursively', ')', 'in', 'the', 'hot', 'map', '.']
train
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/runsnakerun/squaremap/squaremap.py#L72-L78
3,597
saltstack/salt
salt/modules/netbox.py
make_interface_child
def make_interface_child(device_name, interface_name, parent_name): ''' .. versionadded:: 2019.2.0 Set an interface as part of a LAG. device_name The name of the device, e.g., ``edge_router``. interface_name The name of the interface to be attached to LAG, e.g., ``xe-1/0/2``. parent_name The name of the LAG interface, e.g., ``ae13``. CLI Example: .. code-block:: bash salt myminion netbox.make_interface_child xe-1/0/2 ae13 ''' nb_device = get_('dcim', 'devices', name=device_name) nb_parent = get_('dcim', 'interfaces', device_id=nb_device['id'], name=parent_name) if nb_device and nb_parent: return update_interface(device_name, interface_name, lag=nb_parent['id']) else: return False
python
def make_interface_child(device_name, interface_name, parent_name): ''' .. versionadded:: 2019.2.0 Set an interface as part of a LAG. device_name The name of the device, e.g., ``edge_router``. interface_name The name of the interface to be attached to LAG, e.g., ``xe-1/0/2``. parent_name The name of the LAG interface, e.g., ``ae13``. CLI Example: .. code-block:: bash salt myminion netbox.make_interface_child xe-1/0/2 ae13 ''' nb_device = get_('dcim', 'devices', name=device_name) nb_parent = get_('dcim', 'interfaces', device_id=nb_device['id'], name=parent_name) if nb_device and nb_parent: return update_interface(device_name, interface_name, lag=nb_parent['id']) else: return False
['def', 'make_interface_child', '(', 'device_name', ',', 'interface_name', ',', 'parent_name', ')', ':', 'nb_device', '=', 'get_', '(', "'dcim'", ',', "'devices'", ',', 'name', '=', 'device_name', ')', 'nb_parent', '=', 'get_', '(', "'dcim'", ',', "'interfaces'", ',', 'device_id', '=', 'nb_device', '[', "'id'", ']', ',', 'name', '=', 'parent_name', ')', 'if', 'nb_device', 'and', 'nb_parent', ':', 'return', 'update_interface', '(', 'device_name', ',', 'interface_name', ',', 'lag', '=', 'nb_parent', '[', "'id'", ']', ')', 'else', ':', 'return', 'False']
.. versionadded:: 2019.2.0 Set an interface as part of a LAG. device_name The name of the device, e.g., ``edge_router``. interface_name The name of the interface to be attached to LAG, e.g., ``xe-1/0/2``. parent_name The name of the LAG interface, e.g., ``ae13``. CLI Example: .. code-block:: bash salt myminion netbox.make_interface_child xe-1/0/2 ae13
['..', 'versionadded', '::', '2019', '.', '2', '.', '0']
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/netbox.py#L852-L878
3,598
rocky/python-xdis
xdis/dropbox/decrypt25.py
tea_decipher
def tea_decipher(v, key): """ Tiny Decryption Algorithm decription (TEA) See https://en.wikipedia.org/wiki/Tiny_Encryption_Algorithm """ DELTA = 0x9e3779b9 n = len(v) rounds = 6 + 52//n sum = (rounds*DELTA) y = v[0] while sum != 0: e = (sum >> 2) & 3 for p in range(n-1, -1, -1): z = v[(n + p - 1) % n] v[p] = (v[p] - MX(z, y, sum, key, p, e)) & 0xffffffff y = v[p] sum -= DELTA return v
python
def tea_decipher(v, key): """ Tiny Decryption Algorithm decription (TEA) See https://en.wikipedia.org/wiki/Tiny_Encryption_Algorithm """ DELTA = 0x9e3779b9 n = len(v) rounds = 6 + 52//n sum = (rounds*DELTA) y = v[0] while sum != 0: e = (sum >> 2) & 3 for p in range(n-1, -1, -1): z = v[(n + p - 1) % n] v[p] = (v[p] - MX(z, y, sum, key, p, e)) & 0xffffffff y = v[p] sum -= DELTA return v
['def', 'tea_decipher', '(', 'v', ',', 'key', ')', ':', 'DELTA', '=', '0x9e3779b9', 'n', '=', 'len', '(', 'v', ')', 'rounds', '=', '6', '+', '52', '//', 'n', 'sum', '=', '(', 'rounds', '*', 'DELTA', ')', 'y', '=', 'v', '[', '0', ']', 'while', 'sum', '!=', '0', ':', 'e', '=', '(', 'sum', '>>', '2', ')', '&', '3', 'for', 'p', 'in', 'range', '(', 'n', '-', '1', ',', '-', '1', ',', '-', '1', ')', ':', 'z', '=', 'v', '[', '(', 'n', '+', 'p', '-', '1', ')', '%', 'n', ']', 'v', '[', 'p', ']', '=', '(', 'v', '[', 'p', ']', '-', 'MX', '(', 'z', ',', 'y', ',', 'sum', ',', 'key', ',', 'p', ',', 'e', ')', ')', '&', '0xffffffff', 'y', '=', 'v', '[', 'p', ']', 'sum', '-=', 'DELTA', 'return', 'v']
Tiny Decryption Algorithm decription (TEA) See https://en.wikipedia.org/wiki/Tiny_Encryption_Algorithm
['Tiny', 'Decryption', 'Algorithm', 'decription', '(', 'TEA', ')', 'See', 'https', ':', '//', 'en', '.', 'wikipedia', '.', 'org', '/', 'wiki', '/', 'Tiny_Encryption_Algorithm']
train
https://github.com/rocky/python-xdis/blob/46a2902ae8f5d8eee495eed67ac0690fd545453d/xdis/dropbox/decrypt25.py#L35-L52
3,599
Kunstmord/datalib
src/example.py
labels
def labels(): """ Path to labels file """ datapath = path.join(path.dirname(path.realpath(__file__)), path.pardir) datapath = path.join(datapath, '../gzoo_data', 'train_solution.csv') return path.normpath(datapath)
python
def labels(): """ Path to labels file """ datapath = path.join(path.dirname(path.realpath(__file__)), path.pardir) datapath = path.join(datapath, '../gzoo_data', 'train_solution.csv') return path.normpath(datapath)
['def', 'labels', '(', ')', ':', 'datapath', '=', 'path', '.', 'join', '(', 'path', '.', 'dirname', '(', 'path', '.', 'realpath', '(', '__file__', ')', ')', ',', 'path', '.', 'pardir', ')', 'datapath', '=', 'path', '.', 'join', '(', 'datapath', ',', "'../gzoo_data'", ',', "'train_solution.csv'", ')', 'return', 'path', '.', 'normpath', '(', 'datapath', ')']
Path to labels file
['Path', 'to', 'labels', 'file']
train
https://github.com/Kunstmord/datalib/blob/9d7db3e7c3a5feeeb5d19eb0dbee858bd2b50886/src/example.py#L41-L47