Unnamed: 0
int64
0
10k
repository_name
stringlengths
7
54
func_path_in_repository
stringlengths
5
223
func_name
stringlengths
1
134
whole_func_string
stringlengths
100
30.3k
language
stringclasses
1 value
func_code_string
stringlengths
100
30.3k
func_code_tokens
stringlengths
138
33.2k
func_documentation_string
stringlengths
1
15k
func_documentation_tokens
stringlengths
5
5.14k
split_name
stringclasses
1 value
func_code_url
stringlengths
91
315
7,000
opennode/waldur-core
waldur_core/core/tasks.py
BackgroundTask.is_previous_task_processing
def is_previous_task_processing(self, *args, **kwargs): """ Return True if exist task that is equal to current and is uncompleted """ app = self._get_app() inspect = app.control.inspect() active = inspect.active() or {} scheduled = inspect.scheduled() or {} reserved = inspect.reserved() or {} uncompleted = sum(list(active.values()) + list(scheduled.values()) + reserved.values(), []) return any(self.is_equal(task, *args, **kwargs) for task in uncompleted)
python
def is_previous_task_processing(self, *args, **kwargs): """ Return True if exist task that is equal to current and is uncompleted """ app = self._get_app() inspect = app.control.inspect() active = inspect.active() or {} scheduled = inspect.scheduled() or {} reserved = inspect.reserved() or {} uncompleted = sum(list(active.values()) + list(scheduled.values()) + reserved.values(), []) return any(self.is_equal(task, *args, **kwargs) for task in uncompleted)
['def', 'is_previous_task_processing', '(', 'self', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'app', '=', 'self', '.', '_get_app', '(', ')', 'inspect', '=', 'app', '.', 'control', '.', 'inspect', '(', ')', 'active', '=', 'inspect', '.', 'active', '(', ')', 'or', '{', '}', 'scheduled', '=', 'inspect', '.', 'scheduled', '(', ')', 'or', '{', '}', 'reserved', '=', 'inspect', '.', 'reserved', '(', ')', 'or', '{', '}', 'uncompleted', '=', 'sum', '(', 'list', '(', 'active', '.', 'values', '(', ')', ')', '+', 'list', '(', 'scheduled', '.', 'values', '(', ')', ')', '+', 'reserved', '.', 'values', '(', ')', ',', '[', ']', ')', 'return', 'any', '(', 'self', '.', 'is_equal', '(', 'task', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', 'for', 'task', 'in', 'uncompleted', ')']
Return True if exist task that is equal to current and is uncompleted
['Return', 'True', 'if', 'exist', 'task', 'that', 'is', 'equal', 'to', 'current', 'and', 'is', 'uncompleted']
train
https://github.com/opennode/waldur-core/blob/d6c17a9592bb6c49c33567542eef8d099605a46a/waldur_core/core/tasks.py#L353-L361
7,001
spyder-ide/spyder
spyder/utils/vcs.py
get_hg_revision
def get_hg_revision(repopath): """Return Mercurial revision for the repository located at repopath Result is a tuple (global, local, branch), with None values on error For example: >>> get_hg_revision(".") ('eba7273c69df+', '2015+', 'default') """ try: assert osp.isdir(osp.join(repopath, '.hg')) proc = programs.run_program('hg', ['id', '-nib', repopath]) output, _err = proc.communicate() # output is now: ('eba7273c69df+ 2015+ default\n', None) # Split 2 times max to allow spaces in branch names. return tuple(output.decode().strip().split(None, 2)) except (subprocess.CalledProcessError, AssertionError, AttributeError, OSError): return (None, None, None)
python
def get_hg_revision(repopath): """Return Mercurial revision for the repository located at repopath Result is a tuple (global, local, branch), with None values on error For example: >>> get_hg_revision(".") ('eba7273c69df+', '2015+', 'default') """ try: assert osp.isdir(osp.join(repopath, '.hg')) proc = programs.run_program('hg', ['id', '-nib', repopath]) output, _err = proc.communicate() # output is now: ('eba7273c69df+ 2015+ default\n', None) # Split 2 times max to allow spaces in branch names. return tuple(output.decode().strip().split(None, 2)) except (subprocess.CalledProcessError, AssertionError, AttributeError, OSError): return (None, None, None)
['def', 'get_hg_revision', '(', 'repopath', ')', ':', 'try', ':', 'assert', 'osp', '.', 'isdir', '(', 'osp', '.', 'join', '(', 'repopath', ',', "'.hg'", ')', ')', 'proc', '=', 'programs', '.', 'run_program', '(', "'hg'", ',', '[', "'id'", ',', "'-nib'", ',', 'repopath', ']', ')', 'output', ',', '_err', '=', 'proc', '.', 'communicate', '(', ')', "# output is now: ('eba7273c69df+ 2015+ default\\n', None)\r", '# Split 2 times max to allow spaces in branch names.\r', 'return', 'tuple', '(', 'output', '.', 'decode', '(', ')', '.', 'strip', '(', ')', '.', 'split', '(', 'None', ',', '2', ')', ')', 'except', '(', 'subprocess', '.', 'CalledProcessError', ',', 'AssertionError', ',', 'AttributeError', ',', 'OSError', ')', ':', 'return', '(', 'None', ',', 'None', ',', 'None', ')']
Return Mercurial revision for the repository located at repopath Result is a tuple (global, local, branch), with None values on error For example: >>> get_hg_revision(".") ('eba7273c69df+', '2015+', 'default')
['Return', 'Mercurial', 'revision', 'for', 'the', 'repository', 'located', 'at', 'repopath', 'Result', 'is', 'a', 'tuple', '(', 'global', 'local', 'branch', ')', 'with', 'None', 'values', 'on', 'error', 'For', 'example', ':', '>>>', 'get_hg_revision', '(', '.', ')', '(', 'eba7273c69df', '+', '2015', '+', 'default', ')']
train
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/utils/vcs.py#L100-L116
7,002
HPENetworking/PYHPEIMC
pyhpeimc/plat/system.py
modify_ssh_template
def modify_ssh_template(auth, url, ssh_template, template_name= None, template_id = None): """ Function takes input of a dictionry containing the required key/value pair for the modification of a ssh template. :param auth: :param url: :param ssh_template: Human readable label which is the name of the specific ssh template :param template_id Internal IMC number which designates the specific ssh template :return: int value of HTTP response code 201 for proper creation or 404 for failed creation :rtype int Sample of proper KV pairs. Please see documentation for valid values for different fields. ssh_template = { "type": "0", "name": "ssh_admin_template", "authType": "3", "authTypeStr": "Password + Super Password", "userName": "newadmin", "password": "password", "superPassword": "password", "port": "22", "timeout": "10", "retries": "3", "keyFileName": "", "keyPhrase": "" } """ if template_name is None: template_name = ssh_template['name'] if template_id is None: ssh_templates = get_ssh_template(auth, url) template_id = None for template in ssh_templates: if template['name'] == template_name: template_id = template['id'] f_url = url + "/imcrs/plat/res/ssh/"+str(template_id)+"/update" response = requests.put(f_url, data = json.dumps(ssh_template), auth=auth, headers=HEADERS) try: return response.status_code except requests.exceptions.RequestException as error: return "Error:\n" + str(error) + " modify_ssh_template: An Error has occured"
python
def modify_ssh_template(auth, url, ssh_template, template_name= None, template_id = None): """ Function takes input of a dictionry containing the required key/value pair for the modification of a ssh template. :param auth: :param url: :param ssh_template: Human readable label which is the name of the specific ssh template :param template_id Internal IMC number which designates the specific ssh template :return: int value of HTTP response code 201 for proper creation or 404 for failed creation :rtype int Sample of proper KV pairs. Please see documentation for valid values for different fields. ssh_template = { "type": "0", "name": "ssh_admin_template", "authType": "3", "authTypeStr": "Password + Super Password", "userName": "newadmin", "password": "password", "superPassword": "password", "port": "22", "timeout": "10", "retries": "3", "keyFileName": "", "keyPhrase": "" } """ if template_name is None: template_name = ssh_template['name'] if template_id is None: ssh_templates = get_ssh_template(auth, url) template_id = None for template in ssh_templates: if template['name'] == template_name: template_id = template['id'] f_url = url + "/imcrs/plat/res/ssh/"+str(template_id)+"/update" response = requests.put(f_url, data = json.dumps(ssh_template), auth=auth, headers=HEADERS) try: return response.status_code except requests.exceptions.RequestException as error: return "Error:\n" + str(error) + " modify_ssh_template: An Error has occured"
['def', 'modify_ssh_template', '(', 'auth', ',', 'url', ',', 'ssh_template', ',', 'template_name', '=', 'None', ',', 'template_id', '=', 'None', ')', ':', 'if', 'template_name', 'is', 'None', ':', 'template_name', '=', 'ssh_template', '[', "'name'", ']', 'if', 'template_id', 'is', 'None', ':', 'ssh_templates', '=', 'get_ssh_template', '(', 'auth', ',', 'url', ')', 'template_id', '=', 'None', 'for', 'template', 'in', 'ssh_templates', ':', 'if', 'template', '[', "'name'", ']', '==', 'template_name', ':', 'template_id', '=', 'template', '[', "'id'", ']', 'f_url', '=', 'url', '+', '"/imcrs/plat/res/ssh/"', '+', 'str', '(', 'template_id', ')', '+', '"/update"', 'response', '=', 'requests', '.', 'put', '(', 'f_url', ',', 'data', '=', 'json', '.', 'dumps', '(', 'ssh_template', ')', ',', 'auth', '=', 'auth', ',', 'headers', '=', 'HEADERS', ')', 'try', ':', 'return', 'response', '.', 'status_code', 'except', 'requests', '.', 'exceptions', '.', 'RequestException', 'as', 'error', ':', 'return', '"Error:\\n"', '+', 'str', '(', 'error', ')', '+', '" modify_ssh_template: An Error has occured"']
Function takes input of a dictionry containing the required key/value pair for the modification of a ssh template. :param auth: :param url: :param ssh_template: Human readable label which is the name of the specific ssh template :param template_id Internal IMC number which designates the specific ssh template :return: int value of HTTP response code 201 for proper creation or 404 for failed creation :rtype int Sample of proper KV pairs. Please see documentation for valid values for different fields. ssh_template = { "type": "0", "name": "ssh_admin_template", "authType": "3", "authTypeStr": "Password + Super Password", "userName": "newadmin", "password": "password", "superPassword": "password", "port": "22", "timeout": "10", "retries": "3", "keyFileName": "", "keyPhrase": "" }
['Function', 'takes', 'input', 'of', 'a', 'dictionry', 'containing', 'the', 'required', 'key', '/', 'value', 'pair', 'for', 'the', 'modification', 'of', 'a', 'ssh', 'template', '.']
train
https://github.com/HPENetworking/PYHPEIMC/blob/4fba31827573587e03a6233c7db60f188038c8e5/pyhpeimc/plat/system.py#L397-L439
7,003
CodeReclaimers/neat-python
examples/xor/evolve-feedforward-threaded.py
run
def run(config_file): """load the config, create a population, evolve and show the result""" # Load configuration. config = neat.Config(neat.DefaultGenome, neat.DefaultReproduction, neat.DefaultSpeciesSet, neat.DefaultStagnation, config_file) # Create the population, which is the top-level object for a NEAT run. p = neat.Population(config) # Add a stdout reporter to show progress in the terminal. p.add_reporter(neat.StdOutReporter(True)) stats = neat.StatisticsReporter() p.add_reporter(stats) # Run for up to 300 generations. pe = neat.ThreadedEvaluator(4, eval_genome) winner = p.run(pe.evaluate, 300) pe.stop() # Display the winning genome. print('\nBest genome:\n{!s}'.format(winner)) # Show output of the most fit genome against training data. print('\nOutput:') winner_net = neat.nn.FeedForwardNetwork.create(winner, config) for xi, xo in zip(xor_inputs, xor_outputs): output = winner_net.activate(xi) print( "input {!r}, expected output {!r}, got {!r}".format(xi, xo, output) ) if visualize is not None: node_names = {-1: 'A', -2: 'B', 0: 'A XOR B'} visualize.draw_net(config, winner, True, node_names=node_names) visualize.plot_stats(stats, ylog=False, view=True) visualize.plot_species(stats, view=True)
python
def run(config_file): """load the config, create a population, evolve and show the result""" # Load configuration. config = neat.Config(neat.DefaultGenome, neat.DefaultReproduction, neat.DefaultSpeciesSet, neat.DefaultStagnation, config_file) # Create the population, which is the top-level object for a NEAT run. p = neat.Population(config) # Add a stdout reporter to show progress in the terminal. p.add_reporter(neat.StdOutReporter(True)) stats = neat.StatisticsReporter() p.add_reporter(stats) # Run for up to 300 generations. pe = neat.ThreadedEvaluator(4, eval_genome) winner = p.run(pe.evaluate, 300) pe.stop() # Display the winning genome. print('\nBest genome:\n{!s}'.format(winner)) # Show output of the most fit genome against training data. print('\nOutput:') winner_net = neat.nn.FeedForwardNetwork.create(winner, config) for xi, xo in zip(xor_inputs, xor_outputs): output = winner_net.activate(xi) print( "input {!r}, expected output {!r}, got {!r}".format(xi, xo, output) ) if visualize is not None: node_names = {-1: 'A', -2: 'B', 0: 'A XOR B'} visualize.draw_net(config, winner, True, node_names=node_names) visualize.plot_stats(stats, ylog=False, view=True) visualize.plot_species(stats, view=True)
['def', 'run', '(', 'config_file', ')', ':', '# Load configuration.', 'config', '=', 'neat', '.', 'Config', '(', 'neat', '.', 'DefaultGenome', ',', 'neat', '.', 'DefaultReproduction', ',', 'neat', '.', 'DefaultSpeciesSet', ',', 'neat', '.', 'DefaultStagnation', ',', 'config_file', ')', '# Create the population, which is the top-level object for a NEAT run.', 'p', '=', 'neat', '.', 'Population', '(', 'config', ')', '# Add a stdout reporter to show progress in the terminal.', 'p', '.', 'add_reporter', '(', 'neat', '.', 'StdOutReporter', '(', 'True', ')', ')', 'stats', '=', 'neat', '.', 'StatisticsReporter', '(', ')', 'p', '.', 'add_reporter', '(', 'stats', ')', '# Run for up to 300 generations.', 'pe', '=', 'neat', '.', 'ThreadedEvaluator', '(', '4', ',', 'eval_genome', ')', 'winner', '=', 'p', '.', 'run', '(', 'pe', '.', 'evaluate', ',', '300', ')', 'pe', '.', 'stop', '(', ')', '# Display the winning genome.', 'print', '(', "'\\nBest genome:\\n{!s}'", '.', 'format', '(', 'winner', ')', ')', '# Show output of the most fit genome against training data.', 'print', '(', "'\\nOutput:'", ')', 'winner_net', '=', 'neat', '.', 'nn', '.', 'FeedForwardNetwork', '.', 'create', '(', 'winner', ',', 'config', ')', 'for', 'xi', ',', 'xo', 'in', 'zip', '(', 'xor_inputs', ',', 'xor_outputs', ')', ':', 'output', '=', 'winner_net', '.', 'activate', '(', 'xi', ')', 'print', '(', '"input {!r}, expected output {!r}, got {!r}"', '.', 'format', '(', 'xi', ',', 'xo', ',', 'output', ')', ')', 'if', 'visualize', 'is', 'not', 'None', ':', 'node_names', '=', '{', '-', '1', ':', "'A'", ',', '-', '2', ':', "'B'", ',', '0', ':', "'A XOR B'", '}', 'visualize', '.', 'draw_net', '(', 'config', ',', 'winner', ',', 'True', ',', 'node_names', '=', 'node_names', ')', 'visualize', '.', 'plot_stats', '(', 'stats', ',', 'ylog', '=', 'False', ',', 'view', '=', 'True', ')', 'visualize', '.', 'plot_species', '(', 'stats', ',', 'view', '=', 'True', ')']
load the config, create a population, evolve and show the result
['load', 'the', 'config', 'create', 'a', 'population', 'evolve', 'and', 'show', 'the', 'result']
train
https://github.com/CodeReclaimers/neat-python/blob/e3dbe77c0d776eae41d598e6439e6ac02ab90b18/examples/xor/evolve-feedforward-threaded.py#L49-L85
7,004
onnx/onnxmltools
onnxmltools/convert/coreml/shape_calculators/TensorToLabel.py
calculte_tensor_to_label_output_shapes
def calculte_tensor_to_label_output_shapes(operator): ''' Allowed input/output patterns are 1. [N, C] ---> [N, 1] Note that N must be 1 currently because TensorToProbability doesn't support batch size larger than 1. ''' check_input_and_output_numbers(operator, input_count_range=1, output_count_range=1) check_input_and_output_types(operator, good_input_types=[FloatTensorType]) N = operator.inputs[0].type.shape[0] if operator.target_opset < 7: output_shape = [1, 1] else: output_shape = [N, 1] if type(operator.outputs[0].type) in [Int64Type, Int64TensorType]: operator.outputs[0].type = Int64TensorType(output_shape, doc_string=operator.outputs[0].type.doc_string) elif type(operator.outputs[0].type) in [StringType, StringTensorType]: operator.outputs[0].type = StringTensorType(output_shape, doc_string=operator.outputs[0].type.doc_string) else: raise ValueError('Unsupported label type')
python
def calculte_tensor_to_label_output_shapes(operator): ''' Allowed input/output patterns are 1. [N, C] ---> [N, 1] Note that N must be 1 currently because TensorToProbability doesn't support batch size larger than 1. ''' check_input_and_output_numbers(operator, input_count_range=1, output_count_range=1) check_input_and_output_types(operator, good_input_types=[FloatTensorType]) N = operator.inputs[0].type.shape[0] if operator.target_opset < 7: output_shape = [1, 1] else: output_shape = [N, 1] if type(operator.outputs[0].type) in [Int64Type, Int64TensorType]: operator.outputs[0].type = Int64TensorType(output_shape, doc_string=operator.outputs[0].type.doc_string) elif type(operator.outputs[0].type) in [StringType, StringTensorType]: operator.outputs[0].type = StringTensorType(output_shape, doc_string=operator.outputs[0].type.doc_string) else: raise ValueError('Unsupported label type')
['def', 'calculte_tensor_to_label_output_shapes', '(', 'operator', ')', ':', 'check_input_and_output_numbers', '(', 'operator', ',', 'input_count_range', '=', '1', ',', 'output_count_range', '=', '1', ')', 'check_input_and_output_types', '(', 'operator', ',', 'good_input_types', '=', '[', 'FloatTensorType', ']', ')', 'N', '=', 'operator', '.', 'inputs', '[', '0', ']', '.', 'type', '.', 'shape', '[', '0', ']', 'if', 'operator', '.', 'target_opset', '<', '7', ':', 'output_shape', '=', '[', '1', ',', '1', ']', 'else', ':', 'output_shape', '=', '[', 'N', ',', '1', ']', 'if', 'type', '(', 'operator', '.', 'outputs', '[', '0', ']', '.', 'type', ')', 'in', '[', 'Int64Type', ',', 'Int64TensorType', ']', ':', 'operator', '.', 'outputs', '[', '0', ']', '.', 'type', '=', 'Int64TensorType', '(', 'output_shape', ',', 'doc_string', '=', 'operator', '.', 'outputs', '[', '0', ']', '.', 'type', '.', 'doc_string', ')', 'elif', 'type', '(', 'operator', '.', 'outputs', '[', '0', ']', '.', 'type', ')', 'in', '[', 'StringType', ',', 'StringTensorType', ']', ':', 'operator', '.', 'outputs', '[', '0', ']', '.', 'type', '=', 'StringTensorType', '(', 'output_shape', ',', 'doc_string', '=', 'operator', '.', 'outputs', '[', '0', ']', '.', 'type', '.', 'doc_string', ')', 'else', ':', 'raise', 'ValueError', '(', "'Unsupported label type'", ')']
Allowed input/output patterns are 1. [N, C] ---> [N, 1] Note that N must be 1 currently because TensorToProbability doesn't support batch size larger than 1.
['Allowed', 'input', '/', 'output', 'patterns', 'are', '1', '.', '[', 'N', 'C', ']', '---', '>', '[', 'N', '1', ']']
train
https://github.com/onnx/onnxmltools/blob/d4e4c31990fc2d9fd1f92139f497d360914c9df2/onnxmltools/convert/coreml/shape_calculators/TensorToLabel.py#L12-L33
7,005
dddomodossola/remi
remi/gui.py
SvgShape.set_position
def set_position(self, x, y): """Sets the shape position. Args: x (int): the x coordinate y (int): the y coordinate """ self.attributes['x'] = str(x) self.attributes['y'] = str(y)
python
def set_position(self, x, y): """Sets the shape position. Args: x (int): the x coordinate y (int): the y coordinate """ self.attributes['x'] = str(x) self.attributes['y'] = str(y)
['def', 'set_position', '(', 'self', ',', 'x', ',', 'y', ')', ':', 'self', '.', 'attributes', '[', "'x'", ']', '=', 'str', '(', 'x', ')', 'self', '.', 'attributes', '[', "'y'", ']', '=', 'str', '(', 'y', ')']
Sets the shape position. Args: x (int): the x coordinate y (int): the y coordinate
['Sets', 'the', 'shape', 'position', '.']
train
https://github.com/dddomodossola/remi/blob/85206f62220662bb7ecd471042268def71ccad28/remi/gui.py#L3423-L3431
7,006
rfosterslo/wagtailplus
wagtailplus/utils/views/chooser.py
ChooserView.get_form_kwargs
def get_form_kwargs(self): """ Returns the keyword arguments for instantiating the form. :rtype: dict. """ kwargs = { 'initial': self.get_initial(), 'prefix': self.get_prefix(), } #noinspection PyUnresolvedReferences if self.request.method in ('POST', 'PUT'): #noinspection PyUnresolvedReferences kwargs.update({ 'data': self.request.POST, 'files': self.request.FILES, }) if hasattr(self, 'object'): kwargs.update({'instance': self.object}) return kwargs
python
def get_form_kwargs(self): """ Returns the keyword arguments for instantiating the form. :rtype: dict. """ kwargs = { 'initial': self.get_initial(), 'prefix': self.get_prefix(), } #noinspection PyUnresolvedReferences if self.request.method in ('POST', 'PUT'): #noinspection PyUnresolvedReferences kwargs.update({ 'data': self.request.POST, 'files': self.request.FILES, }) if hasattr(self, 'object'): kwargs.update({'instance': self.object}) return kwargs
['def', 'get_form_kwargs', '(', 'self', ')', ':', 'kwargs', '=', '{', "'initial'", ':', 'self', '.', 'get_initial', '(', ')', ',', "'prefix'", ':', 'self', '.', 'get_prefix', '(', ')', ',', '}', '#noinspection PyUnresolvedReferences', 'if', 'self', '.', 'request', '.', 'method', 'in', '(', "'POST'", ',', "'PUT'", ')', ':', '#noinspection PyUnresolvedReferences', 'kwargs', '.', 'update', '(', '{', "'data'", ':', 'self', '.', 'request', '.', 'POST', ',', "'files'", ':', 'self', '.', 'request', '.', 'FILES', ',', '}', ')', 'if', 'hasattr', '(', 'self', ',', "'object'", ')', ':', 'kwargs', '.', 'update', '(', '{', "'instance'", ':', 'self', '.', 'object', '}', ')', 'return', 'kwargs']
Returns the keyword arguments for instantiating the form. :rtype: dict.
['Returns', 'the', 'keyword', 'arguments', 'for', 'instantiating', 'the', 'form', '.']
train
https://github.com/rfosterslo/wagtailplus/blob/22cac857175d8a6f77e470751831c14a92ccd768/wagtailplus/utils/views/chooser.py#L106-L128
7,007
rstoneback/pysat
pysat/_instrument.py
Instrument.download
def download(self, start, stop, freq='D', user=None, password=None, **kwargs): """Download data for given Instrument object from start to stop. Parameters ---------- start : pandas.datetime start date to download data stop : pandas.datetime stop date to download data freq : string Stepsize between dates for season, 'D' for daily, 'M' monthly (see pandas) user : string username, if required by instrument data archive password : string password, if required by instrument data archive **kwargs : dict Dictionary of keywords that may be options for specific instruments Note ---- Data will be downloaded to pysat_data_dir/patform/name/tag If Instrument bounds are set to defaults they are updated after files are downloaded. """ import errno # make sure directories are there, otherwise create them try: os.makedirs(self.files.data_path) except OSError as e: if e.errno != errno.EEXIST: raise print('Downloading data to: ', self.files.data_path) date_array = utils.season_date_range(start, stop, freq=freq) if user is None: self._download_rtn(date_array, tag=self.tag, sat_id=self.sat_id, data_path=self.files.data_path, **kwargs) else: self._download_rtn(date_array, tag=self.tag, sat_id=self.sat_id, data_path=self.files.data_path, user=user, password=password, **kwargs) # get current file date range first_date = self.files.start_date last_date = self.files.stop_date print('Updating pysat file list') self.files.refresh() # if instrument object has default bounds, update them if len(self.bounds[0]) == 1: if(self.bounds[0][0] == first_date and self.bounds[1][0] == last_date): print('Updating instrument object bounds.') self.bounds = None
python
def download(self, start, stop, freq='D', user=None, password=None, **kwargs): """Download data for given Instrument object from start to stop. Parameters ---------- start : pandas.datetime start date to download data stop : pandas.datetime stop date to download data freq : string Stepsize between dates for season, 'D' for daily, 'M' monthly (see pandas) user : string username, if required by instrument data archive password : string password, if required by instrument data archive **kwargs : dict Dictionary of keywords that may be options for specific instruments Note ---- Data will be downloaded to pysat_data_dir/patform/name/tag If Instrument bounds are set to defaults they are updated after files are downloaded. """ import errno # make sure directories are there, otherwise create them try: os.makedirs(self.files.data_path) except OSError as e: if e.errno != errno.EEXIST: raise print('Downloading data to: ', self.files.data_path) date_array = utils.season_date_range(start, stop, freq=freq) if user is None: self._download_rtn(date_array, tag=self.tag, sat_id=self.sat_id, data_path=self.files.data_path, **kwargs) else: self._download_rtn(date_array, tag=self.tag, sat_id=self.sat_id, data_path=self.files.data_path, user=user, password=password, **kwargs) # get current file date range first_date = self.files.start_date last_date = self.files.stop_date print('Updating pysat file list') self.files.refresh() # if instrument object has default bounds, update them if len(self.bounds[0]) == 1: if(self.bounds[0][0] == first_date and self.bounds[1][0] == last_date): print('Updating instrument object bounds.') self.bounds = None
['def', 'download', '(', 'self', ',', 'start', ',', 'stop', ',', 'freq', '=', "'D'", ',', 'user', '=', 'None', ',', 'password', '=', 'None', ',', '*', '*', 'kwargs', ')', ':', 'import', 'errno', '# make sure directories are there, otherwise create them', 'try', ':', 'os', '.', 'makedirs', '(', 'self', '.', 'files', '.', 'data_path', ')', 'except', 'OSError', 'as', 'e', ':', 'if', 'e', '.', 'errno', '!=', 'errno', '.', 'EEXIST', ':', 'raise', 'print', '(', "'Downloading data to: '", ',', 'self', '.', 'files', '.', 'data_path', ')', 'date_array', '=', 'utils', '.', 'season_date_range', '(', 'start', ',', 'stop', ',', 'freq', '=', 'freq', ')', 'if', 'user', 'is', 'None', ':', 'self', '.', '_download_rtn', '(', 'date_array', ',', 'tag', '=', 'self', '.', 'tag', ',', 'sat_id', '=', 'self', '.', 'sat_id', ',', 'data_path', '=', 'self', '.', 'files', '.', 'data_path', ',', '*', '*', 'kwargs', ')', 'else', ':', 'self', '.', '_download_rtn', '(', 'date_array', ',', 'tag', '=', 'self', '.', 'tag', ',', 'sat_id', '=', 'self', '.', 'sat_id', ',', 'data_path', '=', 'self', '.', 'files', '.', 'data_path', ',', 'user', '=', 'user', ',', 'password', '=', 'password', ',', '*', '*', 'kwargs', ')', '# get current file date range', 'first_date', '=', 'self', '.', 'files', '.', 'start_date', 'last_date', '=', 'self', '.', 'files', '.', 'stop_date', 'print', '(', "'Updating pysat file list'", ')', 'self', '.', 'files', '.', 'refresh', '(', ')', '# if instrument object has default bounds, update them', 'if', 'len', '(', 'self', '.', 'bounds', '[', '0', ']', ')', '==', '1', ':', 'if', '(', 'self', '.', 'bounds', '[', '0', ']', '[', '0', ']', '==', 'first_date', 'and', 'self', '.', 'bounds', '[', '1', ']', '[', '0', ']', '==', 'last_date', ')', ':', 'print', '(', "'Updating instrument object bounds.'", ')', 'self', '.', 'bounds', '=', 'None']
Download data for given Instrument object from start to stop. Parameters ---------- start : pandas.datetime start date to download data stop : pandas.datetime stop date to download data freq : string Stepsize between dates for season, 'D' for daily, 'M' monthly (see pandas) user : string username, if required by instrument data archive password : string password, if required by instrument data archive **kwargs : dict Dictionary of keywords that may be options for specific instruments Note ---- Data will be downloaded to pysat_data_dir/patform/name/tag If Instrument bounds are set to defaults they are updated after files are downloaded.
['Download', 'data', 'for', 'given', 'Instrument', 'object', 'from', 'start', 'to', 'stop', '.', 'Parameters', '----------', 'start', ':', 'pandas', '.', 'datetime', 'start', 'date', 'to', 'download', 'data', 'stop', ':', 'pandas', '.', 'datetime', 'stop', 'date', 'to', 'download', 'data', 'freq', ':', 'string', 'Stepsize', 'between', 'dates', 'for', 'season', 'D', 'for', 'daily', 'M', 'monthly', '(', 'see', 'pandas', ')', 'user', ':', 'string', 'username', 'if', 'required', 'by', 'instrument', 'data', 'archive', 'password', ':', 'string', 'password', 'if', 'required', 'by', 'instrument', 'data', 'archive', '**', 'kwargs', ':', 'dict', 'Dictionary', 'of', 'keywords', 'that', 'may', 'be', 'options', 'for', 'specific', 'instruments', 'Note', '----', 'Data', 'will', 'be', 'downloaded', 'to', 'pysat_data_dir', '/', 'patform', '/', 'name', '/', 'tag', 'If', 'Instrument', 'bounds', 'are', 'set', 'to', 'defaults', 'they', 'are', 'updated', 'after', 'files', 'are', 'downloaded', '.']
train
https://github.com/rstoneback/pysat/blob/4ae1afd80e15e4449397d39dce8c3e969c32c422/pysat/_instrument.py#L918-L980
7,008
argaen/python-google-distance-matrix
google_distance_matrix/core.py
DM.__get_response_element_data
def __get_response_element_data(self, key1, key2): """ For each origin an elements object is created in the ouput. For each destination, an object is created inside elements object. For example, if there are 2 origins and 1 destination, 2 element objects with 1 object each are created. If there are 2 origins and 2 destinations, 2 element objects with 2 objects each are created. """ if not self.dict_response[key1][key2]: l = self.response for i, orig in enumerate(self.origins): self.dict_response[key1][key2][orig] = {} for j, dest in enumerate(self.destinations): if l[i]['elements'][j]['status'] == 'OK': self.dict_response[key1][key2][orig][dest] = l[i]['elements'][j][key1][key2] else: self.dict_response[key1][key2][orig][dest] = l[i]['elements'][j]['status'] return self.dict_response[key1][key2]
python
def __get_response_element_data(self, key1, key2): """ For each origin an elements object is created in the ouput. For each destination, an object is created inside elements object. For example, if there are 2 origins and 1 destination, 2 element objects with 1 object each are created. If there are 2 origins and 2 destinations, 2 element objects with 2 objects each are created. """ if not self.dict_response[key1][key2]: l = self.response for i, orig in enumerate(self.origins): self.dict_response[key1][key2][orig] = {} for j, dest in enumerate(self.destinations): if l[i]['elements'][j]['status'] == 'OK': self.dict_response[key1][key2][orig][dest] = l[i]['elements'][j][key1][key2] else: self.dict_response[key1][key2][orig][dest] = l[i]['elements'][j]['status'] return self.dict_response[key1][key2]
['def', '__get_response_element_data', '(', 'self', ',', 'key1', ',', 'key2', ')', ':', 'if', 'not', 'self', '.', 'dict_response', '[', 'key1', ']', '[', 'key2', ']', ':', 'l', '=', 'self', '.', 'response', 'for', 'i', ',', 'orig', 'in', 'enumerate', '(', 'self', '.', 'origins', ')', ':', 'self', '.', 'dict_response', '[', 'key1', ']', '[', 'key2', ']', '[', 'orig', ']', '=', '{', '}', 'for', 'j', ',', 'dest', 'in', 'enumerate', '(', 'self', '.', 'destinations', ')', ':', 'if', 'l', '[', 'i', ']', '[', "'elements'", ']', '[', 'j', ']', '[', "'status'", ']', '==', "'OK'", ':', 'self', '.', 'dict_response', '[', 'key1', ']', '[', 'key2', ']', '[', 'orig', ']', '[', 'dest', ']', '=', 'l', '[', 'i', ']', '[', "'elements'", ']', '[', 'j', ']', '[', 'key1', ']', '[', 'key2', ']', 'else', ':', 'self', '.', 'dict_response', '[', 'key1', ']', '[', 'key2', ']', '[', 'orig', ']', '[', 'dest', ']', '=', 'l', '[', 'i', ']', '[', "'elements'", ']', '[', 'j', ']', '[', "'status'", ']', 'return', 'self', '.', 'dict_response', '[', 'key1', ']', '[', 'key2', ']']
For each origin an elements object is created in the ouput. For each destination, an object is created inside elements object. For example, if there are 2 origins and 1 destination, 2 element objects with 1 object each are created. If there are 2 origins and 2 destinations, 2 element objects with 2 objects each are created.
['For', 'each', 'origin', 'an', 'elements', 'object', 'is', 'created', 'in', 'the', 'ouput', '.', 'For', 'each', 'destination', 'an', 'object', 'is', 'created', 'inside', 'elements', 'object', '.', 'For', 'example', 'if', 'there', 'are', '2', 'origins', 'and', '1', 'destination', '2', 'element', 'objects', 'with', '1', 'object', 'each', 'are', 'created', '.', 'If', 'there', 'are', '2', 'origins', 'and', '2', 'destinations', '2', 'element', 'objects', 'with', '2', 'objects', 'each', 'are', 'created', '.']
train
https://github.com/argaen/python-google-distance-matrix/blob/20c07bf7d560180ef380b3148616f67f55246a5c/google_distance_matrix/core.py#L69-L86
7,009
klen/graphite-beacon
graphite_beacon/alerts.py
BaseAlert.evaluate_rule
def evaluate_rule(self, rule, value, target): """Calculate the value.""" def evaluate(expr): if expr in LOGICAL_OPERATORS.values(): return expr rvalue = self.get_value_for_expr(expr, target) if rvalue is None: return False # ignore this result return expr['op'](value, rvalue) evaluated = [evaluate(expr) for expr in rule['exprs']] while len(evaluated) > 1: lhs, logical_op, rhs = (evaluated.pop(0) for _ in range(3)) evaluated.insert(0, logical_op(lhs, rhs)) return evaluated[0]
python
def evaluate_rule(self, rule, value, target): """Calculate the value.""" def evaluate(expr): if expr in LOGICAL_OPERATORS.values(): return expr rvalue = self.get_value_for_expr(expr, target) if rvalue is None: return False # ignore this result return expr['op'](value, rvalue) evaluated = [evaluate(expr) for expr in rule['exprs']] while len(evaluated) > 1: lhs, logical_op, rhs = (evaluated.pop(0) for _ in range(3)) evaluated.insert(0, logical_op(lhs, rhs)) return evaluated[0]
['def', 'evaluate_rule', '(', 'self', ',', 'rule', ',', 'value', ',', 'target', ')', ':', 'def', 'evaluate', '(', 'expr', ')', ':', 'if', 'expr', 'in', 'LOGICAL_OPERATORS', '.', 'values', '(', ')', ':', 'return', 'expr', 'rvalue', '=', 'self', '.', 'get_value_for_expr', '(', 'expr', ',', 'target', ')', 'if', 'rvalue', 'is', 'None', ':', 'return', 'False', '# ignore this result', 'return', 'expr', '[', "'op'", ']', '(', 'value', ',', 'rvalue', ')', 'evaluated', '=', '[', 'evaluate', '(', 'expr', ')', 'for', 'expr', 'in', 'rule', '[', "'exprs'", ']', ']', 'while', 'len', '(', 'evaluated', ')', '>', '1', ':', 'lhs', ',', 'logical_op', ',', 'rhs', '=', '(', 'evaluated', '.', 'pop', '(', '0', ')', 'for', '_', 'in', 'range', '(', '3', ')', ')', 'evaluated', '.', 'insert', '(', '0', ',', 'logical_op', '(', 'lhs', ',', 'rhs', ')', ')', 'return', 'evaluated', '[', '0', ']']
Calculate the value.
['Calculate', 'the', 'value', '.']
train
https://github.com/klen/graphite-beacon/blob/c1f071e9f557693bc90f6acbc314994985dc3b77/graphite_beacon/alerts.py#L184-L199
7,010
winstonwolff/expectorant
expectorant/runner.py
find_files
def find_files(args): ''' Return list of spec files. `args` may be filenames which are passed right through, or directories in which case they are searched recursively for *._spec.py ''' files_or_dirs = args or ['.'] filenames = [] for f in files_or_dirs: if path.isdir(f): filenames.extend(glob.glob(path.join(f, '**', '*_spec.py'), recursive=True)) elif path.isfile(f): filenames.append(f) else: raise FileNotFoundError('could not spec file {}'.format(repr(f))) return filenames
python
def find_files(args): ''' Return list of spec files. `args` may be filenames which are passed right through, or directories in which case they are searched recursively for *._spec.py ''' files_or_dirs = args or ['.'] filenames = [] for f in files_or_dirs: if path.isdir(f): filenames.extend(glob.glob(path.join(f, '**', '*_spec.py'), recursive=True)) elif path.isfile(f): filenames.append(f) else: raise FileNotFoundError('could not spec file {}'.format(repr(f))) return filenames
['def', 'find_files', '(', 'args', ')', ':', 'files_or_dirs', '=', 'args', 'or', '[', "'.'", ']', 'filenames', '=', '[', ']', 'for', 'f', 'in', 'files_or_dirs', ':', 'if', 'path', '.', 'isdir', '(', 'f', ')', ':', 'filenames', '.', 'extend', '(', 'glob', '.', 'glob', '(', 'path', '.', 'join', '(', 'f', ',', "'**'", ',', "'*_spec.py'", ')', ',', 'recursive', '=', 'True', ')', ')', 'elif', 'path', '.', 'isfile', '(', 'f', ')', ':', 'filenames', '.', 'append', '(', 'f', ')', 'else', ':', 'raise', 'FileNotFoundError', '(', "'could not spec file {}'", '.', 'format', '(', 'repr', '(', 'f', ')', ')', ')', 'return', 'filenames']
Return list of spec files. `args` may be filenames which are passed right through, or directories in which case they are searched recursively for *._spec.py
['Return', 'list', 'of', 'spec', 'files', '.', 'args', 'may', 'be', 'filenames', 'which', 'are', 'passed', 'right', 'through', 'or', 'directories', 'in', 'which', 'case', 'they', 'are', 'searched', 'recursively', 'for', '*', '.', '_spec', '.', 'py']
train
https://github.com/winstonwolff/expectorant/blob/cb9e4c1656d7c1f4394f0b0fbeb968a833030031/expectorant/runner.py#L17-L32
7,011
leancloud/python-sdk
leancloud/conversation.py
Conversation.send
def send(self, from_client, message, to_clients=None, transient=False, push_data=None): """ 在指定会话中发送消息。 :param from_client: 发送者 id :param message: 消息内容 :param to_clients: 接受者 id,只在系统会话中生效 :param transient: 是否以暂态形式发送消息 :param push_data: 推送消息内容,参考:https://url.leanapp.cn/pushData """ if isinstance(message, dict): message = json.dumps(message) params = { 'from_peer': from_client, 'conv_id': self.id, 'transient': transient, 'message': message, } if to_clients: params['to_peers'] = to_clients if push_data: params['push_data'] = push_data client.post('/rtm/messages', params=params).json()
python
def send(self, from_client, message, to_clients=None, transient=False, push_data=None): """ 在指定会话中发送消息。 :param from_client: 发送者 id :param message: 消息内容 :param to_clients: 接受者 id,只在系统会话中生效 :param transient: 是否以暂态形式发送消息 :param push_data: 推送消息内容,参考:https://url.leanapp.cn/pushData """ if isinstance(message, dict): message = json.dumps(message) params = { 'from_peer': from_client, 'conv_id': self.id, 'transient': transient, 'message': message, } if to_clients: params['to_peers'] = to_clients if push_data: params['push_data'] = push_data client.post('/rtm/messages', params=params).json()
['def', 'send', '(', 'self', ',', 'from_client', ',', 'message', ',', 'to_clients', '=', 'None', ',', 'transient', '=', 'False', ',', 'push_data', '=', 'None', ')', ':', 'if', 'isinstance', '(', 'message', ',', 'dict', ')', ':', 'message', '=', 'json', '.', 'dumps', '(', 'message', ')', 'params', '=', '{', "'from_peer'", ':', 'from_client', ',', "'conv_id'", ':', 'self', '.', 'id', ',', "'transient'", ':', 'transient', ',', "'message'", ':', 'message', ',', '}', 'if', 'to_clients', ':', 'params', '[', "'to_peers'", ']', '=', 'to_clients', 'if', 'push_data', ':', 'params', '[', "'push_data'", ']', '=', 'push_data', 'client', '.', 'post', '(', "'/rtm/messages'", ',', 'params', '=', 'params', ')', '.', 'json', '(', ')']
在指定会话中发送消息。 :param from_client: 发送者 id :param message: 消息内容 :param to_clients: 接受者 id,只在系统会话中生效 :param transient: 是否以暂态形式发送消息 :param push_data: 推送消息内容,参考:https://url.leanapp.cn/pushData
['在指定会话中发送消息。']
train
https://github.com/leancloud/python-sdk/blob/fea3240257ce65e6a32c7312a5cee1f94a51a587/leancloud/conversation.py#L91-L113
7,012
dropbox/stone
example/backend/ex1/ex1.stoneg.py
ExampleBackend.generate
def generate(self, api): """Generates a file that lists each namespace.""" with self.output_to_relative_path('ex1.out'): for namespace in api.namespaces.values(): self.emit(namespace.name)
python
def generate(self, api): """Generates a file that lists each namespace.""" with self.output_to_relative_path('ex1.out'): for namespace in api.namespaces.values(): self.emit(namespace.name)
['def', 'generate', '(', 'self', ',', 'api', ')', ':', 'with', 'self', '.', 'output_to_relative_path', '(', "'ex1.out'", ')', ':', 'for', 'namespace', 'in', 'api', '.', 'namespaces', '.', 'values', '(', ')', ':', 'self', '.', 'emit', '(', 'namespace', '.', 'name', ')']
Generates a file that lists each namespace.
['Generates', 'a', 'file', 'that', 'lists', 'each', 'namespace', '.']
train
https://github.com/dropbox/stone/blob/2e95cbcd1c48e05cca68c919fd8d24adec6b0f58/example/backend/ex1/ex1.stoneg.py#L5-L9
7,013
python-cas/python-cas
cas.py
CASClientV1.verify_ticket
def verify_ticket(self, ticket): """Verifies CAS 1.0 authentication ticket. Returns username on success and None on failure. """ params = [('ticket', ticket), ('service', self.service_url)] url = (urllib_parse.urljoin(self.server_url, 'validate') + '?' + urllib_parse.urlencode(params)) page = requests.get( url, stream=True, verify=self.verify_ssl_certificate ) try: page_iterator = page.iter_lines(chunk_size=8192) verified = next(page_iterator).strip() if verified == 'yes': return next(page_iterator).strip(), None, None else: return None, None, None finally: page.close()
python
def verify_ticket(self, ticket): """Verifies CAS 1.0 authentication ticket. Returns username on success and None on failure. """ params = [('ticket', ticket), ('service', self.service_url)] url = (urllib_parse.urljoin(self.server_url, 'validate') + '?' + urllib_parse.urlencode(params)) page = requests.get( url, stream=True, verify=self.verify_ssl_certificate ) try: page_iterator = page.iter_lines(chunk_size=8192) verified = next(page_iterator).strip() if verified == 'yes': return next(page_iterator).strip(), None, None else: return None, None, None finally: page.close()
['def', 'verify_ticket', '(', 'self', ',', 'ticket', ')', ':', 'params', '=', '[', '(', "'ticket'", ',', 'ticket', ')', ',', '(', "'service'", ',', 'self', '.', 'service_url', ')', ']', 'url', '=', '(', 'urllib_parse', '.', 'urljoin', '(', 'self', '.', 'server_url', ',', "'validate'", ')', '+', "'?'", '+', 'urllib_parse', '.', 'urlencode', '(', 'params', ')', ')', 'page', '=', 'requests', '.', 'get', '(', 'url', ',', 'stream', '=', 'True', ',', 'verify', '=', 'self', '.', 'verify_ssl_certificate', ')', 'try', ':', 'page_iterator', '=', 'page', '.', 'iter_lines', '(', 'chunk_size', '=', '8192', ')', 'verified', '=', 'next', '(', 'page_iterator', ')', '.', 'strip', '(', ')', 'if', 'verified', '==', "'yes'", ':', 'return', 'next', '(', 'page_iterator', ')', '.', 'strip', '(', ')', ',', 'None', ',', 'None', 'else', ':', 'return', 'None', ',', 'None', ',', 'None', 'finally', ':', 'page', '.', 'close', '(', ')']
Verifies CAS 1.0 authentication ticket. Returns username on success and None on failure.
['Verifies', 'CAS', '1', '.', '0', 'authentication', 'ticket', '.']
train
https://github.com/python-cas/python-cas/blob/42fc76fbd2e50f167e752eba4bf5b0df74a83978/cas.py#L128-L149
7,014
sendgrid/sendgrid-python
sendgrid/helpers/mail/section.py
Section.get
def get(self): """ Get a JSON-ready representation of this Section. :returns: This Section, ready for use in a request body. :rtype: dict """ section = {} if self.key is not None and self.value is not None: section[self.key] = self.value return section
python
def get(self): """ Get a JSON-ready representation of this Section. :returns: This Section, ready for use in a request body. :rtype: dict """ section = {} if self.key is not None and self.value is not None: section[self.key] = self.value return section
['def', 'get', '(', 'self', ')', ':', 'section', '=', '{', '}', 'if', 'self', '.', 'key', 'is', 'not', 'None', 'and', 'self', '.', 'value', 'is', 'not', 'None', ':', 'section', '[', 'self', '.', 'key', ']', '=', 'self', '.', 'value', 'return', 'section']
Get a JSON-ready representation of this Section. :returns: This Section, ready for use in a request body. :rtype: dict
['Get', 'a', 'JSON', '-', 'ready', 'representation', 'of', 'this', 'Section', '.']
train
https://github.com/sendgrid/sendgrid-python/blob/266c2abde7a35dfcce263e06bedc6a0bbdebeac9/sendgrid/helpers/mail/section.py#L54-L64
7,015
tensorflow/mesh
mesh_tensorflow/ops.py
cwise
def cwise(tf_fn, xs, output_dtype=None, grad_function=None, name=None): """Component-wise operation with no broadcasting. Args: tf_fn: a component-wise function taking n tf.Tensor inputs and producing a tf.Tensor output xs: n Tensors output_dtype: an optional dtype grad_function: an optional python function name: an optional string Returns: a Tensor """ return slicewise( tf_fn, xs, output_dtype=output_dtype, splittable_dims=xs[0].shape.dims, grad_function=grad_function, name=name or "cwise")
python
def cwise(tf_fn, xs, output_dtype=None, grad_function=None, name=None): """Component-wise operation with no broadcasting. Args: tf_fn: a component-wise function taking n tf.Tensor inputs and producing a tf.Tensor output xs: n Tensors output_dtype: an optional dtype grad_function: an optional python function name: an optional string Returns: a Tensor """ return slicewise( tf_fn, xs, output_dtype=output_dtype, splittable_dims=xs[0].shape.dims, grad_function=grad_function, name=name or "cwise")
['def', 'cwise', '(', 'tf_fn', ',', 'xs', ',', 'output_dtype', '=', 'None', ',', 'grad_function', '=', 'None', ',', 'name', '=', 'None', ')', ':', 'return', 'slicewise', '(', 'tf_fn', ',', 'xs', ',', 'output_dtype', '=', 'output_dtype', ',', 'splittable_dims', '=', 'xs', '[', '0', ']', '.', 'shape', '.', 'dims', ',', 'grad_function', '=', 'grad_function', ',', 'name', '=', 'name', 'or', '"cwise"', ')']
Component-wise operation with no broadcasting. Args: tf_fn: a component-wise function taking n tf.Tensor inputs and producing a tf.Tensor output xs: n Tensors output_dtype: an optional dtype grad_function: an optional python function name: an optional string Returns: a Tensor
['Component', '-', 'wise', 'operation', 'with', 'no', 'broadcasting', '.']
train
https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/ops.py#L1608-L1624
7,016
openstack/proliantutils
proliantutils/redfish/resources/update_service.py
HPEUpdateService.wait_for_redfish_firmware_update_to_complete
def wait_for_redfish_firmware_update_to_complete(self, redfish_object): """Continuously polls for iLO firmware update to complete. :param redfish_object: redfish instance """ p_state = ['Idle'] c_state = ['Idle'] def has_firmware_flash_completed(): """Checks for completion status of firmware update operation The below table shows the conditions for which the firmware update will be considered as DONE (be it success or error):: +-----------------------------------+-----------------------------+ | Previous state | Current state | +===================================+=============================+ | Idle | Error, Complete | +-----------------------------------+-----------------------------+ | Updating, Verifying, | Complete, Error, | | Uploading, Writing | Unknown, Idle | +-----------------------------------+-----------------------------+ :returns: True upon firmware update completion otherwise False """ curr_state, curr_percent = self.get_firmware_update_progress() p_state[0] = c_state[0] c_state[0] = curr_state if (((p_state[0] in ['Updating', 'Verifying', 'Uploading', 'Writing']) and (c_state[0] in ['Complete', 'Error', 'Unknown', 'Idle'])) or (p_state[0] == 'Idle' and (c_state[0] in ['Complete', 'Error']))): return True return False common.wait_for_operation_to_complete( has_firmware_flash_completed, delay_bw_retries=30, failover_msg='iLO firmware update has failed.' ) common.wait_for_ilo_after_reset(redfish_object)
python
def wait_for_redfish_firmware_update_to_complete(self, redfish_object): """Continuously polls for iLO firmware update to complete. :param redfish_object: redfish instance """ p_state = ['Idle'] c_state = ['Idle'] def has_firmware_flash_completed(): """Checks for completion status of firmware update operation The below table shows the conditions for which the firmware update will be considered as DONE (be it success or error):: +-----------------------------------+-----------------------------+ | Previous state | Current state | +===================================+=============================+ | Idle | Error, Complete | +-----------------------------------+-----------------------------+ | Updating, Verifying, | Complete, Error, | | Uploading, Writing | Unknown, Idle | +-----------------------------------+-----------------------------+ :returns: True upon firmware update completion otherwise False """ curr_state, curr_percent = self.get_firmware_update_progress() p_state[0] = c_state[0] c_state[0] = curr_state if (((p_state[0] in ['Updating', 'Verifying', 'Uploading', 'Writing']) and (c_state[0] in ['Complete', 'Error', 'Unknown', 'Idle'])) or (p_state[0] == 'Idle' and (c_state[0] in ['Complete', 'Error']))): return True return False common.wait_for_operation_to_complete( has_firmware_flash_completed, delay_bw_retries=30, failover_msg='iLO firmware update has failed.' ) common.wait_for_ilo_after_reset(redfish_object)
['def', 'wait_for_redfish_firmware_update_to_complete', '(', 'self', ',', 'redfish_object', ')', ':', 'p_state', '=', '[', "'Idle'", ']', 'c_state', '=', '[', "'Idle'", ']', 'def', 'has_firmware_flash_completed', '(', ')', ':', '"""Checks for completion status of firmware update operation\n\n The below table shows the conditions for which the firmware update\n will be considered as DONE (be it success or error)::\n\n +-----------------------------------+-----------------------------+\n | Previous state | Current state |\n +===================================+=============================+\n | Idle | Error, Complete |\n +-----------------------------------+-----------------------------+\n | Updating, Verifying, | Complete, Error, |\n | Uploading, Writing | Unknown, Idle |\n +-----------------------------------+-----------------------------+\n\n :returns: True upon firmware update completion otherwise False\n """', 'curr_state', ',', 'curr_percent', '=', 'self', '.', 'get_firmware_update_progress', '(', ')', 'p_state', '[', '0', ']', '=', 'c_state', '[', '0', ']', 'c_state', '[', '0', ']', '=', 'curr_state', 'if', '(', '(', '(', 'p_state', '[', '0', ']', 'in', '[', "'Updating'", ',', "'Verifying'", ',', "'Uploading'", ',', "'Writing'", ']', ')', 'and', '(', 'c_state', '[', '0', ']', 'in', '[', "'Complete'", ',', "'Error'", ',', "'Unknown'", ',', "'Idle'", ']', ')', ')', 'or', '(', 'p_state', '[', '0', ']', '==', "'Idle'", 'and', '(', 'c_state', '[', '0', ']', 'in', '[', "'Complete'", ',', "'Error'", ']', ')', ')', ')', ':', 'return', 'True', 'return', 'False', 'common', '.', 'wait_for_operation_to_complete', '(', 'has_firmware_flash_completed', ',', 'delay_bw_retries', '=', '30', ',', 'failover_msg', '=', "'iLO firmware update has failed.'", ')', 'common', '.', 'wait_for_ilo_after_reset', '(', 'redfish_object', ')']
Continuously polls for iLO firmware update to complete. :param redfish_object: redfish instance
['Continuously', 'polls', 'for', 'iLO', 'firmware', 'update', 'to', 'complete', '.']
train
https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/redfish/resources/update_service.py#L97-L139
7,017
Cue/scales
src/greplin/scales/__init__.py
StateTimeStatDict.incr
def incr(self, item, value): """Increment a key by the given amount.""" if item in self: old = UserDict.__getitem__(self, item) else: old = 0.0 self[item] = old + value
python
def incr(self, item, value): """Increment a key by the given amount.""" if item in self: old = UserDict.__getitem__(self, item) else: old = 0.0 self[item] = old + value
['def', 'incr', '(', 'self', ',', 'item', ',', 'value', ')', ':', 'if', 'item', 'in', 'self', ':', 'old', '=', 'UserDict', '.', '__getitem__', '(', 'self', ',', 'item', ')', 'else', ':', 'old', '=', '0.0', 'self', '[', 'item', ']', '=', 'old', '+', 'value']
Increment a key by the given amount.
['Increment', 'a', 'key', 'by', 'the', 'given', 'amount', '.']
train
https://github.com/Cue/scales/blob/0aced26eb050ceb98ee9d5d6cdca8db448666986/src/greplin/scales/__init__.py#L617-L623
7,018
MIT-LCP/wfdb-python
wfdb/io/record.py
BaseRecord.check_field
def check_field(self, field, required_channels='all'): """ Check whether a single field is valid in its basic form. Does not check compatibility with other fields. Parameters ---------- field : str The field name required_channels : list, optional Used for signal specification fields. All channels are checked for their integrity if present, but channels that do not lie in this field may be None. Notes ----- This function is called from wrheader to check fields before writing. It is also supposed to be usable at any point to check a specific field. """ item = getattr(self, field) if item is None: raise Exception('Missing field required: %s' % field) # We should have a list specifying these automatically. # Whether the item should be a list. Watch out for required_channels for `segments` expect_list = True if field in LIST_FIELDS else False # Check the type of the field (and of its elements if it should # be a list) _check_item_type(item, field_name=field, allowed_types=ALLOWED_TYPES[field], expect_list=expect_list, required_channels=required_channels) # Individual specific field checks if field in ['d_signal', 'p_signal']: check_np_array(item=item, field_name=field, ndim=2, parent_class=(lambda f: np.integer if f == 'd_signal' else np.floating)(field)) elif field in ['e_d_signal', 'e_p_signal']: for ch in range(len(item)): check_np_array(item=item[ch], field_name=field, ndim=1, parent_class=(lambda f: np.integer if f == 'e_d_signal' else np.floating)(field), channel_num=ch) # Record specification fields elif field == 'record_name': # Allow letters, digits, hyphens, and underscores. accepted_string = re.match('[-\w]+', self.record_name) if not accepted_string or accepted_string.string != self.record_name: raise ValueError('record_name must only comprise of letters, digits, hyphens, and underscores.') elif field == 'n_seg': if self.n_seg <= 0: raise ValueError('n_seg must be a positive integer') elif field == 'n_sig': if self.n_sig <= 0: raise ValueError('n_sig must be a positive integer') elif field == 'fs': if self.fs <= 0: raise ValueError('fs must be a positive number') elif field == 'counter_freq': if self.counter_freq <= 0: raise ValueError('counter_freq must be a positive number') elif field == 'base_counter': if self.base_counter <= 0: raise ValueError('base_counter must be a positive number') elif field == 'sig_len': if self.sig_len < 0: raise ValueError('sig_len must be a non-negative integer') # Signal specification fields elif field in _header.SIGNAL_SPECS.index: if required_channels == 'all': required_channels = range(len(item)) for ch in range(len(item)): # If the element is allowed to be None if ch not in required_channels: if item[ch] is None: continue if field == 'file_name': # Check for file_name characters accepted_string = re.match('[-\w]+\.?[\w]+', item[ch]) if not accepted_string or accepted_string.string != item[ch]: raise ValueError('File names should only contain alphanumerics, hyphens, and an extension. eg. record-100.dat') # Check that dat files are grouped together if not is_monotonic(self.file_name): raise ValueError('Signals in a record that share a given file must be consecutive.') elif field == 'fmt': if item[ch] not in _signal.DAT_FMTS: raise ValueError('File formats must be valid WFDB dat formats:', _signal.DAT_FMTS) elif field == 'samps_per_frame': if item[ch] < 1: raise ValueError('samps_per_frame values must be positive integers') elif field == 'skew': if item[ch] < 0: raise ValueError('skew values must be non-negative integers') elif field == 'byte_offset': if item[ch] < 0: raise ValueError('byte_offset values must be non-negative integers') elif field == 'adc_gain': if item[ch] <= 0: raise ValueError('adc_gain values must be positive') elif field == 'baseline': # Original WFDB library 10.5.24 only has 4 bytes for # baseline. if item[ch] < -2147483648 or item[ch] > 2147483648: raise ValueError('baseline values must be between -2147483648 (-2^31) and 2147483647 (2^31 -1)') elif field == 'units': if re.search('\s', item[ch]): raise ValueError('units strings may not contain whitespaces.') elif field == 'adc_res': if item[ch] < 0: raise ValueError('adc_res values must be non-negative integers') elif field == 'block_size': if item[ch] < 0: raise ValueError('block_size values must be non-negative integers') elif field == 'sig_name': if re.search('\s', item[ch]): raise ValueError('sig_name strings may not contain whitespaces.') if len(set(item)) != len(item): raise ValueError('sig_name strings must be unique.') # Segment specification fields and comments elif field in _header.SEGMENT_SPECS.index: for ch in range(len(item)): if field == 'seg_name': # Segment names must be alphanumerics or just a # single '~' if item[ch] == '~': continue accepted_string = re.match('[-\w]+', item[ch]) if not accepted_string or accepted_string.string != item[ch]: raise ValueError("Non-null segment names may only contain alphanumerics and dashes. Null segment names must be set to '~'") elif field == 'seg_len': # For records with more than 1 segment, the first # segment may be the layout specification segment # with a length of 0 min_len = 0 if ch == 0 else 1 if item[ch] < min_len: raise ValueError('seg_len values must be positive integers. Only seg_len[0] may be 0 to indicate a layout segment') # Comment field elif field == 'comments': if item[ch].startswith('#'): print("Note: comment strings do not need to begin with '#'. This library adds them automatically.") if re.search('[\t\n\r\f\v]', item[ch]): raise ValueError('comments may not contain tabs or newlines (they may contain spaces and underscores).')
python
def check_field(self, field, required_channels='all'): """ Check whether a single field is valid in its basic form. Does not check compatibility with other fields. Parameters ---------- field : str The field name required_channels : list, optional Used for signal specification fields. All channels are checked for their integrity if present, but channels that do not lie in this field may be None. Notes ----- This function is called from wrheader to check fields before writing. It is also supposed to be usable at any point to check a specific field. """ item = getattr(self, field) if item is None: raise Exception('Missing field required: %s' % field) # We should have a list specifying these automatically. # Whether the item should be a list. Watch out for required_channels for `segments` expect_list = True if field in LIST_FIELDS else False # Check the type of the field (and of its elements if it should # be a list) _check_item_type(item, field_name=field, allowed_types=ALLOWED_TYPES[field], expect_list=expect_list, required_channels=required_channels) # Individual specific field checks if field in ['d_signal', 'p_signal']: check_np_array(item=item, field_name=field, ndim=2, parent_class=(lambda f: np.integer if f == 'd_signal' else np.floating)(field)) elif field in ['e_d_signal', 'e_p_signal']: for ch in range(len(item)): check_np_array(item=item[ch], field_name=field, ndim=1, parent_class=(lambda f: np.integer if f == 'e_d_signal' else np.floating)(field), channel_num=ch) # Record specification fields elif field == 'record_name': # Allow letters, digits, hyphens, and underscores. accepted_string = re.match('[-\w]+', self.record_name) if not accepted_string or accepted_string.string != self.record_name: raise ValueError('record_name must only comprise of letters, digits, hyphens, and underscores.') elif field == 'n_seg': if self.n_seg <= 0: raise ValueError('n_seg must be a positive integer') elif field == 'n_sig': if self.n_sig <= 0: raise ValueError('n_sig must be a positive integer') elif field == 'fs': if self.fs <= 0: raise ValueError('fs must be a positive number') elif field == 'counter_freq': if self.counter_freq <= 0: raise ValueError('counter_freq must be a positive number') elif field == 'base_counter': if self.base_counter <= 0: raise ValueError('base_counter must be a positive number') elif field == 'sig_len': if self.sig_len < 0: raise ValueError('sig_len must be a non-negative integer') # Signal specification fields elif field in _header.SIGNAL_SPECS.index: if required_channels == 'all': required_channels = range(len(item)) for ch in range(len(item)): # If the element is allowed to be None if ch not in required_channels: if item[ch] is None: continue if field == 'file_name': # Check for file_name characters accepted_string = re.match('[-\w]+\.?[\w]+', item[ch]) if not accepted_string or accepted_string.string != item[ch]: raise ValueError('File names should only contain alphanumerics, hyphens, and an extension. eg. record-100.dat') # Check that dat files are grouped together if not is_monotonic(self.file_name): raise ValueError('Signals in a record that share a given file must be consecutive.') elif field == 'fmt': if item[ch] not in _signal.DAT_FMTS: raise ValueError('File formats must be valid WFDB dat formats:', _signal.DAT_FMTS) elif field == 'samps_per_frame': if item[ch] < 1: raise ValueError('samps_per_frame values must be positive integers') elif field == 'skew': if item[ch] < 0: raise ValueError('skew values must be non-negative integers') elif field == 'byte_offset': if item[ch] < 0: raise ValueError('byte_offset values must be non-negative integers') elif field == 'adc_gain': if item[ch] <= 0: raise ValueError('adc_gain values must be positive') elif field == 'baseline': # Original WFDB library 10.5.24 only has 4 bytes for # baseline. if item[ch] < -2147483648 or item[ch] > 2147483648: raise ValueError('baseline values must be between -2147483648 (-2^31) and 2147483647 (2^31 -1)') elif field == 'units': if re.search('\s', item[ch]): raise ValueError('units strings may not contain whitespaces.') elif field == 'adc_res': if item[ch] < 0: raise ValueError('adc_res values must be non-negative integers') elif field == 'block_size': if item[ch] < 0: raise ValueError('block_size values must be non-negative integers') elif field == 'sig_name': if re.search('\s', item[ch]): raise ValueError('sig_name strings may not contain whitespaces.') if len(set(item)) != len(item): raise ValueError('sig_name strings must be unique.') # Segment specification fields and comments elif field in _header.SEGMENT_SPECS.index: for ch in range(len(item)): if field == 'seg_name': # Segment names must be alphanumerics or just a # single '~' if item[ch] == '~': continue accepted_string = re.match('[-\w]+', item[ch]) if not accepted_string or accepted_string.string != item[ch]: raise ValueError("Non-null segment names may only contain alphanumerics and dashes. Null segment names must be set to '~'") elif field == 'seg_len': # For records with more than 1 segment, the first # segment may be the layout specification segment # with a length of 0 min_len = 0 if ch == 0 else 1 if item[ch] < min_len: raise ValueError('seg_len values must be positive integers. Only seg_len[0] may be 0 to indicate a layout segment') # Comment field elif field == 'comments': if item[ch].startswith('#'): print("Note: comment strings do not need to begin with '#'. This library adds them automatically.") if re.search('[\t\n\r\f\v]', item[ch]): raise ValueError('comments may not contain tabs or newlines (they may contain spaces and underscores).')
['def', 'check_field', '(', 'self', ',', 'field', ',', 'required_channels', '=', "'all'", ')', ':', 'item', '=', 'getattr', '(', 'self', ',', 'field', ')', 'if', 'item', 'is', 'None', ':', 'raise', 'Exception', '(', "'Missing field required: %s'", '%', 'field', ')', '# We should have a list specifying these automatically.', '# Whether the item should be a list. Watch out for required_channels for `segments`', 'expect_list', '=', 'True', 'if', 'field', 'in', 'LIST_FIELDS', 'else', 'False', '# Check the type of the field (and of its elements if it should', '# be a list)', '_check_item_type', '(', 'item', ',', 'field_name', '=', 'field', ',', 'allowed_types', '=', 'ALLOWED_TYPES', '[', 'field', ']', ',', 'expect_list', '=', 'expect_list', ',', 'required_channels', '=', 'required_channels', ')', '# Individual specific field checks', 'if', 'field', 'in', '[', "'d_signal'", ',', "'p_signal'", ']', ':', 'check_np_array', '(', 'item', '=', 'item', ',', 'field_name', '=', 'field', ',', 'ndim', '=', '2', ',', 'parent_class', '=', '(', 'lambda', 'f', ':', 'np', '.', 'integer', 'if', 'f', '==', "'d_signal'", 'else', 'np', '.', 'floating', ')', '(', 'field', ')', ')', 'elif', 'field', 'in', '[', "'e_d_signal'", ',', "'e_p_signal'", ']', ':', 'for', 'ch', 'in', 'range', '(', 'len', '(', 'item', ')', ')', ':', 'check_np_array', '(', 'item', '=', 'item', '[', 'ch', ']', ',', 'field_name', '=', 'field', ',', 'ndim', '=', '1', ',', 'parent_class', '=', '(', 'lambda', 'f', ':', 'np', '.', 'integer', 'if', 'f', '==', "'e_d_signal'", 'else', 'np', '.', 'floating', ')', '(', 'field', ')', ',', 'channel_num', '=', 'ch', ')', '# Record specification fields', 'elif', 'field', '==', "'record_name'", ':', '# Allow letters, digits, hyphens, and underscores.', 'accepted_string', '=', 're', '.', 'match', '(', "'[-\\w]+'", ',', 'self', '.', 'record_name', ')', 'if', 'not', 'accepted_string', 'or', 'accepted_string', '.', 'string', '!=', 'self', '.', 'record_name', ':', 'raise', 'ValueError', '(', "'record_name must only comprise of letters, digits, hyphens, and underscores.'", ')', 'elif', 'field', '==', "'n_seg'", ':', 'if', 'self', '.', 'n_seg', '<=', '0', ':', 'raise', 'ValueError', '(', "'n_seg must be a positive integer'", ')', 'elif', 'field', '==', "'n_sig'", ':', 'if', 'self', '.', 'n_sig', '<=', '0', ':', 'raise', 'ValueError', '(', "'n_sig must be a positive integer'", ')', 'elif', 'field', '==', "'fs'", ':', 'if', 'self', '.', 'fs', '<=', '0', ':', 'raise', 'ValueError', '(', "'fs must be a positive number'", ')', 'elif', 'field', '==', "'counter_freq'", ':', 'if', 'self', '.', 'counter_freq', '<=', '0', ':', 'raise', 'ValueError', '(', "'counter_freq must be a positive number'", ')', 'elif', 'field', '==', "'base_counter'", ':', 'if', 'self', '.', 'base_counter', '<=', '0', ':', 'raise', 'ValueError', '(', "'base_counter must be a positive number'", ')', 'elif', 'field', '==', "'sig_len'", ':', 'if', 'self', '.', 'sig_len', '<', '0', ':', 'raise', 'ValueError', '(', "'sig_len must be a non-negative integer'", ')', '# Signal specification fields', 'elif', 'field', 'in', '_header', '.', 'SIGNAL_SPECS', '.', 'index', ':', 'if', 'required_channels', '==', "'all'", ':', 'required_channels', '=', 'range', '(', 'len', '(', 'item', ')', ')', 'for', 'ch', 'in', 'range', '(', 'len', '(', 'item', ')', ')', ':', '# If the element is allowed to be None', 'if', 'ch', 'not', 'in', 'required_channels', ':', 'if', 'item', '[', 'ch', ']', 'is', 'None', ':', 'continue', 'if', 'field', '==', "'file_name'", ':', '# Check for file_name characters', 'accepted_string', '=', 're', '.', 'match', '(', "'[-\\w]+\\.?[\\w]+'", ',', 'item', '[', 'ch', ']', ')', 'if', 'not', 'accepted_string', 'or', 'accepted_string', '.', 'string', '!=', 'item', '[', 'ch', ']', ':', 'raise', 'ValueError', '(', "'File names should only contain alphanumerics, hyphens, and an extension. eg. record-100.dat'", ')', '# Check that dat files are grouped together', 'if', 'not', 'is_monotonic', '(', 'self', '.', 'file_name', ')', ':', 'raise', 'ValueError', '(', "'Signals in a record that share a given file must be consecutive.'", ')', 'elif', 'field', '==', "'fmt'", ':', 'if', 'item', '[', 'ch', ']', 'not', 'in', '_signal', '.', 'DAT_FMTS', ':', 'raise', 'ValueError', '(', "'File formats must be valid WFDB dat formats:'", ',', '_signal', '.', 'DAT_FMTS', ')', 'elif', 'field', '==', "'samps_per_frame'", ':', 'if', 'item', '[', 'ch', ']', '<', '1', ':', 'raise', 'ValueError', '(', "'samps_per_frame values must be positive integers'", ')', 'elif', 'field', '==', "'skew'", ':', 'if', 'item', '[', 'ch', ']', '<', '0', ':', 'raise', 'ValueError', '(', "'skew values must be non-negative integers'", ')', 'elif', 'field', '==', "'byte_offset'", ':', 'if', 'item', '[', 'ch', ']', '<', '0', ':', 'raise', 'ValueError', '(', "'byte_offset values must be non-negative integers'", ')', 'elif', 'field', '==', "'adc_gain'", ':', 'if', 'item', '[', 'ch', ']', '<=', '0', ':', 'raise', 'ValueError', '(', "'adc_gain values must be positive'", ')', 'elif', 'field', '==', "'baseline'", ':', '# Original WFDB library 10.5.24 only has 4 bytes for', '# baseline.', 'if', 'item', '[', 'ch', ']', '<', '-', '2147483648', 'or', 'item', '[', 'ch', ']', '>', '2147483648', ':', 'raise', 'ValueError', '(', "'baseline values must be between -2147483648 (-2^31) and 2147483647 (2^31 -1)'", ')', 'elif', 'field', '==', "'units'", ':', 'if', 're', '.', 'search', '(', "'\\s'", ',', 'item', '[', 'ch', ']', ')', ':', 'raise', 'ValueError', '(', "'units strings may not contain whitespaces.'", ')', 'elif', 'field', '==', "'adc_res'", ':', 'if', 'item', '[', 'ch', ']', '<', '0', ':', 'raise', 'ValueError', '(', "'adc_res values must be non-negative integers'", ')', 'elif', 'field', '==', "'block_size'", ':', 'if', 'item', '[', 'ch', ']', '<', '0', ':', 'raise', 'ValueError', '(', "'block_size values must be non-negative integers'", ')', 'elif', 'field', '==', "'sig_name'", ':', 'if', 're', '.', 'search', '(', "'\\s'", ',', 'item', '[', 'ch', ']', ')', ':', 'raise', 'ValueError', '(', "'sig_name strings may not contain whitespaces.'", ')', 'if', 'len', '(', 'set', '(', 'item', ')', ')', '!=', 'len', '(', 'item', ')', ':', 'raise', 'ValueError', '(', "'sig_name strings must be unique.'", ')', '# Segment specification fields and comments', 'elif', 'field', 'in', '_header', '.', 'SEGMENT_SPECS', '.', 'index', ':', 'for', 'ch', 'in', 'range', '(', 'len', '(', 'item', ')', ')', ':', 'if', 'field', '==', "'seg_name'", ':', '# Segment names must be alphanumerics or just a', "# single '~'", 'if', 'item', '[', 'ch', ']', '==', "'~'", ':', 'continue', 'accepted_string', '=', 're', '.', 'match', '(', "'[-\\w]+'", ',', 'item', '[', 'ch', ']', ')', 'if', 'not', 'accepted_string', 'or', 'accepted_string', '.', 'string', '!=', 'item', '[', 'ch', ']', ':', 'raise', 'ValueError', '(', '"Non-null segment names may only contain alphanumerics and dashes. Null segment names must be set to \'~\'"', ')', 'elif', 'field', '==', "'seg_len'", ':', '# For records with more than 1 segment, the first', '# segment may be the layout specification segment', '# with a length of 0', 'min_len', '=', '0', 'if', 'ch', '==', '0', 'else', '1', 'if', 'item', '[', 'ch', ']', '<', 'min_len', ':', 'raise', 'ValueError', '(', "'seg_len values must be positive integers. Only seg_len[0] may be 0 to indicate a layout segment'", ')', '# Comment field', 'elif', 'field', '==', "'comments'", ':', 'if', 'item', '[', 'ch', ']', '.', 'startswith', '(', "'#'", ')', ':', 'print', '(', '"Note: comment strings do not need to begin with \'#\'. This library adds them automatically."', ')', 'if', 're', '.', 'search', '(', "'[\\t\\n\\r\\f\\v]'", ',', 'item', '[', 'ch', ']', ')', ':', 'raise', 'ValueError', '(', "'comments may not contain tabs or newlines (they may contain spaces and underscores).'", ')']
Check whether a single field is valid in its basic form. Does not check compatibility with other fields. Parameters ---------- field : str The field name required_channels : list, optional Used for signal specification fields. All channels are checked for their integrity if present, but channels that do not lie in this field may be None. Notes ----- This function is called from wrheader to check fields before writing. It is also supposed to be usable at any point to check a specific field.
['Check', 'whether', 'a', 'single', 'field', 'is', 'valid', 'in', 'its', 'basic', 'form', '.', 'Does', 'not', 'check', 'compatibility', 'with', 'other', 'fields', '.']
train
https://github.com/MIT-LCP/wfdb-python/blob/cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c/wfdb/io/record.py#L35-L186
7,019
openstack/networking-cisco
networking_cisco/apps/saf/server/services/firewall/native/fabric_setup_base.py
FabricBase._create_os_nwk
def _create_os_nwk(self, tenant_id, tenant_name, direc, is_fw_virt=False): """Function to create Openstack network. This function does the following: 1. Allocate an IP address with the net_id/subnet_id not filled in the DB. 2. Fill network parameters w/o vlan, segmentation_id, because we don't have net_id to store in DB. 3. Create a Openstack network, using the network parameters created in the previous step. At this point we will have a net_id. 4. Allocate segmentation_id, vlan and along with net_id store it in the DB. 5. Update IP DB with net_id created in step 3. So, after restart deallocate any IP DB entries that does not have a net_id/subnet_id. """ subnet = self.alloc_retrieve_subnet_info(tenant_id, direc) network = self.retrieve_network_info(tenant_id, direc) net_id, subnet_id = self.create_openstack_network(subnet, network, tenant_id, tenant_name, direc) if not net_id or not subnet_id: return net_id, subnet_id self.allocate_seg_vlan(net_id, is_fw_virt, direc, tenant_id) self.update_subnet_db_info(tenant_id, direc, net_id, subnet_id) return net_id, subnet_id
python
def _create_os_nwk(self, tenant_id, tenant_name, direc, is_fw_virt=False): """Function to create Openstack network. This function does the following: 1. Allocate an IP address with the net_id/subnet_id not filled in the DB. 2. Fill network parameters w/o vlan, segmentation_id, because we don't have net_id to store in DB. 3. Create a Openstack network, using the network parameters created in the previous step. At this point we will have a net_id. 4. Allocate segmentation_id, vlan and along with net_id store it in the DB. 5. Update IP DB with net_id created in step 3. So, after restart deallocate any IP DB entries that does not have a net_id/subnet_id. """ subnet = self.alloc_retrieve_subnet_info(tenant_id, direc) network = self.retrieve_network_info(tenant_id, direc) net_id, subnet_id = self.create_openstack_network(subnet, network, tenant_id, tenant_name, direc) if not net_id or not subnet_id: return net_id, subnet_id self.allocate_seg_vlan(net_id, is_fw_virt, direc, tenant_id) self.update_subnet_db_info(tenant_id, direc, net_id, subnet_id) return net_id, subnet_id
['def', '_create_os_nwk', '(', 'self', ',', 'tenant_id', ',', 'tenant_name', ',', 'direc', ',', 'is_fw_virt', '=', 'False', ')', ':', 'subnet', '=', 'self', '.', 'alloc_retrieve_subnet_info', '(', 'tenant_id', ',', 'direc', ')', 'network', '=', 'self', '.', 'retrieve_network_info', '(', 'tenant_id', ',', 'direc', ')', 'net_id', ',', 'subnet_id', '=', 'self', '.', 'create_openstack_network', '(', 'subnet', ',', 'network', ',', 'tenant_id', ',', 'tenant_name', ',', 'direc', ')', 'if', 'not', 'net_id', 'or', 'not', 'subnet_id', ':', 'return', 'net_id', ',', 'subnet_id', 'self', '.', 'allocate_seg_vlan', '(', 'net_id', ',', 'is_fw_virt', ',', 'direc', ',', 'tenant_id', ')', 'self', '.', 'update_subnet_db_info', '(', 'tenant_id', ',', 'direc', ',', 'net_id', ',', 'subnet_id', ')', 'return', 'net_id', ',', 'subnet_id']
Function to create Openstack network. This function does the following: 1. Allocate an IP address with the net_id/subnet_id not filled in the DB. 2. Fill network parameters w/o vlan, segmentation_id, because we don't have net_id to store in DB. 3. Create a Openstack network, using the network parameters created in the previous step. At this point we will have a net_id. 4. Allocate segmentation_id, vlan and along with net_id store it in the DB. 5. Update IP DB with net_id created in step 3. So, after restart deallocate any IP DB entries that does not have a net_id/subnet_id.
['Function', 'to', 'create', 'Openstack', 'network', '.']
train
https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/server/services/firewall/native/fabric_setup_base.py#L1021-L1045
7,020
saltstack/salt
salt/modules/at.py
atq
def atq(tag=None): ''' List all queued and running jobs or only those with an optional 'tag'. CLI Example: .. code-block:: bash salt '*' at.atq salt '*' at.atq [tag] salt '*' at.atq [job number] ''' jobs = [] # Shim to produce output similar to what __virtual__() should do # but __salt__ isn't available in __virtual__() # Tested on CentOS 5.8 if __grains__['os_family'] == 'RedHat': output = _cmd('at', '-l') else: output = _cmd('atq') if output is None: return '\'at.atq\' is not available.' # No jobs so return if output == '': return {'jobs': jobs} # Jobs created with at.at() will use the following # comment to denote a tagged job. job_kw_regex = re.compile(r'^### SALT: (\w+)') # Split each job into a dictionary and handle # pulling out tags or only listing jobs with a certain # tag for line in output.splitlines(): job_tag = '' # Redhat/CentOS if __grains__['os_family'] == 'RedHat': job, spec = line.split('\t') specs = spec.split() elif __grains__['os'] == 'OpenBSD': if line.startswith(' Rank'): continue else: tmp = line.split() timestr = ' '.join(tmp[1:5]) job = tmp[6] specs = datetime.datetime(*(time.strptime(timestr, '%b %d, %Y ' '%H:%M')[0:5])).isoformat().split('T') specs.append(tmp[7]) specs.append(tmp[5]) elif __grains__['os'] == 'FreeBSD': if line.startswith('Date'): continue else: tmp = line.split() timestr = ' '.join(tmp[1:6]) job = tmp[8] specs = datetime.datetime(*(time.strptime(timestr, '%b %d %H:%M:%S %Z %Y')[0:5])).isoformat().split('T') specs.append(tmp[7]) specs.append(tmp[6]) else: job, spec = line.split('\t') tmp = spec.split() timestr = ' '.join(tmp[0:5]) specs = datetime.datetime(*(time.strptime(timestr) [0:5])).isoformat().split('T') specs.append(tmp[5]) specs.append(tmp[6]) # Search for any tags atc_out = _cmd('at', '-c', job) for line in atc_out.splitlines(): tmp = job_kw_regex.match(line) if tmp: job_tag = tmp.groups()[0] if __grains__['os'] in BSD: job = six.text_type(job) else: job = int(job) # If a tag is supplied, only list jobs with that tag if tag: # TODO: Looks like there is a difference between salt and salt-call # If I don't wrap job in an int(), it fails on salt but works on # salt-call. With the int(), it fails with salt-call but not salt. if tag == job_tag or tag == job: jobs.append({'job': job, 'date': specs[0], 'time': specs[1], 'queue': specs[2], 'user': specs[3], 'tag': job_tag}) else: jobs.append({'job': job, 'date': specs[0], 'time': specs[1], 'queue': specs[2], 'user': specs[3], 'tag': job_tag}) return {'jobs': jobs}
python
def atq(tag=None): ''' List all queued and running jobs or only those with an optional 'tag'. CLI Example: .. code-block:: bash salt '*' at.atq salt '*' at.atq [tag] salt '*' at.atq [job number] ''' jobs = [] # Shim to produce output similar to what __virtual__() should do # but __salt__ isn't available in __virtual__() # Tested on CentOS 5.8 if __grains__['os_family'] == 'RedHat': output = _cmd('at', '-l') else: output = _cmd('atq') if output is None: return '\'at.atq\' is not available.' # No jobs so return if output == '': return {'jobs': jobs} # Jobs created with at.at() will use the following # comment to denote a tagged job. job_kw_regex = re.compile(r'^### SALT: (\w+)') # Split each job into a dictionary and handle # pulling out tags or only listing jobs with a certain # tag for line in output.splitlines(): job_tag = '' # Redhat/CentOS if __grains__['os_family'] == 'RedHat': job, spec = line.split('\t') specs = spec.split() elif __grains__['os'] == 'OpenBSD': if line.startswith(' Rank'): continue else: tmp = line.split() timestr = ' '.join(tmp[1:5]) job = tmp[6] specs = datetime.datetime(*(time.strptime(timestr, '%b %d, %Y ' '%H:%M')[0:5])).isoformat().split('T') specs.append(tmp[7]) specs.append(tmp[5]) elif __grains__['os'] == 'FreeBSD': if line.startswith('Date'): continue else: tmp = line.split() timestr = ' '.join(tmp[1:6]) job = tmp[8] specs = datetime.datetime(*(time.strptime(timestr, '%b %d %H:%M:%S %Z %Y')[0:5])).isoformat().split('T') specs.append(tmp[7]) specs.append(tmp[6]) else: job, spec = line.split('\t') tmp = spec.split() timestr = ' '.join(tmp[0:5]) specs = datetime.datetime(*(time.strptime(timestr) [0:5])).isoformat().split('T') specs.append(tmp[5]) specs.append(tmp[6]) # Search for any tags atc_out = _cmd('at', '-c', job) for line in atc_out.splitlines(): tmp = job_kw_regex.match(line) if tmp: job_tag = tmp.groups()[0] if __grains__['os'] in BSD: job = six.text_type(job) else: job = int(job) # If a tag is supplied, only list jobs with that tag if tag: # TODO: Looks like there is a difference between salt and salt-call # If I don't wrap job in an int(), it fails on salt but works on # salt-call. With the int(), it fails with salt-call but not salt. if tag == job_tag or tag == job: jobs.append({'job': job, 'date': specs[0], 'time': specs[1], 'queue': specs[2], 'user': specs[3], 'tag': job_tag}) else: jobs.append({'job': job, 'date': specs[0], 'time': specs[1], 'queue': specs[2], 'user': specs[3], 'tag': job_tag}) return {'jobs': jobs}
['def', 'atq', '(', 'tag', '=', 'None', ')', ':', 'jobs', '=', '[', ']', '# Shim to produce output similar to what __virtual__() should do', "# but __salt__ isn't available in __virtual__()", '# Tested on CentOS 5.8', 'if', '__grains__', '[', "'os_family'", ']', '==', "'RedHat'", ':', 'output', '=', '_cmd', '(', "'at'", ',', "'-l'", ')', 'else', ':', 'output', '=', '_cmd', '(', "'atq'", ')', 'if', 'output', 'is', 'None', ':', 'return', "'\\'at.atq\\' is not available.'", '# No jobs so return', 'if', 'output', '==', "''", ':', 'return', '{', "'jobs'", ':', 'jobs', '}', '# Jobs created with at.at() will use the following', '# comment to denote a tagged job.', 'job_kw_regex', '=', 're', '.', 'compile', '(', "r'^### SALT: (\\w+)'", ')', '# Split each job into a dictionary and handle', '# pulling out tags or only listing jobs with a certain', '# tag', 'for', 'line', 'in', 'output', '.', 'splitlines', '(', ')', ':', 'job_tag', '=', "''", '# Redhat/CentOS', 'if', '__grains__', '[', "'os_family'", ']', '==', "'RedHat'", ':', 'job', ',', 'spec', '=', 'line', '.', 'split', '(', "'\\t'", ')', 'specs', '=', 'spec', '.', 'split', '(', ')', 'elif', '__grains__', '[', "'os'", ']', '==', "'OpenBSD'", ':', 'if', 'line', '.', 'startswith', '(', "' Rank'", ')', ':', 'continue', 'else', ':', 'tmp', '=', 'line', '.', 'split', '(', ')', 'timestr', '=', "' '", '.', 'join', '(', 'tmp', '[', '1', ':', '5', ']', ')', 'job', '=', 'tmp', '[', '6', ']', 'specs', '=', 'datetime', '.', 'datetime', '(', '*', '(', 'time', '.', 'strptime', '(', 'timestr', ',', "'%b %d, %Y '", "'%H:%M'", ')', '[', '0', ':', '5', ']', ')', ')', '.', 'isoformat', '(', ')', '.', 'split', '(', "'T'", ')', 'specs', '.', 'append', '(', 'tmp', '[', '7', ']', ')', 'specs', '.', 'append', '(', 'tmp', '[', '5', ']', ')', 'elif', '__grains__', '[', "'os'", ']', '==', "'FreeBSD'", ':', 'if', 'line', '.', 'startswith', '(', "'Date'", ')', ':', 'continue', 'else', ':', 'tmp', '=', 'line', '.', 'split', '(', ')', 'timestr', '=', "' '", '.', 'join', '(', 'tmp', '[', '1', ':', '6', ']', ')', 'job', '=', 'tmp', '[', '8', ']', 'specs', '=', 'datetime', '.', 'datetime', '(', '*', '(', 'time', '.', 'strptime', '(', 'timestr', ',', "'%b %d %H:%M:%S %Z %Y'", ')', '[', '0', ':', '5', ']', ')', ')', '.', 'isoformat', '(', ')', '.', 'split', '(', "'T'", ')', 'specs', '.', 'append', '(', 'tmp', '[', '7', ']', ')', 'specs', '.', 'append', '(', 'tmp', '[', '6', ']', ')', 'else', ':', 'job', ',', 'spec', '=', 'line', '.', 'split', '(', "'\\t'", ')', 'tmp', '=', 'spec', '.', 'split', '(', ')', 'timestr', '=', "' '", '.', 'join', '(', 'tmp', '[', '0', ':', '5', ']', ')', 'specs', '=', 'datetime', '.', 'datetime', '(', '*', '(', 'time', '.', 'strptime', '(', 'timestr', ')', '[', '0', ':', '5', ']', ')', ')', '.', 'isoformat', '(', ')', '.', 'split', '(', "'T'", ')', 'specs', '.', 'append', '(', 'tmp', '[', '5', ']', ')', 'specs', '.', 'append', '(', 'tmp', '[', '6', ']', ')', '# Search for any tags', 'atc_out', '=', '_cmd', '(', "'at'", ',', "'-c'", ',', 'job', ')', 'for', 'line', 'in', 'atc_out', '.', 'splitlines', '(', ')', ':', 'tmp', '=', 'job_kw_regex', '.', 'match', '(', 'line', ')', 'if', 'tmp', ':', 'job_tag', '=', 'tmp', '.', 'groups', '(', ')', '[', '0', ']', 'if', '__grains__', '[', "'os'", ']', 'in', 'BSD', ':', 'job', '=', 'six', '.', 'text_type', '(', 'job', ')', 'else', ':', 'job', '=', 'int', '(', 'job', ')', '# If a tag is supplied, only list jobs with that tag', 'if', 'tag', ':', '# TODO: Looks like there is a difference between salt and salt-call', "# If I don't wrap job in an int(), it fails on salt but works on", '# salt-call. With the int(), it fails with salt-call but not salt.', 'if', 'tag', '==', 'job_tag', 'or', 'tag', '==', 'job', ':', 'jobs', '.', 'append', '(', '{', "'job'", ':', 'job', ',', "'date'", ':', 'specs', '[', '0', ']', ',', "'time'", ':', 'specs', '[', '1', ']', ',', "'queue'", ':', 'specs', '[', '2', ']', ',', "'user'", ':', 'specs', '[', '3', ']', ',', "'tag'", ':', 'job_tag', '}', ')', 'else', ':', 'jobs', '.', 'append', '(', '{', "'job'", ':', 'job', ',', "'date'", ':', 'specs', '[', '0', ']', ',', "'time'", ':', 'specs', '[', '1', ']', ',', "'queue'", ':', 'specs', '[', '2', ']', ',', "'user'", ':', 'specs', '[', '3', ']', ',', "'tag'", ':', 'job_tag', '}', ')', 'return', '{', "'jobs'", ':', 'jobs', '}']
List all queued and running jobs or only those with an optional 'tag'. CLI Example: .. code-block:: bash salt '*' at.atq salt '*' at.atq [tag] salt '*' at.atq [job number]
['List', 'all', 'queued', 'and', 'running', 'jobs', 'or', 'only', 'those', 'with', 'an', 'optional', 'tag', '.']
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/at.py#L63-L163
7,021
gabstopper/smc-python
smc/elements/netlink.py
StaticNetlink.update_or_create
def update_or_create(cls, with_status=False, **kwargs): """ Update or create static netlink. DNS entry differences are not resolved, instead any entries provided will be the final state for this netlink. If the intent is to add/remove DNS entries you can use the :meth:`~domain_server_address` method to add or remove. :raises CreateElementFailed: failed creating element :return: element instance by type or 3-tuple if with_status set """ dns_address = kwargs.pop('domain_server_address', []) element, updated, created = super(StaticNetlink, cls).update_or_create( with_status=True, defer_update=True, **kwargs) if not created: if dns_address: new_entries = RankedDNSAddress([]) new_entries.add(dns_address) element.data.update(domain_server_address=new_entries.entries) updated = True if updated: element.update() if with_status: return element, updated, created return element
python
def update_or_create(cls, with_status=False, **kwargs): """ Update or create static netlink. DNS entry differences are not resolved, instead any entries provided will be the final state for this netlink. If the intent is to add/remove DNS entries you can use the :meth:`~domain_server_address` method to add or remove. :raises CreateElementFailed: failed creating element :return: element instance by type or 3-tuple if with_status set """ dns_address = kwargs.pop('domain_server_address', []) element, updated, created = super(StaticNetlink, cls).update_or_create( with_status=True, defer_update=True, **kwargs) if not created: if dns_address: new_entries = RankedDNSAddress([]) new_entries.add(dns_address) element.data.update(domain_server_address=new_entries.entries) updated = True if updated: element.update() if with_status: return element, updated, created return element
['def', 'update_or_create', '(', 'cls', ',', 'with_status', '=', 'False', ',', '*', '*', 'kwargs', ')', ':', 'dns_address', '=', 'kwargs', '.', 'pop', '(', "'domain_server_address'", ',', '[', ']', ')', 'element', ',', 'updated', ',', 'created', '=', 'super', '(', 'StaticNetlink', ',', 'cls', ')', '.', 'update_or_create', '(', 'with_status', '=', 'True', ',', 'defer_update', '=', 'True', ',', '*', '*', 'kwargs', ')', 'if', 'not', 'created', ':', 'if', 'dns_address', ':', 'new_entries', '=', 'RankedDNSAddress', '(', '[', ']', ')', 'new_entries', '.', 'add', '(', 'dns_address', ')', 'element', '.', 'data', '.', 'update', '(', 'domain_server_address', '=', 'new_entries', '.', 'entries', ')', 'updated', '=', 'True', 'if', 'updated', ':', 'element', '.', 'update', '(', ')', 'if', 'with_status', ':', 'return', 'element', ',', 'updated', ',', 'created', 'return', 'element']
Update or create static netlink. DNS entry differences are not resolved, instead any entries provided will be the final state for this netlink. If the intent is to add/remove DNS entries you can use the :meth:`~domain_server_address` method to add or remove. :raises CreateElementFailed: failed creating element :return: element instance by type or 3-tuple if with_status set
['Update', 'or', 'create', 'static', 'netlink', '.', 'DNS', 'entry', 'differences', 'are', 'not', 'resolved', 'instead', 'any', 'entries', 'provided', 'will', 'be', 'the', 'final', 'state', 'for', 'this', 'netlink', '.', 'If', 'the', 'intent', 'is', 'to', 'add', '/', 'remove', 'DNS', 'entries', 'you', 'can', 'use', 'the', ':', 'meth', ':', '~domain_server_address', 'method', 'to', 'add', 'or', 'remove', '.', ':', 'raises', 'CreateElementFailed', ':', 'failed', 'creating', 'element', ':', 'return', ':', 'element', 'instance', 'by', 'type', 'or', '3', '-', 'tuple', 'if', 'with_status', 'set']
train
https://github.com/gabstopper/smc-python/blob/e027b8a5dcfaf884eada32d113d41c1e56b32457/smc/elements/netlink.py#L152-L176
7,022
quiltdata/quilt
compiler/quilt/tools/store.py
PackageStore.user_path
def user_path(self, team, user): """ Returns the path to directory with the user's package repositories. """ return os.path.join(self.team_path(team), user)
python
def user_path(self, team, user): """ Returns the path to directory with the user's package repositories. """ return os.path.join(self.team_path(team), user)
['def', 'user_path', '(', 'self', ',', 'team', ',', 'user', ')', ':', 'return', 'os', '.', 'path', '.', 'join', '(', 'self', '.', 'team_path', '(', 'team', ')', ',', 'user', ')']
Returns the path to directory with the user's package repositories.
['Returns', 'the', 'path', 'to', 'directory', 'with', 'the', 'user', 's', 'package', 'repositories', '.']
train
https://github.com/quiltdata/quilt/blob/651853e7e89a8af86e0ff26167e752efa5878c12/compiler/quilt/tools/store.py#L335-L339
7,023
ryukinix/decorating
decorating/animation.py
AnimatedDecorator.auto_message
def auto_message(self, args): """Try guess the message by the args passed args: a set of args passed on the wrapper __call__ in the definition above. if the object already have some message (defined in __init__), we don't change that. If the first arg is a function, so is decorated without argument, use the func name as the message. If not self.message anyway, use the default_message global, another else use the default self.message already """ if any(args) and callable(args[0]) and not self.message: return args[0].__name__ elif not self.message: return self.default_message else: return self.message
python
def auto_message(self, args): """Try guess the message by the args passed args: a set of args passed on the wrapper __call__ in the definition above. if the object already have some message (defined in __init__), we don't change that. If the first arg is a function, so is decorated without argument, use the func name as the message. If not self.message anyway, use the default_message global, another else use the default self.message already """ if any(args) and callable(args[0]) and not self.message: return args[0].__name__ elif not self.message: return self.default_message else: return self.message
['def', 'auto_message', '(', 'self', ',', 'args', ')', ':', 'if', 'any', '(', 'args', ')', 'and', 'callable', '(', 'args', '[', '0', ']', ')', 'and', 'not', 'self', '.', 'message', ':', 'return', 'args', '[', '0', ']', '.', '__name__', 'elif', 'not', 'self', '.', 'message', ':', 'return', 'self', '.', 'default_message', 'else', ':', 'return', 'self', '.', 'message']
Try guess the message by the args passed args: a set of args passed on the wrapper __call__ in the definition above. if the object already have some message (defined in __init__), we don't change that. If the first arg is a function, so is decorated without argument, use the func name as the message. If not self.message anyway, use the default_message global, another else use the default self.message already
['Try', 'guess', 'the', 'message', 'by', 'the', 'args', 'passed']
train
https://github.com/ryukinix/decorating/blob/df78c3f87800205701704c0bc0fb9b6bb908ba7e/decorating/animation.py#L296-L315
7,024
gabstopper/smc-python
smc/core/engine_vss.py
VSSContainer.add_context
def add_context(self, isc_name, isc_policy_id, isc_traffic_tag): """ Create the VSS Context within the VSSContainer :param str isc_name: ISC name, possibly append policy name?? :param str isc_policy_id: Policy ID in SMC (the 'key' attribute) :param str isc_traffic_tag: NSX groupId (serviceprofile-145) :raises CreateElementFailed: failed to create :return: VSSContext """ if 'add_context' in self.data.links: # SMC >=6.5 element = ElementCreator( VSSContext, href=self.get_relation('add_context'), json = { 'name': isc_name, 'vc_isc': { 'isc_name': isc_name, 'isc_policy_id': isc_policy_id, 'isc_traffic_tag': isc_traffic_tag } }) else: # SMC < 6.5 element = VSSContext.create( isc_name=isc_name, isc_policy_id=isc_policy_id, isc_traffic_tag=isc_traffic_tag, vss_container=self) # Delete cache since the virtualResources node is attached to # the engine json self._del_cache() return element
python
def add_context(self, isc_name, isc_policy_id, isc_traffic_tag): """ Create the VSS Context within the VSSContainer :param str isc_name: ISC name, possibly append policy name?? :param str isc_policy_id: Policy ID in SMC (the 'key' attribute) :param str isc_traffic_tag: NSX groupId (serviceprofile-145) :raises CreateElementFailed: failed to create :return: VSSContext """ if 'add_context' in self.data.links: # SMC >=6.5 element = ElementCreator( VSSContext, href=self.get_relation('add_context'), json = { 'name': isc_name, 'vc_isc': { 'isc_name': isc_name, 'isc_policy_id': isc_policy_id, 'isc_traffic_tag': isc_traffic_tag } }) else: # SMC < 6.5 element = VSSContext.create( isc_name=isc_name, isc_policy_id=isc_policy_id, isc_traffic_tag=isc_traffic_tag, vss_container=self) # Delete cache since the virtualResources node is attached to # the engine json self._del_cache() return element
['def', 'add_context', '(', 'self', ',', 'isc_name', ',', 'isc_policy_id', ',', 'isc_traffic_tag', ')', ':', 'if', "'add_context'", 'in', 'self', '.', 'data', '.', 'links', ':', '# SMC >=6.5', 'element', '=', 'ElementCreator', '(', 'VSSContext', ',', 'href', '=', 'self', '.', 'get_relation', '(', "'add_context'", ')', ',', 'json', '=', '{', "'name'", ':', 'isc_name', ',', "'vc_isc'", ':', '{', "'isc_name'", ':', 'isc_name', ',', "'isc_policy_id'", ':', 'isc_policy_id', ',', "'isc_traffic_tag'", ':', 'isc_traffic_tag', '}', '}', ')', 'else', ':', '# SMC < 6.5', 'element', '=', 'VSSContext', '.', 'create', '(', 'isc_name', '=', 'isc_name', ',', 'isc_policy_id', '=', 'isc_policy_id', ',', 'isc_traffic_tag', '=', 'isc_traffic_tag', ',', 'vss_container', '=', 'self', ')', '# Delete cache since the virtualResources node is attached to', '# the engine json', 'self', '.', '_del_cache', '(', ')', 'return', 'element']
Create the VSS Context within the VSSContainer :param str isc_name: ISC name, possibly append policy name?? :param str isc_policy_id: Policy ID in SMC (the 'key' attribute) :param str isc_traffic_tag: NSX groupId (serviceprofile-145) :raises CreateElementFailed: failed to create :return: VSSContext
['Create', 'the', 'VSS', 'Context', 'within', 'the', 'VSSContainer']
train
https://github.com/gabstopper/smc-python/blob/e027b8a5dcfaf884eada32d113d41c1e56b32457/smc/core/engine_vss.py#L135-L167
7,025
gawel/irc3
irc3/__init__.py
IrcBot.dcc_accept
def dcc_accept(self, mask, filepath, port, pos): """accept a DCC RESUME for an axisting DCC SEND. filepath is the filename to sent. port is the port opened on the server. pos is the expected offset""" return self.dcc.resume(mask, filepath, port, pos)
python
def dcc_accept(self, mask, filepath, port, pos): """accept a DCC RESUME for an axisting DCC SEND. filepath is the filename to sent. port is the port opened on the server. pos is the expected offset""" return self.dcc.resume(mask, filepath, port, pos)
['def', 'dcc_accept', '(', 'self', ',', 'mask', ',', 'filepath', ',', 'port', ',', 'pos', ')', ':', 'return', 'self', '.', 'dcc', '.', 'resume', '(', 'mask', ',', 'filepath', ',', 'port', ',', 'pos', ')']
accept a DCC RESUME for an axisting DCC SEND. filepath is the filename to sent. port is the port opened on the server. pos is the expected offset
['accept', 'a', 'DCC', 'RESUME', 'for', 'an', 'axisting', 'DCC', 'SEND', '.', 'filepath', 'is', 'the', 'filename', 'to', 'sent', '.', 'port', 'is', 'the', 'port', 'opened', 'on', 'the', 'server', '.', 'pos', 'is', 'the', 'expected', 'offset']
train
https://github.com/gawel/irc3/blob/cd27840a5809a1f803dc620860fe75d83d2a2ec8/irc3/__init__.py#L387-L391
7,026
materialsproject/pymatgen
pymatgen/analysis/defects/corrections.py
FreysoldtCorrection.get_correction
def get_correction(self, entry): """ Gets the Freysoldt correction for a defect entry Args: entry (DefectEntry): defect entry to compute Freysoldt correction on. Requires following parameters in the DefectEntry to exist: axis_grid (3 x NGX where NGX is the length of the NGX grid in the x,y and z axis directions. Same length as planar average lists): A list of 3 numpy arrays which contain the cartesian axis values (in angstroms) that correspond to each planar avg potential supplied. bulk_planar_averages (3 x NGX where NGX is the length of the NGX grid in the x,y and z axis directions.): A list of 3 numpy arrays which contain the planar averaged electrostatic potential for the bulk supercell. defect_planar_averages (3 x NGX where NGX is the length of the NGX grid in the x,y and z axis directions.): A list of 3 numpy arrays which contain the planar averaged electrostatic potential for the defective supercell. scaling_matrix (3 x 1 matrix): scaling matrix required to convert the entry.defect.bulk_structure object into the lattice which is used by the bulk_planar_average and defect_planar_average """ if not self.axis: list_axis_grid = np.array(entry.parameters["axis_grid"]) list_bulk_plnr_avg_esp = np.array(entry.parameters["bulk_planar_averages"]) list_defect_plnr_avg_esp = np.array(entry.parameters["defect_planar_averages"]) list_axes = range(len(list_axis_grid)) else: list_axes = np.array(self.axis) list_axis_grid, list_bulk_plnr_avg_esp, list_defect_plnr_avg_esp = [], [], [] for ax in list_axes: list_axis_grid.append(np.array(entry.parameters["axis_grid"][ax])) list_bulk_plnr_avg_esp.append(np.array(entry.parameters["bulk_planar_averages"][ax])) list_defect_plnr_avg_esp.append(np.array(entry.parameters["defect_planar_averages"][ax])) bulk_struct = entry.defect.bulk_structure.copy() if "scaling_matrix" in entry.parameters.keys(): bulk_struct.make_supercell(entry.parameters["scaling_matrix"]) lattice = bulk_struct.lattice q = entry.defect.charge es_corr = self.perform_es_corr(lattice, entry.charge) pot_corr_tracker = [] for x, pureavg, defavg, axis in zip(list_axis_grid, list_bulk_plnr_avg_esp, list_defect_plnr_avg_esp, list_axes): tmp_pot_corr = self.perform_pot_corr( x, pureavg, defavg, lattice, entry.charge, entry.site.coords, axis, widthsample=1.0) pot_corr_tracker.append(tmp_pot_corr) pot_corr = np.mean(pot_corr_tracker) entry.parameters["freysoldt_meta"] = dict(self.metadata) entry.parameters["potalign"] = pot_corr / (-q) if q else 0. return {"freysoldt_electrostatic": es_corr, "freysoldt_potential_alignment": pot_corr}
python
def get_correction(self, entry): """ Gets the Freysoldt correction for a defect entry Args: entry (DefectEntry): defect entry to compute Freysoldt correction on. Requires following parameters in the DefectEntry to exist: axis_grid (3 x NGX where NGX is the length of the NGX grid in the x,y and z axis directions. Same length as planar average lists): A list of 3 numpy arrays which contain the cartesian axis values (in angstroms) that correspond to each planar avg potential supplied. bulk_planar_averages (3 x NGX where NGX is the length of the NGX grid in the x,y and z axis directions.): A list of 3 numpy arrays which contain the planar averaged electrostatic potential for the bulk supercell. defect_planar_averages (3 x NGX where NGX is the length of the NGX grid in the x,y and z axis directions.): A list of 3 numpy arrays which contain the planar averaged electrostatic potential for the defective supercell. scaling_matrix (3 x 1 matrix): scaling matrix required to convert the entry.defect.bulk_structure object into the lattice which is used by the bulk_planar_average and defect_planar_average """ if not self.axis: list_axis_grid = np.array(entry.parameters["axis_grid"]) list_bulk_plnr_avg_esp = np.array(entry.parameters["bulk_planar_averages"]) list_defect_plnr_avg_esp = np.array(entry.parameters["defect_planar_averages"]) list_axes = range(len(list_axis_grid)) else: list_axes = np.array(self.axis) list_axis_grid, list_bulk_plnr_avg_esp, list_defect_plnr_avg_esp = [], [], [] for ax in list_axes: list_axis_grid.append(np.array(entry.parameters["axis_grid"][ax])) list_bulk_plnr_avg_esp.append(np.array(entry.parameters["bulk_planar_averages"][ax])) list_defect_plnr_avg_esp.append(np.array(entry.parameters["defect_planar_averages"][ax])) bulk_struct = entry.defect.bulk_structure.copy() if "scaling_matrix" in entry.parameters.keys(): bulk_struct.make_supercell(entry.parameters["scaling_matrix"]) lattice = bulk_struct.lattice q = entry.defect.charge es_corr = self.perform_es_corr(lattice, entry.charge) pot_corr_tracker = [] for x, pureavg, defavg, axis in zip(list_axis_grid, list_bulk_plnr_avg_esp, list_defect_plnr_avg_esp, list_axes): tmp_pot_corr = self.perform_pot_corr( x, pureavg, defavg, lattice, entry.charge, entry.site.coords, axis, widthsample=1.0) pot_corr_tracker.append(tmp_pot_corr) pot_corr = np.mean(pot_corr_tracker) entry.parameters["freysoldt_meta"] = dict(self.metadata) entry.parameters["potalign"] = pot_corr / (-q) if q else 0. return {"freysoldt_electrostatic": es_corr, "freysoldt_potential_alignment": pot_corr}
['def', 'get_correction', '(', 'self', ',', 'entry', ')', ':', 'if', 'not', 'self', '.', 'axis', ':', 'list_axis_grid', '=', 'np', '.', 'array', '(', 'entry', '.', 'parameters', '[', '"axis_grid"', ']', ')', 'list_bulk_plnr_avg_esp', '=', 'np', '.', 'array', '(', 'entry', '.', 'parameters', '[', '"bulk_planar_averages"', ']', ')', 'list_defect_plnr_avg_esp', '=', 'np', '.', 'array', '(', 'entry', '.', 'parameters', '[', '"defect_planar_averages"', ']', ')', 'list_axes', '=', 'range', '(', 'len', '(', 'list_axis_grid', ')', ')', 'else', ':', 'list_axes', '=', 'np', '.', 'array', '(', 'self', '.', 'axis', ')', 'list_axis_grid', ',', 'list_bulk_plnr_avg_esp', ',', 'list_defect_plnr_avg_esp', '=', '[', ']', ',', '[', ']', ',', '[', ']', 'for', 'ax', 'in', 'list_axes', ':', 'list_axis_grid', '.', 'append', '(', 'np', '.', 'array', '(', 'entry', '.', 'parameters', '[', '"axis_grid"', ']', '[', 'ax', ']', ')', ')', 'list_bulk_plnr_avg_esp', '.', 'append', '(', 'np', '.', 'array', '(', 'entry', '.', 'parameters', '[', '"bulk_planar_averages"', ']', '[', 'ax', ']', ')', ')', 'list_defect_plnr_avg_esp', '.', 'append', '(', 'np', '.', 'array', '(', 'entry', '.', 'parameters', '[', '"defect_planar_averages"', ']', '[', 'ax', ']', ')', ')', 'bulk_struct', '=', 'entry', '.', 'defect', '.', 'bulk_structure', '.', 'copy', '(', ')', 'if', '"scaling_matrix"', 'in', 'entry', '.', 'parameters', '.', 'keys', '(', ')', ':', 'bulk_struct', '.', 'make_supercell', '(', 'entry', '.', 'parameters', '[', '"scaling_matrix"', ']', ')', 'lattice', '=', 'bulk_struct', '.', 'lattice', 'q', '=', 'entry', '.', 'defect', '.', 'charge', 'es_corr', '=', 'self', '.', 'perform_es_corr', '(', 'lattice', ',', 'entry', '.', 'charge', ')', 'pot_corr_tracker', '=', '[', ']', 'for', 'x', ',', 'pureavg', ',', 'defavg', ',', 'axis', 'in', 'zip', '(', 'list_axis_grid', ',', 'list_bulk_plnr_avg_esp', ',', 'list_defect_plnr_avg_esp', ',', 'list_axes', ')', ':', 'tmp_pot_corr', '=', 'self', '.', 'perform_pot_corr', '(', 'x', ',', 'pureavg', ',', 'defavg', ',', 'lattice', ',', 'entry', '.', 'charge', ',', 'entry', '.', 'site', '.', 'coords', ',', 'axis', ',', 'widthsample', '=', '1.0', ')', 'pot_corr_tracker', '.', 'append', '(', 'tmp_pot_corr', ')', 'pot_corr', '=', 'np', '.', 'mean', '(', 'pot_corr_tracker', ')', 'entry', '.', 'parameters', '[', '"freysoldt_meta"', ']', '=', 'dict', '(', 'self', '.', 'metadata', ')', 'entry', '.', 'parameters', '[', '"potalign"', ']', '=', 'pot_corr', '/', '(', '-', 'q', ')', 'if', 'q', 'else', '0.', 'return', '{', '"freysoldt_electrostatic"', ':', 'es_corr', ',', '"freysoldt_potential_alignment"', ':', 'pot_corr', '}']
Gets the Freysoldt correction for a defect entry Args: entry (DefectEntry): defect entry to compute Freysoldt correction on. Requires following parameters in the DefectEntry to exist: axis_grid (3 x NGX where NGX is the length of the NGX grid in the x,y and z axis directions. Same length as planar average lists): A list of 3 numpy arrays which contain the cartesian axis values (in angstroms) that correspond to each planar avg potential supplied. bulk_planar_averages (3 x NGX where NGX is the length of the NGX grid in the x,y and z axis directions.): A list of 3 numpy arrays which contain the planar averaged electrostatic potential for the bulk supercell. defect_planar_averages (3 x NGX where NGX is the length of the NGX grid in the x,y and z axis directions.): A list of 3 numpy arrays which contain the planar averaged electrostatic potential for the defective supercell. scaling_matrix (3 x 1 matrix): scaling matrix required to convert the entry.defect.bulk_structure object into the lattice which is used by the bulk_planar_average and defect_planar_average
['Gets', 'the', 'Freysoldt', 'correction', 'for', 'a', 'defect', 'entry', 'Args', ':', 'entry', '(', 'DefectEntry', ')', ':', 'defect', 'entry', 'to', 'compute', 'Freysoldt', 'correction', 'on', '.', 'Requires', 'following', 'parameters', 'in', 'the', 'DefectEntry', 'to', 'exist', ':']
train
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/analysis/defects/corrections.py#L56-L121
7,027
wummel/linkchecker
linkcheck/logger/html.py
HtmlLogger.write_outro
def write_outro (self): """Write end of check message.""" self.writeln(u"<br/>") self.write(_("That's it.")+" ") if self.stats.number >= 0: self.write(_n("%d link checked.", "%d links checked.", self.stats.number) % self.stats.number) self.write(u" ") self.write(_n("%d warning found", "%d warnings found", self.stats.warnings_printed) % self.stats.warnings_printed) if self.stats.warnings != self.stats.warnings_printed: self.write(_(" (%d ignored or duplicates not printed)") % (self.stats.warnings - self.stats.warnings_printed)) self.write(u". ") self.write(_n("%d error found", "%d errors found", self.stats.errors_printed) % self.stats.errors_printed) if self.stats.errors != self.stats.errors_printed: self.write(_(" (%d duplicates not printed)") % (self.stats.errors - self.stats.errors_printed)) self.writeln(u".") self.writeln(u"<br/>") num = self.stats.internal_errors if num: self.write(_n("There was %(num)d internal error.", "There were %(num)d internal errors.", num) % {"num": num}) self.writeln(u"<br/>") self.stoptime = time.time() duration = self.stoptime - self.starttime self.writeln(_("Stopped checking at %(time)s (%(duration)s)") % {"time": strformat.strtime(self.stoptime), "duration": strformat.strduration_long(duration)}) self.writeln(u'</blockquote><br/><hr><small>'+ configuration.HtmlAppInfo+u"<br/>") self.writeln(_("Get the newest version at %s") % (u'<a href="'+configuration.Url+u'" target="_top">'+ configuration.Url+u"</a>.<br/>")) self.writeln(_("Write comments and bugs to %s") % (u'<a href="'+configuration.SupportUrl+u'">'+ configuration.SupportUrl+u"</a>.<br/>")) self.writeln(_("Support this project at %s") % (u'<a href="'+configuration.DonateUrl+u'">'+ configuration.DonateUrl+u"</a>.")) self.writeln(u"</small></body></html>")
python
def write_outro (self): """Write end of check message.""" self.writeln(u"<br/>") self.write(_("That's it.")+" ") if self.stats.number >= 0: self.write(_n("%d link checked.", "%d links checked.", self.stats.number) % self.stats.number) self.write(u" ") self.write(_n("%d warning found", "%d warnings found", self.stats.warnings_printed) % self.stats.warnings_printed) if self.stats.warnings != self.stats.warnings_printed: self.write(_(" (%d ignored or duplicates not printed)") % (self.stats.warnings - self.stats.warnings_printed)) self.write(u". ") self.write(_n("%d error found", "%d errors found", self.stats.errors_printed) % self.stats.errors_printed) if self.stats.errors != self.stats.errors_printed: self.write(_(" (%d duplicates not printed)") % (self.stats.errors - self.stats.errors_printed)) self.writeln(u".") self.writeln(u"<br/>") num = self.stats.internal_errors if num: self.write(_n("There was %(num)d internal error.", "There were %(num)d internal errors.", num) % {"num": num}) self.writeln(u"<br/>") self.stoptime = time.time() duration = self.stoptime - self.starttime self.writeln(_("Stopped checking at %(time)s (%(duration)s)") % {"time": strformat.strtime(self.stoptime), "duration": strformat.strduration_long(duration)}) self.writeln(u'</blockquote><br/><hr><small>'+ configuration.HtmlAppInfo+u"<br/>") self.writeln(_("Get the newest version at %s") % (u'<a href="'+configuration.Url+u'" target="_top">'+ configuration.Url+u"</a>.<br/>")) self.writeln(_("Write comments and bugs to %s") % (u'<a href="'+configuration.SupportUrl+u'">'+ configuration.SupportUrl+u"</a>.<br/>")) self.writeln(_("Support this project at %s") % (u'<a href="'+configuration.DonateUrl+u'">'+ configuration.DonateUrl+u"</a>.")) self.writeln(u"</small></body></html>")
['def', 'write_outro', '(', 'self', ')', ':', 'self', '.', 'writeln', '(', 'u"<br/>"', ')', 'self', '.', 'write', '(', '_', '(', '"That\'s it."', ')', '+', '" "', ')', 'if', 'self', '.', 'stats', '.', 'number', '>=', '0', ':', 'self', '.', 'write', '(', '_n', '(', '"%d link checked."', ',', '"%d links checked."', ',', 'self', '.', 'stats', '.', 'number', ')', '%', 'self', '.', 'stats', '.', 'number', ')', 'self', '.', 'write', '(', 'u" "', ')', 'self', '.', 'write', '(', '_n', '(', '"%d warning found"', ',', '"%d warnings found"', ',', 'self', '.', 'stats', '.', 'warnings_printed', ')', '%', 'self', '.', 'stats', '.', 'warnings_printed', ')', 'if', 'self', '.', 'stats', '.', 'warnings', '!=', 'self', '.', 'stats', '.', 'warnings_printed', ':', 'self', '.', 'write', '(', '_', '(', '" (%d ignored or duplicates not printed)"', ')', '%', '(', 'self', '.', 'stats', '.', 'warnings', '-', 'self', '.', 'stats', '.', 'warnings_printed', ')', ')', 'self', '.', 'write', '(', 'u". "', ')', 'self', '.', 'write', '(', '_n', '(', '"%d error found"', ',', '"%d errors found"', ',', 'self', '.', 'stats', '.', 'errors_printed', ')', '%', 'self', '.', 'stats', '.', 'errors_printed', ')', 'if', 'self', '.', 'stats', '.', 'errors', '!=', 'self', '.', 'stats', '.', 'errors_printed', ':', 'self', '.', 'write', '(', '_', '(', '" (%d duplicates not printed)"', ')', '%', '(', 'self', '.', 'stats', '.', 'errors', '-', 'self', '.', 'stats', '.', 'errors_printed', ')', ')', 'self', '.', 'writeln', '(', 'u"."', ')', 'self', '.', 'writeln', '(', 'u"<br/>"', ')', 'num', '=', 'self', '.', 'stats', '.', 'internal_errors', 'if', 'num', ':', 'self', '.', 'write', '(', '_n', '(', '"There was %(num)d internal error."', ',', '"There were %(num)d internal errors."', ',', 'num', ')', '%', '{', '"num"', ':', 'num', '}', ')', 'self', '.', 'writeln', '(', 'u"<br/>"', ')', 'self', '.', 'stoptime', '=', 'time', '.', 'time', '(', ')', 'duration', '=', 'self', '.', 'stoptime', '-', 'self', '.', 'starttime', 'self', '.', 'writeln', '(', '_', '(', '"Stopped checking at %(time)s (%(duration)s)"', ')', '%', '{', '"time"', ':', 'strformat', '.', 'strtime', '(', 'self', '.', 'stoptime', ')', ',', '"duration"', ':', 'strformat', '.', 'strduration_long', '(', 'duration', ')', '}', ')', 'self', '.', 'writeln', '(', "u'</blockquote><br/><hr><small>'", '+', 'configuration', '.', 'HtmlAppInfo', '+', 'u"<br/>"', ')', 'self', '.', 'writeln', '(', '_', '(', '"Get the newest version at %s"', ')', '%', '(', 'u\'<a href="\'', '+', 'configuration', '.', 'Url', '+', 'u\'" target="_top">\'', '+', 'configuration', '.', 'Url', '+', 'u"</a>.<br/>"', ')', ')', 'self', '.', 'writeln', '(', '_', '(', '"Write comments and bugs to %s"', ')', '%', '(', 'u\'<a href="\'', '+', 'configuration', '.', 'SupportUrl', '+', 'u\'">\'', '+', 'configuration', '.', 'SupportUrl', '+', 'u"</a>.<br/>"', ')', ')', 'self', '.', 'writeln', '(', '_', '(', '"Support this project at %s"', ')', '%', '(', 'u\'<a href="\'', '+', 'configuration', '.', 'DonateUrl', '+', 'u\'">\'', '+', 'configuration', '.', 'DonateUrl', '+', 'u"</a>."', ')', ')', 'self', '.', 'writeln', '(', 'u"</small></body></html>"', ')']
Write end of check message.
['Write', 'end', 'of', 'check', 'message', '.']
train
https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/linkcheck/logger/html.py#L287-L329
7,028
google/grr
grr/server/grr_response_server/databases/mysql_paths.py
MySQLDBPathMixin.ReadPathInfos
def ReadPathInfos(self, client_id, path_type, components_list, cursor=None): """Retrieves path info records for given paths.""" if not components_list: return {} path_ids = list(map(rdf_objects.PathID.FromComponents, components_list)) path_infos = {components: None for components in components_list} query = """ SELECT path, directory, UNIX_TIMESTAMP(client_paths.timestamp), stat_entry, UNIX_TIMESTAMP(last_stat_entry_timestamp), hash_entry, UNIX_TIMESTAMP(last_hash_entry_timestamp) FROM client_paths LEFT JOIN client_path_stat_entries ON (client_paths.client_id = client_path_stat_entries.client_id AND client_paths.path_type = client_path_stat_entries.path_type AND client_paths.path_id = client_path_stat_entries.path_id AND client_paths.last_stat_entry_timestamp = client_path_stat_entries.timestamp) LEFT JOIN client_path_hash_entries ON (client_paths.client_id = client_path_hash_entries.client_id AND client_paths.path_type = client_path_hash_entries.path_type AND client_paths.path_id = client_path_hash_entries.path_id AND client_paths.last_hash_entry_timestamp = client_path_hash_entries.timestamp) WHERE client_paths.client_id = %(client_id)s AND client_paths.path_type = %(path_type)s AND client_paths.path_id IN %(path_ids)s """ values = { "client_id": db_utils.ClientIDToInt(client_id), "path_type": int(path_type), "path_ids": [path_id.AsBytes() for path_id in path_ids] } cursor.execute(query, values) for row in cursor.fetchall(): # pyformat: disable (path, directory, timestamp, stat_entry_bytes, last_stat_entry_timestamp, hash_entry_bytes, last_hash_entry_timestamp) = row # pyformat: enable components = mysql_utils.PathToComponents(path) if stat_entry_bytes is not None: stat_entry = rdf_client_fs.StatEntry.FromSerializedString( stat_entry_bytes) else: stat_entry = None if hash_entry_bytes is not None: hash_entry = rdf_crypto.Hash.FromSerializedString(hash_entry_bytes) else: hash_entry = None datetime = mysql_utils.TimestampToRDFDatetime path_info = rdf_objects.PathInfo( path_type=path_type, components=components, timestamp=datetime(timestamp), last_stat_entry_timestamp=datetime(last_stat_entry_timestamp), last_hash_entry_timestamp=datetime(last_hash_entry_timestamp), directory=directory, stat_entry=stat_entry, hash_entry=hash_entry) path_infos[components] = path_info return path_infos
python
def ReadPathInfos(self, client_id, path_type, components_list, cursor=None): """Retrieves path info records for given paths.""" if not components_list: return {} path_ids = list(map(rdf_objects.PathID.FromComponents, components_list)) path_infos = {components: None for components in components_list} query = """ SELECT path, directory, UNIX_TIMESTAMP(client_paths.timestamp), stat_entry, UNIX_TIMESTAMP(last_stat_entry_timestamp), hash_entry, UNIX_TIMESTAMP(last_hash_entry_timestamp) FROM client_paths LEFT JOIN client_path_stat_entries ON (client_paths.client_id = client_path_stat_entries.client_id AND client_paths.path_type = client_path_stat_entries.path_type AND client_paths.path_id = client_path_stat_entries.path_id AND client_paths.last_stat_entry_timestamp = client_path_stat_entries.timestamp) LEFT JOIN client_path_hash_entries ON (client_paths.client_id = client_path_hash_entries.client_id AND client_paths.path_type = client_path_hash_entries.path_type AND client_paths.path_id = client_path_hash_entries.path_id AND client_paths.last_hash_entry_timestamp = client_path_hash_entries.timestamp) WHERE client_paths.client_id = %(client_id)s AND client_paths.path_type = %(path_type)s AND client_paths.path_id IN %(path_ids)s """ values = { "client_id": db_utils.ClientIDToInt(client_id), "path_type": int(path_type), "path_ids": [path_id.AsBytes() for path_id in path_ids] } cursor.execute(query, values) for row in cursor.fetchall(): # pyformat: disable (path, directory, timestamp, stat_entry_bytes, last_stat_entry_timestamp, hash_entry_bytes, last_hash_entry_timestamp) = row # pyformat: enable components = mysql_utils.PathToComponents(path) if stat_entry_bytes is not None: stat_entry = rdf_client_fs.StatEntry.FromSerializedString( stat_entry_bytes) else: stat_entry = None if hash_entry_bytes is not None: hash_entry = rdf_crypto.Hash.FromSerializedString(hash_entry_bytes) else: hash_entry = None datetime = mysql_utils.TimestampToRDFDatetime path_info = rdf_objects.PathInfo( path_type=path_type, components=components, timestamp=datetime(timestamp), last_stat_entry_timestamp=datetime(last_stat_entry_timestamp), last_hash_entry_timestamp=datetime(last_hash_entry_timestamp), directory=directory, stat_entry=stat_entry, hash_entry=hash_entry) path_infos[components] = path_info return path_infos
['def', 'ReadPathInfos', '(', 'self', ',', 'client_id', ',', 'path_type', ',', 'components_list', ',', 'cursor', '=', 'None', ')', ':', 'if', 'not', 'components_list', ':', 'return', '{', '}', 'path_ids', '=', 'list', '(', 'map', '(', 'rdf_objects', '.', 'PathID', '.', 'FromComponents', ',', 'components_list', ')', ')', 'path_infos', '=', '{', 'components', ':', 'None', 'for', 'components', 'in', 'components_list', '}', 'query', '=', '"""\n SELECT path, directory, UNIX_TIMESTAMP(client_paths.timestamp),\n stat_entry, UNIX_TIMESTAMP(last_stat_entry_timestamp),\n hash_entry, UNIX_TIMESTAMP(last_hash_entry_timestamp)\n FROM client_paths\n LEFT JOIN client_path_stat_entries ON\n (client_paths.client_id = client_path_stat_entries.client_id AND\n client_paths.path_type = client_path_stat_entries.path_type AND\n client_paths.path_id = client_path_stat_entries.path_id AND\n client_paths.last_stat_entry_timestamp = client_path_stat_entries.timestamp)\n LEFT JOIN client_path_hash_entries ON\n (client_paths.client_id = client_path_hash_entries.client_id AND\n client_paths.path_type = client_path_hash_entries.path_type AND\n client_paths.path_id = client_path_hash_entries.path_id AND\n client_paths.last_hash_entry_timestamp = client_path_hash_entries.timestamp)\n WHERE client_paths.client_id = %(client_id)s\n AND client_paths.path_type = %(path_type)s\n AND client_paths.path_id IN %(path_ids)s\n """', 'values', '=', '{', '"client_id"', ':', 'db_utils', '.', 'ClientIDToInt', '(', 'client_id', ')', ',', '"path_type"', ':', 'int', '(', 'path_type', ')', ',', '"path_ids"', ':', '[', 'path_id', '.', 'AsBytes', '(', ')', 'for', 'path_id', 'in', 'path_ids', ']', '}', 'cursor', '.', 'execute', '(', 'query', ',', 'values', ')', 'for', 'row', 'in', 'cursor', '.', 'fetchall', '(', ')', ':', '# pyformat: disable', '(', 'path', ',', 'directory', ',', 'timestamp', ',', 'stat_entry_bytes', ',', 'last_stat_entry_timestamp', ',', 'hash_entry_bytes', ',', 'last_hash_entry_timestamp', ')', '=', 'row', '# pyformat: enable', 'components', '=', 'mysql_utils', '.', 'PathToComponents', '(', 'path', ')', 'if', 'stat_entry_bytes', 'is', 'not', 'None', ':', 'stat_entry', '=', 'rdf_client_fs', '.', 'StatEntry', '.', 'FromSerializedString', '(', 'stat_entry_bytes', ')', 'else', ':', 'stat_entry', '=', 'None', 'if', 'hash_entry_bytes', 'is', 'not', 'None', ':', 'hash_entry', '=', 'rdf_crypto', '.', 'Hash', '.', 'FromSerializedString', '(', 'hash_entry_bytes', ')', 'else', ':', 'hash_entry', '=', 'None', 'datetime', '=', 'mysql_utils', '.', 'TimestampToRDFDatetime', 'path_info', '=', 'rdf_objects', '.', 'PathInfo', '(', 'path_type', '=', 'path_type', ',', 'components', '=', 'components', ',', 'timestamp', '=', 'datetime', '(', 'timestamp', ')', ',', 'last_stat_entry_timestamp', '=', 'datetime', '(', 'last_stat_entry_timestamp', ')', ',', 'last_hash_entry_timestamp', '=', 'datetime', '(', 'last_hash_entry_timestamp', ')', ',', 'directory', '=', 'directory', ',', 'stat_entry', '=', 'stat_entry', ',', 'hash_entry', '=', 'hash_entry', ')', 'path_infos', '[', 'components', ']', '=', 'path_info', 'return', 'path_infos']
Retrieves path info records for given paths.
['Retrieves', 'path', 'info', 'records', 'for', 'given', 'paths', '.']
train
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/databases/mysql_paths.py#L116-L184
7,029
polyaxon/polyaxon
polyaxon/scheduler/spawners/templates/jobs/manager.py
ResourceManager.get_init_container
def get_init_container(self, init_command, init_args, env_vars, context_mounts, persistence_outputs, persistence_data): """Pod init container for setting outputs path.""" env_vars = to_list(env_vars, check_none=True) outputs_path = stores.get_job_outputs_path( persistence=persistence_outputs, job_name=self.job_name) _, outputs_volume_mount = get_pod_outputs_volume(persistence_outputs=persistence_outputs) volume_mounts = outputs_volume_mount + to_list(context_mounts, check_none=True) init_command = init_command or ["/bin/sh", "-c"] init_args = init_args or to_list( get_output_args(command=InitCommands.CREATE, outputs_path=outputs_path)) init_args += to_list(get_auth_context_args(entity='job', entity_name=self.job_name)) return client.V1Container( name=self.init_container_name, image=self.init_docker_image, image_pull_policy=self.init_docker_image_pull_policy, command=init_command, args=[''.join(init_args)], env=env_vars, volume_mounts=volume_mounts)
python
def get_init_container(self, init_command, init_args, env_vars, context_mounts, persistence_outputs, persistence_data): """Pod init container for setting outputs path.""" env_vars = to_list(env_vars, check_none=True) outputs_path = stores.get_job_outputs_path( persistence=persistence_outputs, job_name=self.job_name) _, outputs_volume_mount = get_pod_outputs_volume(persistence_outputs=persistence_outputs) volume_mounts = outputs_volume_mount + to_list(context_mounts, check_none=True) init_command = init_command or ["/bin/sh", "-c"] init_args = init_args or to_list( get_output_args(command=InitCommands.CREATE, outputs_path=outputs_path)) init_args += to_list(get_auth_context_args(entity='job', entity_name=self.job_name)) return client.V1Container( name=self.init_container_name, image=self.init_docker_image, image_pull_policy=self.init_docker_image_pull_policy, command=init_command, args=[''.join(init_args)], env=env_vars, volume_mounts=volume_mounts)
['def', 'get_init_container', '(', 'self', ',', 'init_command', ',', 'init_args', ',', 'env_vars', ',', 'context_mounts', ',', 'persistence_outputs', ',', 'persistence_data', ')', ':', 'env_vars', '=', 'to_list', '(', 'env_vars', ',', 'check_none', '=', 'True', ')', 'outputs_path', '=', 'stores', '.', 'get_job_outputs_path', '(', 'persistence', '=', 'persistence_outputs', ',', 'job_name', '=', 'self', '.', 'job_name', ')', '_', ',', 'outputs_volume_mount', '=', 'get_pod_outputs_volume', '(', 'persistence_outputs', '=', 'persistence_outputs', ')', 'volume_mounts', '=', 'outputs_volume_mount', '+', 'to_list', '(', 'context_mounts', ',', 'check_none', '=', 'True', ')', 'init_command', '=', 'init_command', 'or', '[', '"/bin/sh"', ',', '"-c"', ']', 'init_args', '=', 'init_args', 'or', 'to_list', '(', 'get_output_args', '(', 'command', '=', 'InitCommands', '.', 'CREATE', ',', 'outputs_path', '=', 'outputs_path', ')', ')', 'init_args', '+=', 'to_list', '(', 'get_auth_context_args', '(', 'entity', '=', "'job'", ',', 'entity_name', '=', 'self', '.', 'job_name', ')', ')', 'return', 'client', '.', 'V1Container', '(', 'name', '=', 'self', '.', 'init_container_name', ',', 'image', '=', 'self', '.', 'init_docker_image', ',', 'image_pull_policy', '=', 'self', '.', 'init_docker_image_pull_policy', ',', 'command', '=', 'init_command', ',', 'args', '=', '[', "''", '.', 'join', '(', 'init_args', ')', ']', ',', 'env', '=', 'env_vars', ',', 'volume_mounts', '=', 'volume_mounts', ')']
Pod init container for setting outputs path.
['Pod', 'init', 'container', 'for', 'setting', 'outputs', 'path', '.']
train
https://github.com/polyaxon/polyaxon/blob/e1724f0756b1a42f9e7aa08a976584a84ef7f016/polyaxon/scheduler/spawners/templates/jobs/manager.py#L131-L158
7,030
bukun/TorCMS
torcms/handlers/page_handler.py
PageHandler.__could_edit
def __could_edit(self, slug): ''' Test if the user could edit the page. ''' page_rec = MWiki.get_by_uid(slug) if not page_rec: return False if self.check_post_role()['EDIT']: return True elif page_rec.user_name == self.userinfo.user_name: return True else: return False
python
def __could_edit(self, slug): ''' Test if the user could edit the page. ''' page_rec = MWiki.get_by_uid(slug) if not page_rec: return False if self.check_post_role()['EDIT']: return True elif page_rec.user_name == self.userinfo.user_name: return True else: return False
['def', '__could_edit', '(', 'self', ',', 'slug', ')', ':', 'page_rec', '=', 'MWiki', '.', 'get_by_uid', '(', 'slug', ')', 'if', 'not', 'page_rec', ':', 'return', 'False', 'if', 'self', '.', 'check_post_role', '(', ')', '[', "'EDIT'", ']', ':', 'return', 'True', 'elif', 'page_rec', '.', 'user_name', '==', 'self', '.', 'userinfo', '.', 'user_name', ':', 'return', 'True', 'else', ':', 'return', 'False']
Test if the user could edit the page.
['Test', 'if', 'the', 'user', 'could', 'edit', 'the', 'page', '.']
train
https://github.com/bukun/TorCMS/blob/6567c7fe2604a1d646d4570c017840958630ed2b/torcms/handlers/page_handler.py#L96-L108
7,031
Chilipp/psyplot
psyplot/plotter.py
Plotter.check_key
def check_key(self, key, raise_error=True, *args, **kwargs): """ Checks whether the key is a valid formatoption Parameters ---------- %(check_key.parameters.no_possible_keys|name)s Returns ------- %(check_key.returns)s Raises ------ %(check_key.raises)s""" return check_key( key, possible_keys=list(self), raise_error=raise_error, name='formatoption keyword', *args, **kwargs)
python
def check_key(self, key, raise_error=True, *args, **kwargs): """ Checks whether the key is a valid formatoption Parameters ---------- %(check_key.parameters.no_possible_keys|name)s Returns ------- %(check_key.returns)s Raises ------ %(check_key.raises)s""" return check_key( key, possible_keys=list(self), raise_error=raise_error, name='formatoption keyword', *args, **kwargs)
['def', 'check_key', '(', 'self', ',', 'key', ',', 'raise_error', '=', 'True', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'return', 'check_key', '(', 'key', ',', 'possible_keys', '=', 'list', '(', 'self', ')', ',', 'raise_error', '=', 'raise_error', ',', 'name', '=', "'formatoption keyword'", ',', '*', 'args', ',', '*', '*', 'kwargs', ')']
Checks whether the key is a valid formatoption Parameters ---------- %(check_key.parameters.no_possible_keys|name)s Returns ------- %(check_key.returns)s Raises ------ %(check_key.raises)s
['Checks', 'whether', 'the', 'key', 'is', 'a', 'valid', 'formatoption']
train
https://github.com/Chilipp/psyplot/blob/75a0a15a9a1dd018e79d2df270d56c4bf5f311d5/psyplot/plotter.py#L1234-L1251
7,032
lobocv/pyperform
pyperform/__init__.py
enable
def enable(): """ Enable all benchmarking. """ Benchmark.enable = True ComparisonBenchmark.enable = True BenchmarkedFunction.enable = True BenchmarkedClass.enable = True
python
def enable(): """ Enable all benchmarking. """ Benchmark.enable = True ComparisonBenchmark.enable = True BenchmarkedFunction.enable = True BenchmarkedClass.enable = True
['def', 'enable', '(', ')', ':', 'Benchmark', '.', 'enable', '=', 'True', 'ComparisonBenchmark', '.', 'enable', '=', 'True', 'BenchmarkedFunction', '.', 'enable', '=', 'True', 'BenchmarkedClass', '.', 'enable', '=', 'True']
Enable all benchmarking.
['Enable', 'all', 'benchmarking', '.']
train
https://github.com/lobocv/pyperform/blob/97d87e8b9ddb35bd8f2a6782965fd7735ab0349f/pyperform/__init__.py#L23-L30
7,033
p3trus/slave
slave/srs/sr850.py
Mark.bin
def bin(self): """The bin index of this mark. :returns: An integer bin index or None if the mark is inactive. """ bin = self._query(('MBIN?', Integer, Integer), self.idx) return None if bin == -1 else bin
python
def bin(self): """The bin index of this mark. :returns: An integer bin index or None if the mark is inactive. """ bin = self._query(('MBIN?', Integer, Integer), self.idx) return None if bin == -1 else bin
['def', 'bin', '(', 'self', ')', ':', 'bin', '=', 'self', '.', '_query', '(', '(', "'MBIN?'", ',', 'Integer', ',', 'Integer', ')', ',', 'self', '.', 'idx', ')', 'return', 'None', 'if', 'bin', '==', '-', '1', 'else', 'bin']
The bin index of this mark. :returns: An integer bin index or None if the mark is inactive.
['The', 'bin', 'index', 'of', 'this', 'mark', '.']
train
https://github.com/p3trus/slave/blob/bdc74e73bd0f47b74a090c43aa2283c469cde3be/slave/srs/sr850.py#L1180-L1187
7,034
hozn/stravalib
stravalib/client.py
Client.get_route
def get_route(self, route_id): """ Gets specified route. Will be detail-level if owned by authenticated user; otherwise summary-level. https://strava.github.io/api/v3/routes/#retreive :param route_id: The ID of route to fetch. :type route_id: int :rtype: :class:`stravalib.model.Route` """ raw = self.protocol.get('/routes/{id}', id=route_id) return model.Route.deserialize(raw, bind_client=self)
python
def get_route(self, route_id): """ Gets specified route. Will be detail-level if owned by authenticated user; otherwise summary-level. https://strava.github.io/api/v3/routes/#retreive :param route_id: The ID of route to fetch. :type route_id: int :rtype: :class:`stravalib.model.Route` """ raw = self.protocol.get('/routes/{id}', id=route_id) return model.Route.deserialize(raw, bind_client=self)
['def', 'get_route', '(', 'self', ',', 'route_id', ')', ':', 'raw', '=', 'self', '.', 'protocol', '.', 'get', '(', "'/routes/{id}'", ',', 'id', '=', 'route_id', ')', 'return', 'model', '.', 'Route', '.', 'deserialize', '(', 'raw', ',', 'bind_client', '=', 'self', ')']
Gets specified route. Will be detail-level if owned by authenticated user; otherwise summary-level. https://strava.github.io/api/v3/routes/#retreive :param route_id: The ID of route to fetch. :type route_id: int :rtype: :class:`stravalib.model.Route`
['Gets', 'specified', 'route', '.']
train
https://github.com/hozn/stravalib/blob/5500ebc39e0bf4706bb1ca4c27b25e56becaaa5f/stravalib/client.py#L1430-L1444
7,035
mojaie/chorus
chorus/util/debug.py
malloc
def malloc(func): """ Decorator Execute tracemalloc """ def _f(*args, **kwargs): print("\n<<<---") tracemalloc.start() res = func(*args, **kwargs) snapshot = tracemalloc.take_snapshot() top_stats = snapshot.statistics('lineno') print("[ Top 10 ]") for i, stat in enumerate(top_stats[:10]): frame = stat.traceback[0] filename = os.sep.join(frame.filename.split(os.sep)[-2:]) print("#%s: %s:%s: %.1f KiB" % (i, filename, frame.lineno, stat.size / 1024)) print(linecache.getline(frame.filename, frame.lineno).strip()) # print(stat) print("--->>>\n") return res return _f
python
def malloc(func): """ Decorator Execute tracemalloc """ def _f(*args, **kwargs): print("\n<<<---") tracemalloc.start() res = func(*args, **kwargs) snapshot = tracemalloc.take_snapshot() top_stats = snapshot.statistics('lineno') print("[ Top 10 ]") for i, stat in enumerate(top_stats[:10]): frame = stat.traceback[0] filename = os.sep.join(frame.filename.split(os.sep)[-2:]) print("#%s: %s:%s: %.1f KiB" % (i, filename, frame.lineno, stat.size / 1024)) print(linecache.getline(frame.filename, frame.lineno).strip()) # print(stat) print("--->>>\n") return res return _f
['def', 'malloc', '(', 'func', ')', ':', 'def', '_f', '(', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'print', '(', '"\\n<<<---"', ')', 'tracemalloc', '.', 'start', '(', ')', 'res', '=', 'func', '(', '*', 'args', ',', '*', '*', 'kwargs', ')', 'snapshot', '=', 'tracemalloc', '.', 'take_snapshot', '(', ')', 'top_stats', '=', 'snapshot', '.', 'statistics', '(', "'lineno'", ')', 'print', '(', '"[ Top 10 ]"', ')', 'for', 'i', ',', 'stat', 'in', 'enumerate', '(', 'top_stats', '[', ':', '10', ']', ')', ':', 'frame', '=', 'stat', '.', 'traceback', '[', '0', ']', 'filename', '=', 'os', '.', 'sep', '.', 'join', '(', 'frame', '.', 'filename', '.', 'split', '(', 'os', '.', 'sep', ')', '[', '-', '2', ':', ']', ')', 'print', '(', '"#%s: %s:%s: %.1f KiB"', '%', '(', 'i', ',', 'filename', ',', 'frame', '.', 'lineno', ',', 'stat', '.', 'size', '/', '1024', ')', ')', 'print', '(', 'linecache', '.', 'getline', '(', 'frame', '.', 'filename', ',', 'frame', '.', 'lineno', ')', '.', 'strip', '(', ')', ')', '# print(stat)', 'print', '(', '"--->>>\\n"', ')', 'return', 'res', 'return', '_f']
Decorator Execute tracemalloc
['Decorator', 'Execute', 'tracemalloc']
train
https://github.com/mojaie/chorus/blob/fc7fe23a0272554c67671645ab07830b315eeb1b/chorus/util/debug.py#L55-L75
7,036
quantopian/zipline
zipline/data/bcolz_daily_bars.py
BcolzDailyBarWriter.write
def write(self, data, assets=None, show_progress=False, invalid_data_behavior='warn'): """ Parameters ---------- data : iterable[tuple[int, pandas.DataFrame or bcolz.ctable]] The data chunks to write. Each chunk should be a tuple of sid and the data for that asset. assets : set[int], optional The assets that should be in ``data``. If this is provided we will check ``data`` against the assets and provide better progress information. show_progress : bool, optional Whether or not to show a progress bar while writing. invalid_data_behavior : {'warn', 'raise', 'ignore'}, optional What to do when data is encountered that is outside the range of a uint32. Returns ------- table : bcolz.ctable The newly-written table. """ ctx = maybe_show_progress( ( (sid, self.to_ctable(df, invalid_data_behavior)) for sid, df in data ), show_progress=show_progress, item_show_func=self.progress_bar_item_show_func, label=self.progress_bar_message, length=len(assets) if assets is not None else None, ) with ctx as it: return self._write_internal(it, assets)
python
def write(self, data, assets=None, show_progress=False, invalid_data_behavior='warn'): """ Parameters ---------- data : iterable[tuple[int, pandas.DataFrame or bcolz.ctable]] The data chunks to write. Each chunk should be a tuple of sid and the data for that asset. assets : set[int], optional The assets that should be in ``data``. If this is provided we will check ``data`` against the assets and provide better progress information. show_progress : bool, optional Whether or not to show a progress bar while writing. invalid_data_behavior : {'warn', 'raise', 'ignore'}, optional What to do when data is encountered that is outside the range of a uint32. Returns ------- table : bcolz.ctable The newly-written table. """ ctx = maybe_show_progress( ( (sid, self.to_ctable(df, invalid_data_behavior)) for sid, df in data ), show_progress=show_progress, item_show_func=self.progress_bar_item_show_func, label=self.progress_bar_message, length=len(assets) if assets is not None else None, ) with ctx as it: return self._write_internal(it, assets)
['def', 'write', '(', 'self', ',', 'data', ',', 'assets', '=', 'None', ',', 'show_progress', '=', 'False', ',', 'invalid_data_behavior', '=', "'warn'", ')', ':', 'ctx', '=', 'maybe_show_progress', '(', '(', '(', 'sid', ',', 'self', '.', 'to_ctable', '(', 'df', ',', 'invalid_data_behavior', ')', ')', 'for', 'sid', ',', 'df', 'in', 'data', ')', ',', 'show_progress', '=', 'show_progress', ',', 'item_show_func', '=', 'self', '.', 'progress_bar_item_show_func', ',', 'label', '=', 'self', '.', 'progress_bar_message', ',', 'length', '=', 'len', '(', 'assets', ')', 'if', 'assets', 'is', 'not', 'None', 'else', 'None', ',', ')', 'with', 'ctx', 'as', 'it', ':', 'return', 'self', '.', '_write_internal', '(', 'it', ',', 'assets', ')']
Parameters ---------- data : iterable[tuple[int, pandas.DataFrame or bcolz.ctable]] The data chunks to write. Each chunk should be a tuple of sid and the data for that asset. assets : set[int], optional The assets that should be in ``data``. If this is provided we will check ``data`` against the assets and provide better progress information. show_progress : bool, optional Whether or not to show a progress bar while writing. invalid_data_behavior : {'warn', 'raise', 'ignore'}, optional What to do when data is encountered that is outside the range of a uint32. Returns ------- table : bcolz.ctable The newly-written table.
['Parameters', '----------', 'data', ':', 'iterable', '[', 'tuple', '[', 'int', 'pandas', '.', 'DataFrame', 'or', 'bcolz', '.', 'ctable', ']]', 'The', 'data', 'chunks', 'to', 'write', '.', 'Each', 'chunk', 'should', 'be', 'a', 'tuple', 'of', 'sid', 'and', 'the', 'data', 'for', 'that', 'asset', '.', 'assets', ':', 'set', '[', 'int', ']', 'optional', 'The', 'assets', 'that', 'should', 'be', 'in', 'data', '.', 'If', 'this', 'is', 'provided', 'we', 'will', 'check', 'data', 'against', 'the', 'assets', 'and', 'provide', 'better', 'progress', 'information', '.', 'show_progress', ':', 'bool', 'optional', 'Whether', 'or', 'not', 'to', 'show', 'a', 'progress', 'bar', 'while', 'writing', '.', 'invalid_data_behavior', ':', '{', 'warn', 'raise', 'ignore', '}', 'optional', 'What', 'to', 'do', 'when', 'data', 'is', 'encountered', 'that', 'is', 'outside', 'the', 'range', 'of', 'a', 'uint32', '.']
train
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/bcolz_daily_bars.py#L170-L207
7,037
nccgroup/Scout2
AWSScout2/configs/base.py
BaseConfig.fetch_all
def fetch_all(self, credentials, regions = [], partition_name = 'aws', targets = None): """ Generic fetching function that iterates through all of the service's targets :param credentials: F :param service: Name of the service :param regions: Name of regions to fetch data from :param partition_name: AWS partition to connect to :param targets: Type of resources to be fetched; defaults to all. """ global status, formatted_string # Initialize targets if not targets: targets = type(self).targets printInfo('Fetching %s config...' % format_service_name(self.service)) formatted_string = None api_service = self.service.lower() # Connect to the service if self.service in [ 's3' ]: # S3 namespace is global but APIs aren't.... api_clients = {} for region in build_region_list(self.service, regions, partition_name): api_clients[region] = connect_service('s3', credentials, region, silent = True) api_client = api_clients[list(api_clients.keys())[0]] elif self.service == 'route53domains': api_client = connect_service(self.service, credentials, 'us-east-1', silent = True) # TODO: use partition's default region else: api_client = connect_service(self.service, credentials, silent = True) # Threading to fetch & parse resources (queue consumer) params = {'api_client': api_client} if self.service in ['s3']: params['api_clients'] = api_clients q = self._init_threading(self.__fetch_target, params, self.thread_config['parse']) # Threading to list resources (queue feeder) params = {'api_client': api_client, 'q': q} if self.service in ['s3']: params['api_clients'] = api_clients qt = self._init_threading(self.__fetch_service, params, self.thread_config['list']) # Init display self.fetchstatuslogger = FetchStatusLogger(targets) # Go for target in targets: qt.put(target) # Join qt.join() q.join() # Show completion and force newline if self.service != 'iam': self.fetchstatuslogger.show(True)
python
def fetch_all(self, credentials, regions = [], partition_name = 'aws', targets = None): """ Generic fetching function that iterates through all of the service's targets :param credentials: F :param service: Name of the service :param regions: Name of regions to fetch data from :param partition_name: AWS partition to connect to :param targets: Type of resources to be fetched; defaults to all. """ global status, formatted_string # Initialize targets if not targets: targets = type(self).targets printInfo('Fetching %s config...' % format_service_name(self.service)) formatted_string = None api_service = self.service.lower() # Connect to the service if self.service in [ 's3' ]: # S3 namespace is global but APIs aren't.... api_clients = {} for region in build_region_list(self.service, regions, partition_name): api_clients[region] = connect_service('s3', credentials, region, silent = True) api_client = api_clients[list(api_clients.keys())[0]] elif self.service == 'route53domains': api_client = connect_service(self.service, credentials, 'us-east-1', silent = True) # TODO: use partition's default region else: api_client = connect_service(self.service, credentials, silent = True) # Threading to fetch & parse resources (queue consumer) params = {'api_client': api_client} if self.service in ['s3']: params['api_clients'] = api_clients q = self._init_threading(self.__fetch_target, params, self.thread_config['parse']) # Threading to list resources (queue feeder) params = {'api_client': api_client, 'q': q} if self.service in ['s3']: params['api_clients'] = api_clients qt = self._init_threading(self.__fetch_service, params, self.thread_config['list']) # Init display self.fetchstatuslogger = FetchStatusLogger(targets) # Go for target in targets: qt.put(target) # Join qt.join() q.join() # Show completion and force newline if self.service != 'iam': self.fetchstatuslogger.show(True)
['def', 'fetch_all', '(', 'self', ',', 'credentials', ',', 'regions', '=', '[', ']', ',', 'partition_name', '=', "'aws'", ',', 'targets', '=', 'None', ')', ':', 'global', 'status', ',', 'formatted_string', '# Initialize targets', 'if', 'not', 'targets', ':', 'targets', '=', 'type', '(', 'self', ')', '.', 'targets', 'printInfo', '(', "'Fetching %s config...'", '%', 'format_service_name', '(', 'self', '.', 'service', ')', ')', 'formatted_string', '=', 'None', 'api_service', '=', 'self', '.', 'service', '.', 'lower', '(', ')', '# Connect to the service', 'if', 'self', '.', 'service', 'in', '[', "'s3'", ']', ':', "# S3 namespace is global but APIs aren't....", 'api_clients', '=', '{', '}', 'for', 'region', 'in', 'build_region_list', '(', 'self', '.', 'service', ',', 'regions', ',', 'partition_name', ')', ':', 'api_clients', '[', 'region', ']', '=', 'connect_service', '(', "'s3'", ',', 'credentials', ',', 'region', ',', 'silent', '=', 'True', ')', 'api_client', '=', 'api_clients', '[', 'list', '(', 'api_clients', '.', 'keys', '(', ')', ')', '[', '0', ']', ']', 'elif', 'self', '.', 'service', '==', "'route53domains'", ':', 'api_client', '=', 'connect_service', '(', 'self', '.', 'service', ',', 'credentials', ',', "'us-east-1'", ',', 'silent', '=', 'True', ')', "# TODO: use partition's default region", 'else', ':', 'api_client', '=', 'connect_service', '(', 'self', '.', 'service', ',', 'credentials', ',', 'silent', '=', 'True', ')', '# Threading to fetch & parse resources (queue consumer)', 'params', '=', '{', "'api_client'", ':', 'api_client', '}', 'if', 'self', '.', 'service', 'in', '[', "'s3'", ']', ':', 'params', '[', "'api_clients'", ']', '=', 'api_clients', 'q', '=', 'self', '.', '_init_threading', '(', 'self', '.', '__fetch_target', ',', 'params', ',', 'self', '.', 'thread_config', '[', "'parse'", ']', ')', '# Threading to list resources (queue feeder)', 'params', '=', '{', "'api_client'", ':', 'api_client', ',', "'q'", ':', 'q', '}', 'if', 'self', '.', 'service', 'in', '[', "'s3'", ']', ':', 'params', '[', "'api_clients'", ']', '=', 'api_clients', 'qt', '=', 'self', '.', '_init_threading', '(', 'self', '.', '__fetch_service', ',', 'params', ',', 'self', '.', 'thread_config', '[', "'list'", ']', ')', '# Init display', 'self', '.', 'fetchstatuslogger', '=', 'FetchStatusLogger', '(', 'targets', ')', '# Go', 'for', 'target', 'in', 'targets', ':', 'qt', '.', 'put', '(', 'target', ')', '# Join', 'qt', '.', 'join', '(', ')', 'q', '.', 'join', '(', ')', '# Show completion and force newline', 'if', 'self', '.', 'service', '!=', "'iam'", ':', 'self', '.', 'fetchstatuslogger', '.', 'show', '(', 'True', ')']
Generic fetching function that iterates through all of the service's targets :param credentials: F :param service: Name of the service :param regions: Name of regions to fetch data from :param partition_name: AWS partition to connect to :param targets: Type of resources to be fetched; defaults to all.
['Generic', 'fetching', 'function', 'that', 'iterates', 'through', 'all', 'of', 'the', 'service', 's', 'targets']
train
https://github.com/nccgroup/Scout2/blob/5d86d46d7ed91a92000496189e9cfa6b98243937/AWSScout2/configs/base.py#L53-L101
7,038
volfpeter/graphscraper
src/graphscraper/spotifyartist.py
SpotifyClient.similar_artists
def similar_artists(self, artist_id: str) -> List[NameExternalIDPair]: """ Returns zero or more similar artists (in the form of artist name - external ID pairs) to the one corresponding to the given artist ID. Arguments: artist_id ([str]): The Spotify ID of the artist for whom similar artists are requested. Returns: Zero or more artist name - external ID pairs. Raises: requests.HTTPError: If an HTTP error occurred during the request. SpotifyClientError: If an invalid item is found. """ response: requests.Response = requests.get( self._API_URL_TEMPLATE.format("artists/{}/related-artists".format(artist_id)), headers={"Authorization": "Bearer {}".format(self._token.access_token)} ) # TODO: handle API rate limiting response.raise_for_status() if not response.text: return [] result: List[NameExternalIDPair] = [] data: List[Dict] = response.json()["artists"] for artist in data: artist = NameExternalIDPair(artist["name"], artist["id"]) if artist.name is None or artist.external_id is None: raise SpotifyClientError("Name or ID is missing") result.append(artist) return result
python
def similar_artists(self, artist_id: str) -> List[NameExternalIDPair]: """ Returns zero or more similar artists (in the form of artist name - external ID pairs) to the one corresponding to the given artist ID. Arguments: artist_id ([str]): The Spotify ID of the artist for whom similar artists are requested. Returns: Zero or more artist name - external ID pairs. Raises: requests.HTTPError: If an HTTP error occurred during the request. SpotifyClientError: If an invalid item is found. """ response: requests.Response = requests.get( self._API_URL_TEMPLATE.format("artists/{}/related-artists".format(artist_id)), headers={"Authorization": "Bearer {}".format(self._token.access_token)} ) # TODO: handle API rate limiting response.raise_for_status() if not response.text: return [] result: List[NameExternalIDPair] = [] data: List[Dict] = response.json()["artists"] for artist in data: artist = NameExternalIDPair(artist["name"], artist["id"]) if artist.name is None or artist.external_id is None: raise SpotifyClientError("Name or ID is missing") result.append(artist) return result
['def', 'similar_artists', '(', 'self', ',', 'artist_id', ':', 'str', ')', '->', 'List', '[', 'NameExternalIDPair', ']', ':', 'response', ':', 'requests', '.', 'Response', '=', 'requests', '.', 'get', '(', 'self', '.', '_API_URL_TEMPLATE', '.', 'format', '(', '"artists/{}/related-artists"', '.', 'format', '(', 'artist_id', ')', ')', ',', 'headers', '=', '{', '"Authorization"', ':', '"Bearer {}"', '.', 'format', '(', 'self', '.', '_token', '.', 'access_token', ')', '}', ')', '# TODO: handle API rate limiting', 'response', '.', 'raise_for_status', '(', ')', 'if', 'not', 'response', '.', 'text', ':', 'return', '[', ']', 'result', ':', 'List', '[', 'NameExternalIDPair', ']', '=', '[', ']', 'data', ':', 'List', '[', 'Dict', ']', '=', 'response', '.', 'json', '(', ')', '[', '"artists"', ']', 'for', 'artist', 'in', 'data', ':', 'artist', '=', 'NameExternalIDPair', '(', 'artist', '[', '"name"', ']', ',', 'artist', '[', '"id"', ']', ')', 'if', 'artist', '.', 'name', 'is', 'None', 'or', 'artist', '.', 'external_id', 'is', 'None', ':', 'raise', 'SpotifyClientError', '(', '"Name or ID is missing"', ')', 'result', '.', 'append', '(', 'artist', ')', 'return', 'result']
Returns zero or more similar artists (in the form of artist name - external ID pairs) to the one corresponding to the given artist ID. Arguments: artist_id ([str]): The Spotify ID of the artist for whom similar artists are requested. Returns: Zero or more artist name - external ID pairs. Raises: requests.HTTPError: If an HTTP error occurred during the request. SpotifyClientError: If an invalid item is found.
['Returns', 'zero', 'or', 'more', 'similar', 'artists', '(', 'in', 'the', 'form', 'of', 'artist', 'name', '-', 'external', 'ID', 'pairs', ')', 'to', 'the', 'one', 'corresponding', 'to', 'the', 'given', 'artist', 'ID', '.']
train
https://github.com/volfpeter/graphscraper/blob/11d407509956a282ee25190ed6491a162fc0fe7f/src/graphscraper/spotifyartist.py#L384-L418
7,039
yero13/na3x
na3x/db/data.py
CRUD.upsert_multi
def upsert_multi(db, collection, object, match_params=None): """ Wrapper for pymongo.insert_many() and update_many() :param db: db connection :param collection: collection to update :param object: the modifications to apply :param match_params: a query that matches the documents to update :return: ids of inserted/updated document """ if isinstance(object, list) and len(object) > 0: return str(db[collection].insert_many(object).inserted_ids) elif isinstance(object, dict): return str(db[collection].update_many(match_params, {"$set": object}, upsert=False).upserted_id)
python
def upsert_multi(db, collection, object, match_params=None): """ Wrapper for pymongo.insert_many() and update_many() :param db: db connection :param collection: collection to update :param object: the modifications to apply :param match_params: a query that matches the documents to update :return: ids of inserted/updated document """ if isinstance(object, list) and len(object) > 0: return str(db[collection].insert_many(object).inserted_ids) elif isinstance(object, dict): return str(db[collection].update_many(match_params, {"$set": object}, upsert=False).upserted_id)
['def', 'upsert_multi', '(', 'db', ',', 'collection', ',', 'object', ',', 'match_params', '=', 'None', ')', ':', 'if', 'isinstance', '(', 'object', ',', 'list', ')', 'and', 'len', '(', 'object', ')', '>', '0', ':', 'return', 'str', '(', 'db', '[', 'collection', ']', '.', 'insert_many', '(', 'object', ')', '.', 'inserted_ids', ')', 'elif', 'isinstance', '(', 'object', ',', 'dict', ')', ':', 'return', 'str', '(', 'db', '[', 'collection', ']', '.', 'update_many', '(', 'match_params', ',', '{', '"$set"', ':', 'object', '}', ',', 'upsert', '=', 'False', ')', '.', 'upserted_id', ')']
Wrapper for pymongo.insert_many() and update_many() :param db: db connection :param collection: collection to update :param object: the modifications to apply :param match_params: a query that matches the documents to update :return: ids of inserted/updated document
['Wrapper', 'for', 'pymongo', '.', 'insert_many', '()', 'and', 'update_many', '()', ':', 'param', 'db', ':', 'db', 'connection', ':', 'param', 'collection', ':', 'collection', 'to', 'update', ':', 'param', 'object', ':', 'the', 'modifications', 'to', 'apply', ':', 'param', 'match_params', ':', 'a', 'query', 'that', 'matches', 'the', 'documents', 'to', 'update', ':', 'return', ':', 'ids', 'of', 'inserted', '/', 'updated', 'document']
train
https://github.com/yero13/na3x/blob/b31ef801ea574081125020a7d0f9c4242f8f8b02/na3x/db/data.py#L69-L81
7,040
JdeRobot/base
src/drivers/MAVLinkServer/MAVProxy/pymavlink/dialects/v10/matrixpilot.py
MAVLink.set_home_position_encode
def set_home_position_encode(self, target_system, latitude, longitude, altitude, x, y, z, q, approach_x, approach_y, approach_z): ''' The position the system will return to and land on. The position is set automatically by the system during the takeoff in case it was not explicitely set by the operator before or after. The global and local positions encode the position in the respective coordinate frames, while the q parameter encodes the orientation of the surface. Under normal conditions it describes the heading and terrain slope, which can be used by the aircraft to adjust the approach. The approach 3D vector describes the point to which the system should fly in normal flight mode and then perform a landing sequence along the vector. target_system : System ID. (uint8_t) latitude : Latitude (WGS84), in degrees * 1E7 (int32_t) longitude : Longitude (WGS84, in degrees * 1E7 (int32_t) altitude : Altitude (AMSL), in meters * 1000 (positive for up) (int32_t) x : Local X position of this position in the local coordinate frame (float) y : Local Y position of this position in the local coordinate frame (float) z : Local Z position of this position in the local coordinate frame (float) q : World to surface normal and heading transformation of the takeoff position. Used to indicate the heading and slope of the ground (float) approach_x : Local X position of the end of the approach vector. Multicopters should set this position based on their takeoff path. Grass-landing fixed wing aircraft should set it the same way as multicopters. Runway-landing fixed wing aircraft should set it to the opposite direction of the takeoff, assuming the takeoff happened from the threshold / touchdown zone. (float) approach_y : Local Y position of the end of the approach vector. Multicopters should set this position based on their takeoff path. Grass-landing fixed wing aircraft should set it the same way as multicopters. Runway-landing fixed wing aircraft should set it to the opposite direction of the takeoff, assuming the takeoff happened from the threshold / touchdown zone. (float) approach_z : Local Z position of the end of the approach vector. Multicopters should set this position based on their takeoff path. Grass-landing fixed wing aircraft should set it the same way as multicopters. Runway-landing fixed wing aircraft should set it to the opposite direction of the takeoff, assuming the takeoff happened from the threshold / touchdown zone. (float) ''' return MAVLink_set_home_position_message(target_system, latitude, longitude, altitude, x, y, z, q, approach_x, approach_y, approach_z)
python
def set_home_position_encode(self, target_system, latitude, longitude, altitude, x, y, z, q, approach_x, approach_y, approach_z): ''' The position the system will return to and land on. The position is set automatically by the system during the takeoff in case it was not explicitely set by the operator before or after. The global and local positions encode the position in the respective coordinate frames, while the q parameter encodes the orientation of the surface. Under normal conditions it describes the heading and terrain slope, which can be used by the aircraft to adjust the approach. The approach 3D vector describes the point to which the system should fly in normal flight mode and then perform a landing sequence along the vector. target_system : System ID. (uint8_t) latitude : Latitude (WGS84), in degrees * 1E7 (int32_t) longitude : Longitude (WGS84, in degrees * 1E7 (int32_t) altitude : Altitude (AMSL), in meters * 1000 (positive for up) (int32_t) x : Local X position of this position in the local coordinate frame (float) y : Local Y position of this position in the local coordinate frame (float) z : Local Z position of this position in the local coordinate frame (float) q : World to surface normal and heading transformation of the takeoff position. Used to indicate the heading and slope of the ground (float) approach_x : Local X position of the end of the approach vector. Multicopters should set this position based on their takeoff path. Grass-landing fixed wing aircraft should set it the same way as multicopters. Runway-landing fixed wing aircraft should set it to the opposite direction of the takeoff, assuming the takeoff happened from the threshold / touchdown zone. (float) approach_y : Local Y position of the end of the approach vector. Multicopters should set this position based on their takeoff path. Grass-landing fixed wing aircraft should set it the same way as multicopters. Runway-landing fixed wing aircraft should set it to the opposite direction of the takeoff, assuming the takeoff happened from the threshold / touchdown zone. (float) approach_z : Local Z position of the end of the approach vector. Multicopters should set this position based on their takeoff path. Grass-landing fixed wing aircraft should set it the same way as multicopters. Runway-landing fixed wing aircraft should set it to the opposite direction of the takeoff, assuming the takeoff happened from the threshold / touchdown zone. (float) ''' return MAVLink_set_home_position_message(target_system, latitude, longitude, altitude, x, y, z, q, approach_x, approach_y, approach_z)
['def', 'set_home_position_encode', '(', 'self', ',', 'target_system', ',', 'latitude', ',', 'longitude', ',', 'altitude', ',', 'x', ',', 'y', ',', 'z', ',', 'q', ',', 'approach_x', ',', 'approach_y', ',', 'approach_z', ')', ':', 'return', 'MAVLink_set_home_position_message', '(', 'target_system', ',', 'latitude', ',', 'longitude', ',', 'altitude', ',', 'x', ',', 'y', ',', 'z', ',', 'q', ',', 'approach_x', ',', 'approach_y', ',', 'approach_z', ')']
The position the system will return to and land on. The position is set automatically by the system during the takeoff in case it was not explicitely set by the operator before or after. The global and local positions encode the position in the respective coordinate frames, while the q parameter encodes the orientation of the surface. Under normal conditions it describes the heading and terrain slope, which can be used by the aircraft to adjust the approach. The approach 3D vector describes the point to which the system should fly in normal flight mode and then perform a landing sequence along the vector. target_system : System ID. (uint8_t) latitude : Latitude (WGS84), in degrees * 1E7 (int32_t) longitude : Longitude (WGS84, in degrees * 1E7 (int32_t) altitude : Altitude (AMSL), in meters * 1000 (positive for up) (int32_t) x : Local X position of this position in the local coordinate frame (float) y : Local Y position of this position in the local coordinate frame (float) z : Local Z position of this position in the local coordinate frame (float) q : World to surface normal and heading transformation of the takeoff position. Used to indicate the heading and slope of the ground (float) approach_x : Local X position of the end of the approach vector. Multicopters should set this position based on their takeoff path. Grass-landing fixed wing aircraft should set it the same way as multicopters. Runway-landing fixed wing aircraft should set it to the opposite direction of the takeoff, assuming the takeoff happened from the threshold / touchdown zone. (float) approach_y : Local Y position of the end of the approach vector. Multicopters should set this position based on their takeoff path. Grass-landing fixed wing aircraft should set it the same way as multicopters. Runway-landing fixed wing aircraft should set it to the opposite direction of the takeoff, assuming the takeoff happened from the threshold / touchdown zone. (float) approach_z : Local Z position of the end of the approach vector. Multicopters should set this position based on their takeoff path. Grass-landing fixed wing aircraft should set it the same way as multicopters. Runway-landing fixed wing aircraft should set it to the opposite direction of the takeoff, assuming the takeoff happened from the threshold / touchdown zone. (float)
['The', 'position', 'the', 'system', 'will', 'return', 'to', 'and', 'land', 'on', '.', 'The', 'position', 'is', 'set', 'automatically', 'by', 'the', 'system', 'during', 'the', 'takeoff', 'in', 'case', 'it', 'was', 'not', 'explicitely', 'set', 'by', 'the', 'operator', 'before', 'or', 'after', '.', 'The', 'global', 'and', 'local', 'positions', 'encode', 'the', 'position', 'in', 'the', 'respective', 'coordinate', 'frames', 'while', 'the', 'q', 'parameter', 'encodes', 'the', 'orientation', 'of', 'the', 'surface', '.', 'Under', 'normal', 'conditions', 'it', 'describes', 'the', 'heading', 'and', 'terrain', 'slope', 'which', 'can', 'be', 'used', 'by', 'the', 'aircraft', 'to', 'adjust', 'the', 'approach', '.', 'The', 'approach', '3D', 'vector', 'describes', 'the', 'point', 'to', 'which', 'the', 'system', 'should', 'fly', 'in', 'normal', 'flight', 'mode', 'and', 'then', 'perform', 'a', 'landing', 'sequence', 'along', 'the', 'vector', '.']
train
https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/pymavlink/dialects/v10/matrixpilot.py#L12833-L12861
7,041
bwhite/hadoopy
hadoopy/thirdparty/pyinstaller/PyInstaller/build.py
MERGE
def MERGE(*args): """ Wipe repeated dependencies from a list of (Analysis, id, filename) tuples, supplied as argument. Replace id with the correct filename. """ # Get the longest common path common_prefix = os.path.dirname(os.path.commonprefix([os.path.abspath(a.scripts[-1][1]) for a, _, _ in args])) if common_prefix[-1] != os.sep: common_prefix += os.sep logger.info("Common prefix: %s", common_prefix) # Adjust dependencies for each Analysis object; the first Analysis in the # list will include all dependencies. id_to_path = {} for _, i, p in args: id_to_path[i] = p dependencies = {} for analysis, _, _ in args: path = os.path.abspath(analysis.scripts[-1][1]).replace(common_prefix, "", 1) path = os.path.splitext(path)[0] if path in id_to_path: path = id_to_path[path] set_dependencies(analysis, dependencies, path)
python
def MERGE(*args): """ Wipe repeated dependencies from a list of (Analysis, id, filename) tuples, supplied as argument. Replace id with the correct filename. """ # Get the longest common path common_prefix = os.path.dirname(os.path.commonprefix([os.path.abspath(a.scripts[-1][1]) for a, _, _ in args])) if common_prefix[-1] != os.sep: common_prefix += os.sep logger.info("Common prefix: %s", common_prefix) # Adjust dependencies for each Analysis object; the first Analysis in the # list will include all dependencies. id_to_path = {} for _, i, p in args: id_to_path[i] = p dependencies = {} for analysis, _, _ in args: path = os.path.abspath(analysis.scripts[-1][1]).replace(common_prefix, "", 1) path = os.path.splitext(path)[0] if path in id_to_path: path = id_to_path[path] set_dependencies(analysis, dependencies, path)
['def', 'MERGE', '(', '*', 'args', ')', ':', '# Get the longest common path', 'common_prefix', '=', 'os', '.', 'path', '.', 'dirname', '(', 'os', '.', 'path', '.', 'commonprefix', '(', '[', 'os', '.', 'path', '.', 'abspath', '(', 'a', '.', 'scripts', '[', '-', '1', ']', '[', '1', ']', ')', 'for', 'a', ',', '_', ',', '_', 'in', 'args', ']', ')', ')', 'if', 'common_prefix', '[', '-', '1', ']', '!=', 'os', '.', 'sep', ':', 'common_prefix', '+=', 'os', '.', 'sep', 'logger', '.', 'info', '(', '"Common prefix: %s"', ',', 'common_prefix', ')', '# Adjust dependencies for each Analysis object; the first Analysis in the', '# list will include all dependencies.', 'id_to_path', '=', '{', '}', 'for', '_', ',', 'i', ',', 'p', 'in', 'args', ':', 'id_to_path', '[', 'i', ']', '=', 'p', 'dependencies', '=', '{', '}', 'for', 'analysis', ',', '_', ',', '_', 'in', 'args', ':', 'path', '=', 'os', '.', 'path', '.', 'abspath', '(', 'analysis', '.', 'scripts', '[', '-', '1', ']', '[', '1', ']', ')', '.', 'replace', '(', 'common_prefix', ',', '""', ',', '1', ')', 'path', '=', 'os', '.', 'path', '.', 'splitext', '(', 'path', ')', '[', '0', ']', 'if', 'path', 'in', 'id_to_path', ':', 'path', '=', 'id_to_path', '[', 'path', ']', 'set_dependencies', '(', 'analysis', ',', 'dependencies', ',', 'path', ')']
Wipe repeated dependencies from a list of (Analysis, id, filename) tuples, supplied as argument. Replace id with the correct filename.
['Wipe', 'repeated', 'dependencies', 'from', 'a', 'list', 'of', '(', 'Analysis', 'id', 'filename', ')', 'tuples', 'supplied', 'as', 'argument', '.', 'Replace', 'id', 'with', 'the', 'correct', 'filename', '.']
train
https://github.com/bwhite/hadoopy/blob/ff39b4e6d4e6efaf1f571cf0f2c0e0d7ab28c2d6/hadoopy/thirdparty/pyinstaller/PyInstaller/build.py#L1477-L1499
7,042
srittau/python-asserts
asserts/__init__.py
assert_is_not_none
def assert_is_not_none(expr, msg_fmt="{msg}"): """Fail if the expression is None. >>> assert_is_not_none(0) >>> assert_is_not_none(None) Traceback (most recent call last): ... AssertionError: expression is None The following msg_fmt arguments are supported: * msg - the default error message * expr - tested expression """ if expr is None: msg = "expression is None" fail(msg_fmt.format(msg=msg, expr=expr))
python
def assert_is_not_none(expr, msg_fmt="{msg}"): """Fail if the expression is None. >>> assert_is_not_none(0) >>> assert_is_not_none(None) Traceback (most recent call last): ... AssertionError: expression is None The following msg_fmt arguments are supported: * msg - the default error message * expr - tested expression """ if expr is None: msg = "expression is None" fail(msg_fmt.format(msg=msg, expr=expr))
['def', 'assert_is_not_none', '(', 'expr', ',', 'msg_fmt', '=', '"{msg}"', ')', ':', 'if', 'expr', 'is', 'None', ':', 'msg', '=', '"expression is None"', 'fail', '(', 'msg_fmt', '.', 'format', '(', 'msg', '=', 'msg', ',', 'expr', '=', 'expr', ')', ')']
Fail if the expression is None. >>> assert_is_not_none(0) >>> assert_is_not_none(None) Traceback (most recent call last): ... AssertionError: expression is None The following msg_fmt arguments are supported: * msg - the default error message * expr - tested expression
['Fail', 'if', 'the', 'expression', 'is', 'None', '.']
train
https://github.com/srittau/python-asserts/blob/1d5c797031c68ee27552d1c94e7f918c3d3d0453/asserts/__init__.py#L137-L152
7,043
jaraco/path.py
path/__init__.py
Path.removedirs_p
def removedirs_p(self): """ Like :meth:`removedirs`, but does not raise an exception if the directory is not empty or does not exist. """ with contextlib.suppress(FileExistsError, DirectoryNotEmpty): with DirectoryNotEmpty.translate(): self.removedirs() return self
python
def removedirs_p(self): """ Like :meth:`removedirs`, but does not raise an exception if the directory is not empty or does not exist. """ with contextlib.suppress(FileExistsError, DirectoryNotEmpty): with DirectoryNotEmpty.translate(): self.removedirs() return self
['def', 'removedirs_p', '(', 'self', ')', ':', 'with', 'contextlib', '.', 'suppress', '(', 'FileExistsError', ',', 'DirectoryNotEmpty', ')', ':', 'with', 'DirectoryNotEmpty', '.', 'translate', '(', ')', ':', 'self', '.', 'removedirs', '(', ')', 'return', 'self']
Like :meth:`removedirs`, but does not raise an exception if the directory is not empty or does not exist.
['Like', ':', 'meth', ':', 'removedirs', 'but', 'does', 'not', 'raise', 'an', 'exception', 'if', 'the', 'directory', 'is', 'not', 'empty', 'or', 'does', 'not', 'exist', '.']
train
https://github.com/jaraco/path.py/blob/bbe7d99e7a64a004f866ace9ec12bd9b296908f5/path/__init__.py#L1147-L1153
7,044
APSL/transmanager
transmanager/search_indexes.py
TransTaskIndex.index_queryset
def index_queryset(self, using=None): """ Used when the entire index for model is updated. """ return self.get_model().objects.filter(date_creation__lte=datetime.datetime.now())
python
def index_queryset(self, using=None): """ Used when the entire index for model is updated. """ return self.get_model().objects.filter(date_creation__lte=datetime.datetime.now())
['def', 'index_queryset', '(', 'self', ',', 'using', '=', 'None', ')', ':', 'return', 'self', '.', 'get_model', '(', ')', '.', 'objects', '.', 'filter', '(', 'date_creation__lte', '=', 'datetime', '.', 'datetime', '.', 'now', '(', ')', ')']
Used when the entire index for model is updated.
['Used', 'when', 'the', 'entire', 'index', 'for', 'model', 'is', 'updated', '.']
train
https://github.com/APSL/transmanager/blob/79157085840008e146b264521681913090197ed1/transmanager/search_indexes.py#L17-L21
7,045
bcbio/bcbio-nextgen
bcbio/heterogeneity/phylowgs.py
_prep_inputs
def _prep_inputs(vrn_info, cnv_info, somatic_info, work_dir, config): """Prepare inputs for running PhyloWGS from variant and CNV calls. """ exe = os.path.join(os.path.dirname(sys.executable), "create_phylowgs_inputs.py") assert os.path.exists(exe), "Could not find input prep script for PhyloWGS runs." ssm_file = os.path.join(work_dir, "ssm_data.txt") cnv_file = os.path.join(work_dir, "cnv_data.txt") if not utils.file_exists(ssm_file) or not utils.file_exists(cnv_file): with file_transaction(somatic_info.tumor_data, ssm_file, cnv_file) as (tx_ssm_file, tx_cnv_file): variant_type, input_vcf_file = _prep_vrn_file(vrn_info["vrn_file"], vrn_info["variantcaller"], work_dir, somatic_info, cnv_info["ignore"], config) input_cnv_file = _prep_cnv_file(cnv_info["subclones"], work_dir, somatic_info) cmd = [sys.executable, exe, "--sample-size", str(config["sample_size"]), "--tumor-sample", somatic_info.tumor_name, "--battenberg", input_cnv_file, "--cellularity", _read_contam(cnv_info["contamination"]), "--output-cnvs", tx_cnv_file, "--output-variants", tx_ssm_file, "--variant-type", variant_type, input_vcf_file] do.run(cmd, "Prepare PhyloWGS inputs.") return ssm_file, cnv_file
python
def _prep_inputs(vrn_info, cnv_info, somatic_info, work_dir, config): """Prepare inputs for running PhyloWGS from variant and CNV calls. """ exe = os.path.join(os.path.dirname(sys.executable), "create_phylowgs_inputs.py") assert os.path.exists(exe), "Could not find input prep script for PhyloWGS runs." ssm_file = os.path.join(work_dir, "ssm_data.txt") cnv_file = os.path.join(work_dir, "cnv_data.txt") if not utils.file_exists(ssm_file) or not utils.file_exists(cnv_file): with file_transaction(somatic_info.tumor_data, ssm_file, cnv_file) as (tx_ssm_file, tx_cnv_file): variant_type, input_vcf_file = _prep_vrn_file(vrn_info["vrn_file"], vrn_info["variantcaller"], work_dir, somatic_info, cnv_info["ignore"], config) input_cnv_file = _prep_cnv_file(cnv_info["subclones"], work_dir, somatic_info) cmd = [sys.executable, exe, "--sample-size", str(config["sample_size"]), "--tumor-sample", somatic_info.tumor_name, "--battenberg", input_cnv_file, "--cellularity", _read_contam(cnv_info["contamination"]), "--output-cnvs", tx_cnv_file, "--output-variants", tx_ssm_file, "--variant-type", variant_type, input_vcf_file] do.run(cmd, "Prepare PhyloWGS inputs.") return ssm_file, cnv_file
['def', '_prep_inputs', '(', 'vrn_info', ',', 'cnv_info', ',', 'somatic_info', ',', 'work_dir', ',', 'config', ')', ':', 'exe', '=', 'os', '.', 'path', '.', 'join', '(', 'os', '.', 'path', '.', 'dirname', '(', 'sys', '.', 'executable', ')', ',', '"create_phylowgs_inputs.py"', ')', 'assert', 'os', '.', 'path', '.', 'exists', '(', 'exe', ')', ',', '"Could not find input prep script for PhyloWGS runs."', 'ssm_file', '=', 'os', '.', 'path', '.', 'join', '(', 'work_dir', ',', '"ssm_data.txt"', ')', 'cnv_file', '=', 'os', '.', 'path', '.', 'join', '(', 'work_dir', ',', '"cnv_data.txt"', ')', 'if', 'not', 'utils', '.', 'file_exists', '(', 'ssm_file', ')', 'or', 'not', 'utils', '.', 'file_exists', '(', 'cnv_file', ')', ':', 'with', 'file_transaction', '(', 'somatic_info', '.', 'tumor_data', ',', 'ssm_file', ',', 'cnv_file', ')', 'as', '(', 'tx_ssm_file', ',', 'tx_cnv_file', ')', ':', 'variant_type', ',', 'input_vcf_file', '=', '_prep_vrn_file', '(', 'vrn_info', '[', '"vrn_file"', ']', ',', 'vrn_info', '[', '"variantcaller"', ']', ',', 'work_dir', ',', 'somatic_info', ',', 'cnv_info', '[', '"ignore"', ']', ',', 'config', ')', 'input_cnv_file', '=', '_prep_cnv_file', '(', 'cnv_info', '[', '"subclones"', ']', ',', 'work_dir', ',', 'somatic_info', ')', 'cmd', '=', '[', 'sys', '.', 'executable', ',', 'exe', ',', '"--sample-size"', ',', 'str', '(', 'config', '[', '"sample_size"', ']', ')', ',', '"--tumor-sample"', ',', 'somatic_info', '.', 'tumor_name', ',', '"--battenberg"', ',', 'input_cnv_file', ',', '"--cellularity"', ',', '_read_contam', '(', 'cnv_info', '[', '"contamination"', ']', ')', ',', '"--output-cnvs"', ',', 'tx_cnv_file', ',', '"--output-variants"', ',', 'tx_ssm_file', ',', '"--variant-type"', ',', 'variant_type', ',', 'input_vcf_file', ']', 'do', '.', 'run', '(', 'cmd', ',', '"Prepare PhyloWGS inputs."', ')', 'return', 'ssm_file', ',', 'cnv_file']
Prepare inputs for running PhyloWGS from variant and CNV calls.
['Prepare', 'inputs', 'for', 'running', 'PhyloWGS', 'from', 'variant', 'and', 'CNV', 'calls', '.']
train
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/heterogeneity/phylowgs.py#L153-L171
7,046
AltSchool/dynamic-rest
dynamic_rest/datastructures.py
TreeMap.get_paths
def get_paths(self): """Get all paths from the root to the leaves. For example, given a chain like `{'a':{'b':{'c':None}}}`, this method would return `[['a', 'b', 'c']]`. Returns: A list of lists of paths. """ paths = [] for key, child in six.iteritems(self): if isinstance(child, TreeMap) and child: # current child is an intermediate node for path in child.get_paths(): path.insert(0, key) paths.append(path) else: # current child is an endpoint paths.append([key]) return paths
python
def get_paths(self): """Get all paths from the root to the leaves. For example, given a chain like `{'a':{'b':{'c':None}}}`, this method would return `[['a', 'b', 'c']]`. Returns: A list of lists of paths. """ paths = [] for key, child in six.iteritems(self): if isinstance(child, TreeMap) and child: # current child is an intermediate node for path in child.get_paths(): path.insert(0, key) paths.append(path) else: # current child is an endpoint paths.append([key]) return paths
['def', 'get_paths', '(', 'self', ')', ':', 'paths', '=', '[', ']', 'for', 'key', ',', 'child', 'in', 'six', '.', 'iteritems', '(', 'self', ')', ':', 'if', 'isinstance', '(', 'child', ',', 'TreeMap', ')', 'and', 'child', ':', '# current child is an intermediate node', 'for', 'path', 'in', 'child', '.', 'get_paths', '(', ')', ':', 'path', '.', 'insert', '(', '0', ',', 'key', ')', 'paths', '.', 'append', '(', 'path', ')', 'else', ':', '# current child is an endpoint', 'paths', '.', 'append', '(', '[', 'key', ']', ')', 'return', 'paths']
Get all paths from the root to the leaves. For example, given a chain like `{'a':{'b':{'c':None}}}`, this method would return `[['a', 'b', 'c']]`. Returns: A list of lists of paths.
['Get', 'all', 'paths', 'from', 'the', 'root', 'to', 'the', 'leaves', '.']
train
https://github.com/AltSchool/dynamic-rest/blob/5b0338c3dd8bc638d60c3bb92645857c5b89c920/dynamic_rest/datastructures.py#L8-L27
7,047
heuer/segno
segno/encoder.py
add_format_info
def add_format_info(matrix, version, error, mask_pattern): """\ Adds the format information into the provided matrix. ISO/IEC 18004:2015(E) -- 7.9 Format information (page 55) ISO/IEC 18004:2015(E) -- 7.9.1 QR Code symbols ISO/IEC 18004:2015(E) -- 7.9.2 Micro QR Code symbols :param matrix: The matrix. :param int version: Version constant :param int error: Error level constant. :param int mask_pattern: Mask pattern number. """ # 14: most significant bit # 0: least significant bit # # QR Code format info: Micro QR format info # col 0 col 7 col matrix[-1] col 1 # 0 | | [ ] # 1 0 # 2 1 # 3 2 # 4 3 # 5 4 # [ ] 5 # 6 6 # 14 13 12 11 10 9 [ ] 8 7 ... 7 6 5 4 3 2 1 0 14 13 12 11 10 9 8 7 # # ... # [ ] (dark module) # 8 # 9 # 10 # 11 # 12 # 13 # 14 is_micro = version < 1 format_info = calc_format_info(version, error, mask_pattern) offset = int(is_micro) for i in range(8): bit = (format_info >> i) & 0x01 if i == 6 and not is_micro: # Timing pattern offset += 1 # vertical row, upper left corner matrix[i + offset][8] = bit if not is_micro: # horizontal row, upper right corner matrix[8][-1 - i] = bit offset = int(is_micro) for i in range(8): bit = (format_info >> (14 - i)) & 0x01 if i == 6 and not is_micro: # Timing pattern offset = 1 # horizontal row, upper left corner matrix[8][i + offset] = bit if not is_micro: # vertical row, bottom left corner matrix[-1 - i][8] = bit if not is_micro: # Dark module matrix[-8][8] = 0x1
python
def add_format_info(matrix, version, error, mask_pattern): """\ Adds the format information into the provided matrix. ISO/IEC 18004:2015(E) -- 7.9 Format information (page 55) ISO/IEC 18004:2015(E) -- 7.9.1 QR Code symbols ISO/IEC 18004:2015(E) -- 7.9.2 Micro QR Code symbols :param matrix: The matrix. :param int version: Version constant :param int error: Error level constant. :param int mask_pattern: Mask pattern number. """ # 14: most significant bit # 0: least significant bit # # QR Code format info: Micro QR format info # col 0 col 7 col matrix[-1] col 1 # 0 | | [ ] # 1 0 # 2 1 # 3 2 # 4 3 # 5 4 # [ ] 5 # 6 6 # 14 13 12 11 10 9 [ ] 8 7 ... 7 6 5 4 3 2 1 0 14 13 12 11 10 9 8 7 # # ... # [ ] (dark module) # 8 # 9 # 10 # 11 # 12 # 13 # 14 is_micro = version < 1 format_info = calc_format_info(version, error, mask_pattern) offset = int(is_micro) for i in range(8): bit = (format_info >> i) & 0x01 if i == 6 and not is_micro: # Timing pattern offset += 1 # vertical row, upper left corner matrix[i + offset][8] = bit if not is_micro: # horizontal row, upper right corner matrix[8][-1 - i] = bit offset = int(is_micro) for i in range(8): bit = (format_info >> (14 - i)) & 0x01 if i == 6 and not is_micro: # Timing pattern offset = 1 # horizontal row, upper left corner matrix[8][i + offset] = bit if not is_micro: # vertical row, bottom left corner matrix[-1 - i][8] = bit if not is_micro: # Dark module matrix[-8][8] = 0x1
['def', 'add_format_info', '(', 'matrix', ',', 'version', ',', 'error', ',', 'mask_pattern', ')', ':', '# 14: most significant bit', '# 0: least significant bit', '#', '# QR Code format info: Micro QR format info', '# col 0 col 7 col matrix[-1] col 1', '# 0 | | [ ]', '# 1 0', '# 2 1', '# 3 2', '# 4 3', '# 5 4', '# [ ] 5', '# 6 6', '# 14 13 12 11 10 9 [ ] 8 7 ... 7 6 5 4 3 2 1 0 14 13 12 11 10 9 8 7', '#', '# ...', '# [ ] (dark module)', '# 8', '# 9', '# 10', '# 11', '# 12', '# 13', '# 14', 'is_micro', '=', 'version', '<', '1', 'format_info', '=', 'calc_format_info', '(', 'version', ',', 'error', ',', 'mask_pattern', ')', 'offset', '=', 'int', '(', 'is_micro', ')', 'for', 'i', 'in', 'range', '(', '8', ')', ':', 'bit', '=', '(', 'format_info', '>>', 'i', ')', '&', '0x01', 'if', 'i', '==', '6', 'and', 'not', 'is_micro', ':', '# Timing pattern', 'offset', '+=', '1', '# vertical row, upper left corner', 'matrix', '[', 'i', '+', 'offset', ']', '[', '8', ']', '=', 'bit', 'if', 'not', 'is_micro', ':', '# horizontal row, upper right corner', 'matrix', '[', '8', ']', '[', '-', '1', '-', 'i', ']', '=', 'bit', 'offset', '=', 'int', '(', 'is_micro', ')', 'for', 'i', 'in', 'range', '(', '8', ')', ':', 'bit', '=', '(', 'format_info', '>>', '(', '14', '-', 'i', ')', ')', '&', '0x01', 'if', 'i', '==', '6', 'and', 'not', 'is_micro', ':', '# Timing pattern', 'offset', '=', '1', '# horizontal row, upper left corner', 'matrix', '[', '8', ']', '[', 'i', '+', 'offset', ']', '=', 'bit', 'if', 'not', 'is_micro', ':', '# vertical row, bottom left corner', 'matrix', '[', '-', '1', '-', 'i', ']', '[', '8', ']', '=', 'bit', 'if', 'not', 'is_micro', ':', '# Dark module', 'matrix', '[', '-', '8', ']', '[', '8', ']', '=', '0x1']
\ Adds the format information into the provided matrix. ISO/IEC 18004:2015(E) -- 7.9 Format information (page 55) ISO/IEC 18004:2015(E) -- 7.9.1 QR Code symbols ISO/IEC 18004:2015(E) -- 7.9.2 Micro QR Code symbols :param matrix: The matrix. :param int version: Version constant :param int error: Error level constant. :param int mask_pattern: Mask pattern number.
['\\', 'Adds', 'the', 'format', 'information', 'into', 'the', 'provided', 'matrix', '.']
train
https://github.com/heuer/segno/blob/64d912a2bd17d0b5ff3e8b5d37098edfc663c2b3/segno/encoder.py#L945-L1006
7,048
brocade/pynos
pynos/versions/ver_7/ver_7_1_0/yang/brocade_lldp_ext.py
brocade_lldp_ext.get_lldp_neighbor_detail_output_has_more
def get_lldp_neighbor_detail_output_has_more(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") get_lldp_neighbor_detail = ET.Element("get_lldp_neighbor_detail") config = get_lldp_neighbor_detail output = ET.SubElement(get_lldp_neighbor_detail, "output") has_more = ET.SubElement(output, "has-more") has_more.text = kwargs.pop('has_more') callback = kwargs.pop('callback', self._callback) return callback(config)
python
def get_lldp_neighbor_detail_output_has_more(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") get_lldp_neighbor_detail = ET.Element("get_lldp_neighbor_detail") config = get_lldp_neighbor_detail output = ET.SubElement(get_lldp_neighbor_detail, "output") has_more = ET.SubElement(output, "has-more") has_more.text = kwargs.pop('has_more') callback = kwargs.pop('callback', self._callback) return callback(config)
['def', 'get_lldp_neighbor_detail_output_has_more', '(', 'self', ',', '*', '*', 'kwargs', ')', ':', 'config', '=', 'ET', '.', 'Element', '(', '"config"', ')', 'get_lldp_neighbor_detail', '=', 'ET', '.', 'Element', '(', '"get_lldp_neighbor_detail"', ')', 'config', '=', 'get_lldp_neighbor_detail', 'output', '=', 'ET', '.', 'SubElement', '(', 'get_lldp_neighbor_detail', ',', '"output"', ')', 'has_more', '=', 'ET', '.', 'SubElement', '(', 'output', ',', '"has-more"', ')', 'has_more', '.', 'text', '=', 'kwargs', '.', 'pop', '(', "'has_more'", ')', 'callback', '=', 'kwargs', '.', 'pop', '(', "'callback'", ',', 'self', '.', '_callback', ')', 'return', 'callback', '(', 'config', ')']
Auto Generated Code
['Auto', 'Generated', 'Code']
train
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_7/ver_7_1_0/yang/brocade_lldp_ext.py#L356-L367
7,049
nickoala/telepot
telepot/__init__.py
Bot.sendGame
def sendGame(self, chat_id, game_short_name, disable_notification=None, reply_to_message_id=None, reply_markup=None): """ See: https://core.telegram.org/bots/api#sendgame """ p = _strip(locals()) return self._api_request('sendGame', _rectify(p))
python
def sendGame(self, chat_id, game_short_name, disable_notification=None, reply_to_message_id=None, reply_markup=None): """ See: https://core.telegram.org/bots/api#sendgame """ p = _strip(locals()) return self._api_request('sendGame', _rectify(p))
['def', 'sendGame', '(', 'self', ',', 'chat_id', ',', 'game_short_name', ',', 'disable_notification', '=', 'None', ',', 'reply_to_message_id', '=', 'None', ',', 'reply_markup', '=', 'None', ')', ':', 'p', '=', '_strip', '(', 'locals', '(', ')', ')', 'return', 'self', '.', '_api_request', '(', "'sendGame'", ',', '_rectify', '(', 'p', ')', ')']
See: https://core.telegram.org/bots/api#sendgame
['See', ':', 'https', ':', '//', 'core', '.', 'telegram', '.', 'org', '/', 'bots', '/', 'api#sendgame']
train
https://github.com/nickoala/telepot/blob/3792fde251d0f1d5a6ca16c8ad1a71f89360c41d/telepot/__init__.py#L701-L707
7,050
pyviz/imagen
imagen/patterngenerator.py
Composite.state_pop
def state_pop(self): """ Pop the state of all generators """ super(Composite,self).state_pop() for gen in self.generators: gen.state_pop()
python
def state_pop(self): """ Pop the state of all generators """ super(Composite,self).state_pop() for gen in self.generators: gen.state_pop()
['def', 'state_pop', '(', 'self', ')', ':', 'super', '(', 'Composite', ',', 'self', ')', '.', 'state_pop', '(', ')', 'for', 'gen', 'in', 'self', '.', 'generators', ':', 'gen', '.', 'state_pop', '(', ')']
Pop the state of all generators
['Pop', 'the', 'state', 'of', 'all', 'generators']
train
https://github.com/pyviz/imagen/blob/53c5685c880f54b42795964d8db50b02e8590e88/imagen/patterngenerator.py#L524-L530
7,051
dmwm/DBS
Server/Python/src/dbs/dao/Oracle/BlockParent/ListChild.py
ListChild.execute
def execute(self, conn, block_name="", transaction = False): """ block: /a/b/c#d """ if not conn: msg='Oracle/BlockParent/List. No DB connection found' dbsExceptionHandler('dbsException-failed-connect2host', msg, self.logger.exception) sql = self.sql binds = {} if block_name: binds.update(block_name = block_name) else: dbsExceptionHandler("dbsException-invalid-input", "Oracle/BlockParent/ListChild. block_name must be provided.", self.logger.exception) cursors = self.dbi.processData(sql, binds, conn, transaction, returnCursor=True) result = [] for c in cursors: result.extend(self.formatCursor(c, size=100)) return result
python
def execute(self, conn, block_name="", transaction = False): """ block: /a/b/c#d """ if not conn: msg='Oracle/BlockParent/List. No DB connection found' dbsExceptionHandler('dbsException-failed-connect2host', msg, self.logger.exception) sql = self.sql binds = {} if block_name: binds.update(block_name = block_name) else: dbsExceptionHandler("dbsException-invalid-input", "Oracle/BlockParent/ListChild. block_name must be provided.", self.logger.exception) cursors = self.dbi.processData(sql, binds, conn, transaction, returnCursor=True) result = [] for c in cursors: result.extend(self.formatCursor(c, size=100)) return result
['def', 'execute', '(', 'self', ',', 'conn', ',', 'block_name', '=', '""', ',', 'transaction', '=', 'False', ')', ':', 'if', 'not', 'conn', ':', 'msg', '=', "'Oracle/BlockParent/List. No DB connection found'", 'dbsExceptionHandler', '(', "'dbsException-failed-connect2host'", ',', 'msg', ',', 'self', '.', 'logger', '.', 'exception', ')', 'sql', '=', 'self', '.', 'sql', 'binds', '=', '{', '}', 'if', 'block_name', ':', 'binds', '.', 'update', '(', 'block_name', '=', 'block_name', ')', 'else', ':', 'dbsExceptionHandler', '(', '"dbsException-invalid-input"', ',', '"Oracle/BlockParent/ListChild. block_name must be provided."', ',', 'self', '.', 'logger', '.', 'exception', ')', 'cursors', '=', 'self', '.', 'dbi', '.', 'processData', '(', 'sql', ',', 'binds', ',', 'conn', ',', 'transaction', ',', 'returnCursor', '=', 'True', ')', 'result', '=', '[', ']', 'for', 'c', 'in', 'cursors', ':', 'result', '.', 'extend', '(', 'self', '.', 'formatCursor', '(', 'c', ',', 'size', '=', '100', ')', ')', 'return', 'result']
block: /a/b/c#d
['block', ':', '/', 'a', '/', 'b', '/', 'c#d']
train
https://github.com/dmwm/DBS/blob/9619bafce3783b3e77f0415f8f9a258e33dd1e6f/Server/Python/src/dbs/dao/Oracle/BlockParent/ListChild.py#L27-L46
7,052
astorfi/speechpy
speechpy/processing.py
fft_spectrum
def fft_spectrum(frames, fft_points=512): """This function computes the one-dimensional n-point discrete Fourier Transform (DFT) of a real-valued array by means of an efficient algorithm called the Fast Fourier Transform (FFT). Please refer to https://docs.scipy.org/doc/numpy/reference/generated/numpy.fft.rfft.html for further details. Args: frames (array): The frame array in which each row is a frame. fft_points (int): The length of FFT. If fft_length is greater than frame_len, the frames will be zero-padded. Returns: array: The fft spectrum. If frames is an num_frames x sample_per_frame matrix, output will be num_frames x FFT_LENGTH. """ SPECTRUM_VECTOR = np.fft.rfft(frames, n=fft_points, axis=-1, norm=None) return np.absolute(SPECTRUM_VECTOR)
python
def fft_spectrum(frames, fft_points=512): """This function computes the one-dimensional n-point discrete Fourier Transform (DFT) of a real-valued array by means of an efficient algorithm called the Fast Fourier Transform (FFT). Please refer to https://docs.scipy.org/doc/numpy/reference/generated/numpy.fft.rfft.html for further details. Args: frames (array): The frame array in which each row is a frame. fft_points (int): The length of FFT. If fft_length is greater than frame_len, the frames will be zero-padded. Returns: array: The fft spectrum. If frames is an num_frames x sample_per_frame matrix, output will be num_frames x FFT_LENGTH. """ SPECTRUM_VECTOR = np.fft.rfft(frames, n=fft_points, axis=-1, norm=None) return np.absolute(SPECTRUM_VECTOR)
['def', 'fft_spectrum', '(', 'frames', ',', 'fft_points', '=', '512', ')', ':', 'SPECTRUM_VECTOR', '=', 'np', '.', 'fft', '.', 'rfft', '(', 'frames', ',', 'n', '=', 'fft_points', ',', 'axis', '=', '-', '1', ',', 'norm', '=', 'None', ')', 'return', 'np', '.', 'absolute', '(', 'SPECTRUM_VECTOR', ')']
This function computes the one-dimensional n-point discrete Fourier Transform (DFT) of a real-valued array by means of an efficient algorithm called the Fast Fourier Transform (FFT). Please refer to https://docs.scipy.org/doc/numpy/reference/generated/numpy.fft.rfft.html for further details. Args: frames (array): The frame array in which each row is a frame. fft_points (int): The length of FFT. If fft_length is greater than frame_len, the frames will be zero-padded. Returns: array: The fft spectrum. If frames is an num_frames x sample_per_frame matrix, output will be num_frames x FFT_LENGTH.
['This', 'function', 'computes', 'the', 'one', '-', 'dimensional', 'n', '-', 'point', 'discrete', 'Fourier', 'Transform', '(', 'DFT', ')', 'of', 'a', 'real', '-', 'valued', 'array', 'by', 'means', 'of', 'an', 'efficient', 'algorithm', 'called', 'the', 'Fast', 'Fourier', 'Transform', '(', 'FFT', ')', '.', 'Please', 'refer', 'to', 'https', ':', '//', 'docs', '.', 'scipy', '.', 'org', '/', 'doc', '/', 'numpy', '/', 'reference', '/', 'generated', '/', 'numpy', '.', 'fft', '.', 'rfft', '.', 'html', 'for', 'further', 'details', '.']
train
https://github.com/astorfi/speechpy/blob/9e99ae81398e7584e6234db371d6d7b5e8736192/speechpy/processing.py#L142-L159
7,053
helixyte/everest
everest/entities/attributes.py
get_domain_class_terminal_attribute_iterator
def get_domain_class_terminal_attribute_iterator(ent): """ Returns an iterator over all terminal attributes in the given registered resource. """ for attr in itervalues_(ent.__everest_attributes__): if attr.kind == RESOURCE_ATTRIBUTE_KINDS.TERMINAL: yield attr
python
def get_domain_class_terminal_attribute_iterator(ent): """ Returns an iterator over all terminal attributes in the given registered resource. """ for attr in itervalues_(ent.__everest_attributes__): if attr.kind == RESOURCE_ATTRIBUTE_KINDS.TERMINAL: yield attr
['def', 'get_domain_class_terminal_attribute_iterator', '(', 'ent', ')', ':', 'for', 'attr', 'in', 'itervalues_', '(', 'ent', '.', '__everest_attributes__', ')', ':', 'if', 'attr', '.', 'kind', '==', 'RESOURCE_ATTRIBUTE_KINDS', '.', 'TERMINAL', ':', 'yield', 'attr']
Returns an iterator over all terminal attributes in the given registered resource.
['Returns', 'an', 'iterator', 'over', 'all', 'terminal', 'attributes', 'in', 'the', 'given', 'registered', 'resource', '.']
train
https://github.com/helixyte/everest/blob/70c9b93c3061db5cb62428349d18b8fb8566411b/everest/entities/attributes.py#L116-L123
7,054
aburrell/apexpy
src/apexpy/helpers.py
checklat
def checklat(lat, name='lat'): """Makes sure the latitude is inside [-90, 90], clipping close values (tolerance 1e-4). Parameters ========== lat : array_like latitude name : str, optional parameter name to use in the exception message Returns ======= lat : ndarray or float Same as input where values just outside the range have been clipped to [-90, 90] Raises ====== ValueError if any values are too far outside the range [-90, 90] """ if np.all(np.float64(lat) >= -90) and np.all(np.float64(lat) <= 90): return lat if np.isscalar(lat): if lat > 90 and np.isclose(lat, 90, rtol=0, atol=1e-4): lat = 90 return lat elif lat < -90 and np.isclose(lat, -90, rtol=0, atol=1e-4): lat = -90 return lat else: lat = np.float64(lat) # make sure we have an array, not list lat[(lat > 90) & (np.isclose(lat, 90, rtol=0, atol=1e-4))] = 90 lat[(lat < -90) & (np.isclose(lat, -90, rtol=0, atol=1e-4))] = -90 if np.all(lat >= -90) and np.all(lat <= 90): return lat # we haven't returned yet, so raise exception raise ValueError(name + ' must be in [-90, 90]')
python
def checklat(lat, name='lat'): """Makes sure the latitude is inside [-90, 90], clipping close values (tolerance 1e-4). Parameters ========== lat : array_like latitude name : str, optional parameter name to use in the exception message Returns ======= lat : ndarray or float Same as input where values just outside the range have been clipped to [-90, 90] Raises ====== ValueError if any values are too far outside the range [-90, 90] """ if np.all(np.float64(lat) >= -90) and np.all(np.float64(lat) <= 90): return lat if np.isscalar(lat): if lat > 90 and np.isclose(lat, 90, rtol=0, atol=1e-4): lat = 90 return lat elif lat < -90 and np.isclose(lat, -90, rtol=0, atol=1e-4): lat = -90 return lat else: lat = np.float64(lat) # make sure we have an array, not list lat[(lat > 90) & (np.isclose(lat, 90, rtol=0, atol=1e-4))] = 90 lat[(lat < -90) & (np.isclose(lat, -90, rtol=0, atol=1e-4))] = -90 if np.all(lat >= -90) and np.all(lat <= 90): return lat # we haven't returned yet, so raise exception raise ValueError(name + ' must be in [-90, 90]')
['def', 'checklat', '(', 'lat', ',', 'name', '=', "'lat'", ')', ':', 'if', 'np', '.', 'all', '(', 'np', '.', 'float64', '(', 'lat', ')', '>=', '-', '90', ')', 'and', 'np', '.', 'all', '(', 'np', '.', 'float64', '(', 'lat', ')', '<=', '90', ')', ':', 'return', 'lat', 'if', 'np', '.', 'isscalar', '(', 'lat', ')', ':', 'if', 'lat', '>', '90', 'and', 'np', '.', 'isclose', '(', 'lat', ',', '90', ',', 'rtol', '=', '0', ',', 'atol', '=', '1e-4', ')', ':', 'lat', '=', '90', 'return', 'lat', 'elif', 'lat', '<', '-', '90', 'and', 'np', '.', 'isclose', '(', 'lat', ',', '-', '90', ',', 'rtol', '=', '0', ',', 'atol', '=', '1e-4', ')', ':', 'lat', '=', '-', '90', 'return', 'lat', 'else', ':', 'lat', '=', 'np', '.', 'float64', '(', 'lat', ')', '# make sure we have an array, not list', 'lat', '[', '(', 'lat', '>', '90', ')', '&', '(', 'np', '.', 'isclose', '(', 'lat', ',', '90', ',', 'rtol', '=', '0', ',', 'atol', '=', '1e-4', ')', ')', ']', '=', '90', 'lat', '[', '(', 'lat', '<', '-', '90', ')', '&', '(', 'np', '.', 'isclose', '(', 'lat', ',', '-', '90', ',', 'rtol', '=', '0', ',', 'atol', '=', '1e-4', ')', ')', ']', '=', '-', '90', 'if', 'np', '.', 'all', '(', 'lat', '>=', '-', '90', ')', 'and', 'np', '.', 'all', '(', 'lat', '<=', '90', ')', ':', 'return', 'lat', "# we haven't returned yet, so raise exception", 'raise', 'ValueError', '(', 'name', '+', "' must be in [-90, 90]'", ')']
Makes sure the latitude is inside [-90, 90], clipping close values (tolerance 1e-4). Parameters ========== lat : array_like latitude name : str, optional parameter name to use in the exception message Returns ======= lat : ndarray or float Same as input where values just outside the range have been clipped to [-90, 90] Raises ====== ValueError if any values are too far outside the range [-90, 90]
['Makes', 'sure', 'the', 'latitude', 'is', 'inside', '[', '-', '90', '90', ']', 'clipping', 'close', 'values', '(', 'tolerance', '1e', '-', '4', ')', '.']
train
https://github.com/aburrell/apexpy/blob/a2e919fd9ea9a65d49c4c22c9eb030c8ccf48386/src/apexpy/helpers.py#L11-L52
7,055
jadolg/rocketchat_API
rocketchat_API/rocketchat.py
RocketChat.rooms_upload
def rooms_upload(self, rid, file, **kwargs): """Post a message with attached file to a dedicated room.""" files = { 'file': (os.path.basename(file), open(file, 'rb'), mimetypes.guess_type(file)[0]), } return self.__call_api_post('rooms.upload/' + rid, kwargs=kwargs, use_json=False, files=files)
python
def rooms_upload(self, rid, file, **kwargs): """Post a message with attached file to a dedicated room.""" files = { 'file': (os.path.basename(file), open(file, 'rb'), mimetypes.guess_type(file)[0]), } return self.__call_api_post('rooms.upload/' + rid, kwargs=kwargs, use_json=False, files=files)
['def', 'rooms_upload', '(', 'self', ',', 'rid', ',', 'file', ',', '*', '*', 'kwargs', ')', ':', 'files', '=', '{', "'file'", ':', '(', 'os', '.', 'path', '.', 'basename', '(', 'file', ')', ',', 'open', '(', 'file', ',', "'rb'", ')', ',', 'mimetypes', '.', 'guess_type', '(', 'file', ')', '[', '0', ']', ')', ',', '}', 'return', 'self', '.', '__call_api_post', '(', "'rooms.upload/'", '+', 'rid', ',', 'kwargs', '=', 'kwargs', ',', 'use_json', '=', 'False', ',', 'files', '=', 'files', ')']
Post a message with attached file to a dedicated room.
['Post', 'a', 'message', 'with', 'attached', 'file', 'to', 'a', 'dedicated', 'room', '.']
train
https://github.com/jadolg/rocketchat_API/blob/f220d094434991cb9892418245f054ea06f28aad/rocketchat_API/rocketchat.py#L644-L649
7,056
twilio/twilio-python
twilio/rest/studio/v1/flow/execution/__init__.py
ExecutionList.stream
def stream(self, date_created_from=values.unset, date_created_to=values.unset, limit=None, page_size=None): """ Streams ExecutionInstance records from the API as a generator stream. This operation lazily loads records as efficiently as possible until the limit is reached. The results are returned as a generator, so this operation is memory efficient. :param datetime date_created_from: Only show Executions that started on or after this ISO8601 date-time. :param datetime date_created_to: Only show Executions that started before this this ISO8601 date-time. :param int limit: Upper limit for the number of records to return. stream() guarantees to never return more than limit. Default is no limit :param int page_size: Number of records to fetch per request, when not set will use the default value of 50 records. If no page_size is defined but a limit is defined, stream() will attempt to read the limit with the most efficient page size, i.e. min(limit, 1000) :returns: Generator that will yield up to limit results :rtype: list[twilio.rest.studio.v1.flow.execution.ExecutionInstance] """ limits = self._version.read_limits(limit, page_size) page = self.page( date_created_from=date_created_from, date_created_to=date_created_to, page_size=limits['page_size'], ) return self._version.stream(page, limits['limit'], limits['page_limit'])
python
def stream(self, date_created_from=values.unset, date_created_to=values.unset, limit=None, page_size=None): """ Streams ExecutionInstance records from the API as a generator stream. This operation lazily loads records as efficiently as possible until the limit is reached. The results are returned as a generator, so this operation is memory efficient. :param datetime date_created_from: Only show Executions that started on or after this ISO8601 date-time. :param datetime date_created_to: Only show Executions that started before this this ISO8601 date-time. :param int limit: Upper limit for the number of records to return. stream() guarantees to never return more than limit. Default is no limit :param int page_size: Number of records to fetch per request, when not set will use the default value of 50 records. If no page_size is defined but a limit is defined, stream() will attempt to read the limit with the most efficient page size, i.e. min(limit, 1000) :returns: Generator that will yield up to limit results :rtype: list[twilio.rest.studio.v1.flow.execution.ExecutionInstance] """ limits = self._version.read_limits(limit, page_size) page = self.page( date_created_from=date_created_from, date_created_to=date_created_to, page_size=limits['page_size'], ) return self._version.stream(page, limits['limit'], limits['page_limit'])
['def', 'stream', '(', 'self', ',', 'date_created_from', '=', 'values', '.', 'unset', ',', 'date_created_to', '=', 'values', '.', 'unset', ',', 'limit', '=', 'None', ',', 'page_size', '=', 'None', ')', ':', 'limits', '=', 'self', '.', '_version', '.', 'read_limits', '(', 'limit', ',', 'page_size', ')', 'page', '=', 'self', '.', 'page', '(', 'date_created_from', '=', 'date_created_from', ',', 'date_created_to', '=', 'date_created_to', ',', 'page_size', '=', 'limits', '[', "'page_size'", ']', ',', ')', 'return', 'self', '.', '_version', '.', 'stream', '(', 'page', ',', 'limits', '[', "'limit'", ']', ',', 'limits', '[', "'page_limit'", ']', ')']
Streams ExecutionInstance records from the API as a generator stream. This operation lazily loads records as efficiently as possible until the limit is reached. The results are returned as a generator, so this operation is memory efficient. :param datetime date_created_from: Only show Executions that started on or after this ISO8601 date-time. :param datetime date_created_to: Only show Executions that started before this this ISO8601 date-time. :param int limit: Upper limit for the number of records to return. stream() guarantees to never return more than limit. Default is no limit :param int page_size: Number of records to fetch per request, when not set will use the default value of 50 records. If no page_size is defined but a limit is defined, stream() will attempt to read the limit with the most efficient page size, i.e. min(limit, 1000) :returns: Generator that will yield up to limit results :rtype: list[twilio.rest.studio.v1.flow.execution.ExecutionInstance]
['Streams', 'ExecutionInstance', 'records', 'from', 'the', 'API', 'as', 'a', 'generator', 'stream', '.', 'This', 'operation', 'lazily', 'loads', 'records', 'as', 'efficiently', 'as', 'possible', 'until', 'the', 'limit', 'is', 'reached', '.', 'The', 'results', 'are', 'returned', 'as', 'a', 'generator', 'so', 'this', 'operation', 'is', 'memory', 'efficient', '.']
train
https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/studio/v1/flow/execution/__init__.py#L39-L67
7,057
senaite/senaite.core
bika/lims/exportimport/instruments/generic/two_dimension.py
find_kw
def find_kw(ar_or_sample, kw): """ This function is used to find keywords that are not on the analysis but keywords that are on the interim fields. This function and is is_keyword function should probably be in resultsimport.py or somewhere central where it can be used by other instrument interfaces. """ for analysis in find_analyses(ar_or_sample): if kw in get_interims_keywords(analysis): return analysis.getKeyword() return None
python
def find_kw(ar_or_sample, kw): """ This function is used to find keywords that are not on the analysis but keywords that are on the interim fields. This function and is is_keyword function should probably be in resultsimport.py or somewhere central where it can be used by other instrument interfaces. """ for analysis in find_analyses(ar_or_sample): if kw in get_interims_keywords(analysis): return analysis.getKeyword() return None
['def', 'find_kw', '(', 'ar_or_sample', ',', 'kw', ')', ':', 'for', 'analysis', 'in', 'find_analyses', '(', 'ar_or_sample', ')', ':', 'if', 'kw', 'in', 'get_interims_keywords', '(', 'analysis', ')', ':', 'return', 'analysis', '.', 'getKeyword', '(', ')', 'return', 'None']
This function is used to find keywords that are not on the analysis but keywords that are on the interim fields. This function and is is_keyword function should probably be in resultsimport.py or somewhere central where it can be used by other instrument interfaces.
['This', 'function', 'is', 'used', 'to', 'find', 'keywords', 'that', 'are', 'not', 'on', 'the', 'analysis', 'but', 'keywords', 'that', 'are', 'on', 'the', 'interim', 'fields', '.']
train
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/exportimport/instruments/generic/two_dimension.py#L127-L138
7,058
alvinwan/TexSoup
TexSoup/reader.py
tokenize_math
def tokenize_math(text): r"""Prevents math from being tokenized. :param Buffer text: iterator over line, with current position >>> b = Buffer(r'$\min_x$ \command') >>> tokenize_math(b) '$' >>> b = Buffer(r'$$\min_x$$ \command') >>> tokenize_math(b) '$$' """ if text.startswith('$') and ( text.position == 0 or text.peek(-1) != '\\' or text.endswith(r'\\')): starter = '$$' if text.startswith('$$') else '$' return TokenWithPosition(text.forward(len(starter)), text.position)
python
def tokenize_math(text): r"""Prevents math from being tokenized. :param Buffer text: iterator over line, with current position >>> b = Buffer(r'$\min_x$ \command') >>> tokenize_math(b) '$' >>> b = Buffer(r'$$\min_x$$ \command') >>> tokenize_math(b) '$$' """ if text.startswith('$') and ( text.position == 0 or text.peek(-1) != '\\' or text.endswith(r'\\')): starter = '$$' if text.startswith('$$') else '$' return TokenWithPosition(text.forward(len(starter)), text.position)
['def', 'tokenize_math', '(', 'text', ')', ':', 'if', 'text', '.', 'startswith', '(', "'$'", ')', 'and', '(', 'text', '.', 'position', '==', '0', 'or', 'text', '.', 'peek', '(', '-', '1', ')', '!=', "'\\\\'", 'or', 'text', '.', 'endswith', '(', "r'\\\\'", ')', ')', ':', 'starter', '=', "'$$'", 'if', 'text', '.', 'startswith', '(', "'$$'", ')', 'else', "'$'", 'return', 'TokenWithPosition', '(', 'text', '.', 'forward', '(', 'len', '(', 'starter', ')', ')', ',', 'text', '.', 'position', ')']
r"""Prevents math from being tokenized. :param Buffer text: iterator over line, with current position >>> b = Buffer(r'$\min_x$ \command') >>> tokenize_math(b) '$' >>> b = Buffer(r'$$\min_x$$ \command') >>> tokenize_math(b) '$$'
['r', 'Prevents', 'math', 'from', 'being', 'tokenized', '.']
train
https://github.com/alvinwan/TexSoup/blob/63323ed71510fd2351102b8c36660a3b7703cead/TexSoup/reader.py#L169-L184
7,059
meejah/txtorcon
txtorcon/onion.py
_await_descriptor_upload
def _await_descriptor_upload(tor_protocol, onion, progress, await_all_uploads): """ Internal helper. :param tor_protocol: ITorControlProtocol instance :param onion: IOnionService instance :param progress: a progess callback, or None :returns: a Deferred that fires once we've detected at least one descriptor upload for the service (as detected by listening for HS_DESC events) """ # For v3 services, Tor attempts to upload to 16 services; we'll # assume that for now but also cap it (we want to show some # progress for "attempting uploads" but we need to decide how # much) .. so we leave 50% of the "progress" for attempts, and the # other 50% for "are we done" (which is either "one thing # uploaded" or "all the things uploaded") attempted_uploads = set() confirmed_uploads = set() failed_uploads = set() uploaded = defer.Deferred() await_all = False if await_all_uploads is None else await_all_uploads def translate_progress(tag, description): if progress: done = len(confirmed_uploads) + len(failed_uploads) done_endpoint = float(len(attempted_uploads)) if await_all else 1.0 done_pct = 0 if not attempted_uploads else float(done) / done_endpoint started_pct = float(min(16, len(attempted_uploads))) / 16.0 try: progress( (done_pct * 50.0) + (started_pct * 50.0), tag, description, ) except Exception: log.err() def hostname_matches(hostname): if IAuthenticatedOnionClients.providedBy(onion): return hostname[:-6] == onion.get_permanent_id() else: # provides IOnionService return onion.hostname == hostname def hs_desc(evt): """ From control-spec: "650" SP "HS_DESC" SP Action SP HSAddress SP AuthType SP HsDir [SP DescriptorID] [SP "REASON=" Reason] [SP "REPLICA=" Replica] """ args = evt.split() subtype = args[0] if subtype == 'UPLOAD': if hostname_matches('{}.onion'.format(args[1])): attempted_uploads.add(args[3]) translate_progress( "wait_descriptor", "Upload to {} started".format(args[3]) ) elif subtype == 'UPLOADED': # we only need ONE successful upload to happen for the # HS to be reachable. # unused? addr = args[1] # XXX FIXME I think tor is sending the onion-address # properly with these now, so we can use those # (i.e. instead of matching to "attempted_uploads") if args[3] in attempted_uploads: confirmed_uploads.add(args[3]) log.msg("Uploaded '{}' to '{}'".format(args[1], args[3])) translate_progress( "wait_descriptor", "Successful upload to {}".format(args[3]) ) if not uploaded.called: if await_all: if (len(failed_uploads) + len(confirmed_uploads)) == len(attempted_uploads): uploaded.callback(onion) else: uploaded.callback(onion) elif subtype == 'FAILED': if hostname_matches('{}.onion'.format(args[1])): failed_uploads.add(args[3]) translate_progress( "wait_descriptor", "Failed upload to {}".format(args[3]) ) if failed_uploads == attempted_uploads: msg = "Failed to upload '{}' to: {}".format( args[1], ', '.join(failed_uploads), ) uploaded.errback(RuntimeError(msg)) # the first 'yield' should be the add_event_listener so that a # caller can do "d = _await_descriptor_upload()", then add the # service. yield tor_protocol.add_event_listener('HS_DESC', hs_desc) yield uploaded yield tor_protocol.remove_event_listener('HS_DESC', hs_desc) # ensure we show "100%" at the end if progress: if await_all_uploads: msg = "Completed descriptor uploads" else: msg = "At least one descriptor uploaded" try: progress(100.0, "wait_descriptor", msg) except Exception: log.err()
python
def _await_descriptor_upload(tor_protocol, onion, progress, await_all_uploads): """ Internal helper. :param tor_protocol: ITorControlProtocol instance :param onion: IOnionService instance :param progress: a progess callback, or None :returns: a Deferred that fires once we've detected at least one descriptor upload for the service (as detected by listening for HS_DESC events) """ # For v3 services, Tor attempts to upload to 16 services; we'll # assume that for now but also cap it (we want to show some # progress for "attempting uploads" but we need to decide how # much) .. so we leave 50% of the "progress" for attempts, and the # other 50% for "are we done" (which is either "one thing # uploaded" or "all the things uploaded") attempted_uploads = set() confirmed_uploads = set() failed_uploads = set() uploaded = defer.Deferred() await_all = False if await_all_uploads is None else await_all_uploads def translate_progress(tag, description): if progress: done = len(confirmed_uploads) + len(failed_uploads) done_endpoint = float(len(attempted_uploads)) if await_all else 1.0 done_pct = 0 if not attempted_uploads else float(done) / done_endpoint started_pct = float(min(16, len(attempted_uploads))) / 16.0 try: progress( (done_pct * 50.0) + (started_pct * 50.0), tag, description, ) except Exception: log.err() def hostname_matches(hostname): if IAuthenticatedOnionClients.providedBy(onion): return hostname[:-6] == onion.get_permanent_id() else: # provides IOnionService return onion.hostname == hostname def hs_desc(evt): """ From control-spec: "650" SP "HS_DESC" SP Action SP HSAddress SP AuthType SP HsDir [SP DescriptorID] [SP "REASON=" Reason] [SP "REPLICA=" Replica] """ args = evt.split() subtype = args[0] if subtype == 'UPLOAD': if hostname_matches('{}.onion'.format(args[1])): attempted_uploads.add(args[3]) translate_progress( "wait_descriptor", "Upload to {} started".format(args[3]) ) elif subtype == 'UPLOADED': # we only need ONE successful upload to happen for the # HS to be reachable. # unused? addr = args[1] # XXX FIXME I think tor is sending the onion-address # properly with these now, so we can use those # (i.e. instead of matching to "attempted_uploads") if args[3] in attempted_uploads: confirmed_uploads.add(args[3]) log.msg("Uploaded '{}' to '{}'".format(args[1], args[3])) translate_progress( "wait_descriptor", "Successful upload to {}".format(args[3]) ) if not uploaded.called: if await_all: if (len(failed_uploads) + len(confirmed_uploads)) == len(attempted_uploads): uploaded.callback(onion) else: uploaded.callback(onion) elif subtype == 'FAILED': if hostname_matches('{}.onion'.format(args[1])): failed_uploads.add(args[3]) translate_progress( "wait_descriptor", "Failed upload to {}".format(args[3]) ) if failed_uploads == attempted_uploads: msg = "Failed to upload '{}' to: {}".format( args[1], ', '.join(failed_uploads), ) uploaded.errback(RuntimeError(msg)) # the first 'yield' should be the add_event_listener so that a # caller can do "d = _await_descriptor_upload()", then add the # service. yield tor_protocol.add_event_listener('HS_DESC', hs_desc) yield uploaded yield tor_protocol.remove_event_listener('HS_DESC', hs_desc) # ensure we show "100%" at the end if progress: if await_all_uploads: msg = "Completed descriptor uploads" else: msg = "At least one descriptor uploaded" try: progress(100.0, "wait_descriptor", msg) except Exception: log.err()
['def', '_await_descriptor_upload', '(', 'tor_protocol', ',', 'onion', ',', 'progress', ',', 'await_all_uploads', ')', ':', "# For v3 services, Tor attempts to upload to 16 services; we'll", '# assume that for now but also cap it (we want to show some', '# progress for "attempting uploads" but we need to decide how', '# much) .. so we leave 50% of the "progress" for attempts, and the', '# other 50% for "are we done" (which is either "one thing', '# uploaded" or "all the things uploaded")', 'attempted_uploads', '=', 'set', '(', ')', 'confirmed_uploads', '=', 'set', '(', ')', 'failed_uploads', '=', 'set', '(', ')', 'uploaded', '=', 'defer', '.', 'Deferred', '(', ')', 'await_all', '=', 'False', 'if', 'await_all_uploads', 'is', 'None', 'else', 'await_all_uploads', 'def', 'translate_progress', '(', 'tag', ',', 'description', ')', ':', 'if', 'progress', ':', 'done', '=', 'len', '(', 'confirmed_uploads', ')', '+', 'len', '(', 'failed_uploads', ')', 'done_endpoint', '=', 'float', '(', 'len', '(', 'attempted_uploads', ')', ')', 'if', 'await_all', 'else', '1.0', 'done_pct', '=', '0', 'if', 'not', 'attempted_uploads', 'else', 'float', '(', 'done', ')', '/', 'done_endpoint', 'started_pct', '=', 'float', '(', 'min', '(', '16', ',', 'len', '(', 'attempted_uploads', ')', ')', ')', '/', '16.0', 'try', ':', 'progress', '(', '(', 'done_pct', '*', '50.0', ')', '+', '(', 'started_pct', '*', '50.0', ')', ',', 'tag', ',', 'description', ',', ')', 'except', 'Exception', ':', 'log', '.', 'err', '(', ')', 'def', 'hostname_matches', '(', 'hostname', ')', ':', 'if', 'IAuthenticatedOnionClients', '.', 'providedBy', '(', 'onion', ')', ':', 'return', 'hostname', '[', ':', '-', '6', ']', '==', 'onion', '.', 'get_permanent_id', '(', ')', 'else', ':', '# provides IOnionService', 'return', 'onion', '.', 'hostname', '==', 'hostname', 'def', 'hs_desc', '(', 'evt', ')', ':', '"""\n From control-spec:\n "650" SP "HS_DESC" SP Action SP HSAddress SP AuthType SP HsDir\n [SP DescriptorID] [SP "REASON=" Reason] [SP "REPLICA=" Replica]\n """', 'args', '=', 'evt', '.', 'split', '(', ')', 'subtype', '=', 'args', '[', '0', ']', 'if', 'subtype', '==', "'UPLOAD'", ':', 'if', 'hostname_matches', '(', "'{}.onion'", '.', 'format', '(', 'args', '[', '1', ']', ')', ')', ':', 'attempted_uploads', '.', 'add', '(', 'args', '[', '3', ']', ')', 'translate_progress', '(', '"wait_descriptor"', ',', '"Upload to {} started"', '.', 'format', '(', 'args', '[', '3', ']', ')', ')', 'elif', 'subtype', '==', "'UPLOADED'", ':', '# we only need ONE successful upload to happen for the', '# HS to be reachable.', '# unused? addr = args[1]', '# XXX FIXME I think tor is sending the onion-address', '# properly with these now, so we can use those', '# (i.e. instead of matching to "attempted_uploads")', 'if', 'args', '[', '3', ']', 'in', 'attempted_uploads', ':', 'confirmed_uploads', '.', 'add', '(', 'args', '[', '3', ']', ')', 'log', '.', 'msg', '(', '"Uploaded \'{}\' to \'{}\'"', '.', 'format', '(', 'args', '[', '1', ']', ',', 'args', '[', '3', ']', ')', ')', 'translate_progress', '(', '"wait_descriptor"', ',', '"Successful upload to {}"', '.', 'format', '(', 'args', '[', '3', ']', ')', ')', 'if', 'not', 'uploaded', '.', 'called', ':', 'if', 'await_all', ':', 'if', '(', 'len', '(', 'failed_uploads', ')', '+', 'len', '(', 'confirmed_uploads', ')', ')', '==', 'len', '(', 'attempted_uploads', ')', ':', 'uploaded', '.', 'callback', '(', 'onion', ')', 'else', ':', 'uploaded', '.', 'callback', '(', 'onion', ')', 'elif', 'subtype', '==', "'FAILED'", ':', 'if', 'hostname_matches', '(', "'{}.onion'", '.', 'format', '(', 'args', '[', '1', ']', ')', ')', ':', 'failed_uploads', '.', 'add', '(', 'args', '[', '3', ']', ')', 'translate_progress', '(', '"wait_descriptor"', ',', '"Failed upload to {}"', '.', 'format', '(', 'args', '[', '3', ']', ')', ')', 'if', 'failed_uploads', '==', 'attempted_uploads', ':', 'msg', '=', '"Failed to upload \'{}\' to: {}"', '.', 'format', '(', 'args', '[', '1', ']', ',', "', '", '.', 'join', '(', 'failed_uploads', ')', ',', ')', 'uploaded', '.', 'errback', '(', 'RuntimeError', '(', 'msg', ')', ')', "# the first 'yield' should be the add_event_listener so that a", '# caller can do "d = _await_descriptor_upload()", then add the', '# service.', 'yield', 'tor_protocol', '.', 'add_event_listener', '(', "'HS_DESC'", ',', 'hs_desc', ')', 'yield', 'uploaded', 'yield', 'tor_protocol', '.', 'remove_event_listener', '(', "'HS_DESC'", ',', 'hs_desc', ')', '# ensure we show "100%" at the end', 'if', 'progress', ':', 'if', 'await_all_uploads', ':', 'msg', '=', '"Completed descriptor uploads"', 'else', ':', 'msg', '=', '"At least one descriptor uploaded"', 'try', ':', 'progress', '(', '100.0', ',', '"wait_descriptor"', ',', 'msg', ')', 'except', 'Exception', ':', 'log', '.', 'err', '(', ')']
Internal helper. :param tor_protocol: ITorControlProtocol instance :param onion: IOnionService instance :param progress: a progess callback, or None :returns: a Deferred that fires once we've detected at least one descriptor upload for the service (as detected by listening for HS_DESC events)
['Internal', 'helper', '.']
train
https://github.com/meejah/txtorcon/blob/14053b95adf0b4bd9dd9c317bece912a26578a93/txtorcon/onion.py#L389-L504
7,060
bitesofcode/projexui
projexui/widgets/xlocalebox.py
XLocaleBox.setShowTerritory
def setShowTerritory(self, state): """ Sets the display mode for this widget to the inputed mode. :param state | <bool> """ if state == self._showTerritory: return self._showTerritory = state self.setDirty()
python
def setShowTerritory(self, state): """ Sets the display mode for this widget to the inputed mode. :param state | <bool> """ if state == self._showTerritory: return self._showTerritory = state self.setDirty()
['def', 'setShowTerritory', '(', 'self', ',', 'state', ')', ':', 'if', 'state', '==', 'self', '.', '_showTerritory', ':', 'return', 'self', '.', '_showTerritory', '=', 'state', 'self', '.', 'setDirty', '(', ')']
Sets the display mode for this widget to the inputed mode. :param state | <bool>
['Sets', 'the', 'display', 'mode', 'for', 'this', 'widget', 'to', 'the', 'inputed', 'mode', '.', ':', 'param', 'state', '|', '<bool', '>']
train
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xlocalebox.py#L293-L303
7,061
sbmlteam/libCombine
examples/python/createArchiveExample.py
createArchiveExample
def createArchiveExample(fileName): """ Creates Combine Archive containing the given file. :param fileName: file to include in the archive :return: None """ print('*' * 80) print('Create archive') print('*' * 80) archive = CombineArchive() archive.addFile( fileName, # filename "./models/model.xml", # target file name KnownFormats.lookupFormat("sbml"), # look up identifier for SBML models True # mark file as master ) # add metadata to the archive itself description = OmexDescription() description.setAbout(".") description.setDescription("Simple test archive including one SBML model") description.setCreated(OmexDescription.getCurrentDateAndTime()) creator = VCard() creator.setFamilyName("Bergmann") creator.setGivenName("Frank") creator.setEmail("[email protected]") creator.setOrganization("Caltech") description.addCreator(creator) archive.addMetadata(".", description) # add metadata to the added file location = "./models/model.xml" description = OmexDescription() description.setAbout(location) description.setDescription("SBML model") description.setCreated(OmexDescription.getCurrentDateAndTime()) archive.addMetadata(location, description) # write the archive out_file = "out.omex" archive.writeToFile(out_file) print('Archive created:', out_file)
python
def createArchiveExample(fileName): """ Creates Combine Archive containing the given file. :param fileName: file to include in the archive :return: None """ print('*' * 80) print('Create archive') print('*' * 80) archive = CombineArchive() archive.addFile( fileName, # filename "./models/model.xml", # target file name KnownFormats.lookupFormat("sbml"), # look up identifier for SBML models True # mark file as master ) # add metadata to the archive itself description = OmexDescription() description.setAbout(".") description.setDescription("Simple test archive including one SBML model") description.setCreated(OmexDescription.getCurrentDateAndTime()) creator = VCard() creator.setFamilyName("Bergmann") creator.setGivenName("Frank") creator.setEmail("[email protected]") creator.setOrganization("Caltech") description.addCreator(creator) archive.addMetadata(".", description) # add metadata to the added file location = "./models/model.xml" description = OmexDescription() description.setAbout(location) description.setDescription("SBML model") description.setCreated(OmexDescription.getCurrentDateAndTime()) archive.addMetadata(location, description) # write the archive out_file = "out.omex" archive.writeToFile(out_file) print('Archive created:', out_file)
['def', 'createArchiveExample', '(', 'fileName', ')', ':', 'print', '(', "'*'", '*', '80', ')', 'print', '(', "'Create archive'", ')', 'print', '(', "'*'", '*', '80', ')', 'archive', '=', 'CombineArchive', '(', ')', 'archive', '.', 'addFile', '(', 'fileName', ',', '# filename', '"./models/model.xml"', ',', '# target file name', 'KnownFormats', '.', 'lookupFormat', '(', '"sbml"', ')', ',', '# look up identifier for SBML models', 'True', '# mark file as master', ')', '# add metadata to the archive itself', 'description', '=', 'OmexDescription', '(', ')', 'description', '.', 'setAbout', '(', '"."', ')', 'description', '.', 'setDescription', '(', '"Simple test archive including one SBML model"', ')', 'description', '.', 'setCreated', '(', 'OmexDescription', '.', 'getCurrentDateAndTime', '(', ')', ')', 'creator', '=', 'VCard', '(', ')', 'creator', '.', 'setFamilyName', '(', '"Bergmann"', ')', 'creator', '.', 'setGivenName', '(', '"Frank"', ')', 'creator', '.', 'setEmail', '(', '"[email protected]"', ')', 'creator', '.', 'setOrganization', '(', '"Caltech"', ')', 'description', '.', 'addCreator', '(', 'creator', ')', 'archive', '.', 'addMetadata', '(', '"."', ',', 'description', ')', '# add metadata to the added file', 'location', '=', '"./models/model.xml"', 'description', '=', 'OmexDescription', '(', ')', 'description', '.', 'setAbout', '(', 'location', ')', 'description', '.', 'setDescription', '(', '"SBML model"', ')', 'description', '.', 'setCreated', '(', 'OmexDescription', '.', 'getCurrentDateAndTime', '(', ')', ')', 'archive', '.', 'addMetadata', '(', 'location', ',', 'description', ')', '# write the archive', 'out_file', '=', '"out.omex"', 'archive', '.', 'writeToFile', '(', 'out_file', ')', 'print', '(', "'Archive created:'", ',', 'out_file', ')']
Creates Combine Archive containing the given file. :param fileName: file to include in the archive :return: None
['Creates', 'Combine', 'Archive', 'containing', 'the', 'given', 'file', '.']
train
https://github.com/sbmlteam/libCombine/blob/d7c11a90129dedbcc8bdba8d204be03f1dd0c3e4/examples/python/createArchiveExample.py#L11-L57
7,062
ewels/MultiQC
multiqc/modules/base_module.py
BaseMultiqcModule.clean_s_name
def clean_s_name(self, s_name, root): """ Helper function to take a long file name and strip it back to a clean sample name. Somewhat arbitrary. :param s_name: The sample name to clean :param root: The directory path that this file is within :config.prepend_dirs: boolean, whether to prepend dir name to s_name :return: The cleaned sample name, ready to be used """ s_name_original = s_name if root is None: root = '' if config.fn_clean_sample_names: # Split then take first section to remove everything after these matches for ext in config.fn_clean_exts: if type(ext) is str: ext = {'type': 'truncate', 'pattern': ext} if ext['type'] == 'truncate': s_name = os.path.basename(s_name.split(ext['pattern'], 1)[0]) elif ext['type'] in ('remove', 'replace'): if ext['type'] == 'replace': logger.warning("use 'config.fn_clean_sample_names.remove' instead " "of 'config.fn_clean_sample_names.replace' [deprecated]") s_name = s_name.replace(ext['pattern'], '') elif ext['type'] == 'regex': s_name = re.sub(ext['pattern'], '', s_name) elif ext['type'] == 'regex_keep': match = re.search(ext['pattern'], s_name) s_name = match.group() if match else s_name else: logger.error('Unrecognised config.fn_clean_exts type: {}'.format(ext['type'])) # Trim off characters at the end of names for chrs in config.fn_clean_trim: if s_name.endswith(chrs): s_name = s_name[:-len(chrs)] if s_name.startswith(chrs): s_name = s_name[len(chrs):] # Prepend sample name with directory if config.prepend_dirs: sep = config.prepend_dirs_sep root = root.lstrip('.{}'.format(os.sep)) dirs = [d.strip() for d in root.split(os.sep) if d.strip() != ''] if config.prepend_dirs_depth != 0: d_idx = config.prepend_dirs_depth * -1 if config.prepend_dirs_depth > 0: dirs = dirs[d_idx:] else: dirs = dirs[:d_idx] if len(dirs) > 0: s_name = "{}{}{}".format(sep.join(dirs), sep, s_name) # Remove trailing whitespace s_name = s_name.strip() if s_name == '': s_name = s_name_original return s_name
python
def clean_s_name(self, s_name, root): """ Helper function to take a long file name and strip it back to a clean sample name. Somewhat arbitrary. :param s_name: The sample name to clean :param root: The directory path that this file is within :config.prepend_dirs: boolean, whether to prepend dir name to s_name :return: The cleaned sample name, ready to be used """ s_name_original = s_name if root is None: root = '' if config.fn_clean_sample_names: # Split then take first section to remove everything after these matches for ext in config.fn_clean_exts: if type(ext) is str: ext = {'type': 'truncate', 'pattern': ext} if ext['type'] == 'truncate': s_name = os.path.basename(s_name.split(ext['pattern'], 1)[0]) elif ext['type'] in ('remove', 'replace'): if ext['type'] == 'replace': logger.warning("use 'config.fn_clean_sample_names.remove' instead " "of 'config.fn_clean_sample_names.replace' [deprecated]") s_name = s_name.replace(ext['pattern'], '') elif ext['type'] == 'regex': s_name = re.sub(ext['pattern'], '', s_name) elif ext['type'] == 'regex_keep': match = re.search(ext['pattern'], s_name) s_name = match.group() if match else s_name else: logger.error('Unrecognised config.fn_clean_exts type: {}'.format(ext['type'])) # Trim off characters at the end of names for chrs in config.fn_clean_trim: if s_name.endswith(chrs): s_name = s_name[:-len(chrs)] if s_name.startswith(chrs): s_name = s_name[len(chrs):] # Prepend sample name with directory if config.prepend_dirs: sep = config.prepend_dirs_sep root = root.lstrip('.{}'.format(os.sep)) dirs = [d.strip() for d in root.split(os.sep) if d.strip() != ''] if config.prepend_dirs_depth != 0: d_idx = config.prepend_dirs_depth * -1 if config.prepend_dirs_depth > 0: dirs = dirs[d_idx:] else: dirs = dirs[:d_idx] if len(dirs) > 0: s_name = "{}{}{}".format(sep.join(dirs), sep, s_name) # Remove trailing whitespace s_name = s_name.strip() if s_name == '': s_name = s_name_original return s_name
['def', 'clean_s_name', '(', 'self', ',', 's_name', ',', 'root', ')', ':', 's_name_original', '=', 's_name', 'if', 'root', 'is', 'None', ':', 'root', '=', "''", 'if', 'config', '.', 'fn_clean_sample_names', ':', '# Split then take first section to remove everything after these matches', 'for', 'ext', 'in', 'config', '.', 'fn_clean_exts', ':', 'if', 'type', '(', 'ext', ')', 'is', 'str', ':', 'ext', '=', '{', "'type'", ':', "'truncate'", ',', "'pattern'", ':', 'ext', '}', 'if', 'ext', '[', "'type'", ']', '==', "'truncate'", ':', 's_name', '=', 'os', '.', 'path', '.', 'basename', '(', 's_name', '.', 'split', '(', 'ext', '[', "'pattern'", ']', ',', '1', ')', '[', '0', ']', ')', 'elif', 'ext', '[', "'type'", ']', 'in', '(', "'remove'", ',', "'replace'", ')', ':', 'if', 'ext', '[', "'type'", ']', '==', "'replace'", ':', 'logger', '.', 'warning', '(', '"use \'config.fn_clean_sample_names.remove\' instead "', '"of \'config.fn_clean_sample_names.replace\' [deprecated]"', ')', 's_name', '=', 's_name', '.', 'replace', '(', 'ext', '[', "'pattern'", ']', ',', "''", ')', 'elif', 'ext', '[', "'type'", ']', '==', "'regex'", ':', 's_name', '=', 're', '.', 'sub', '(', 'ext', '[', "'pattern'", ']', ',', "''", ',', 's_name', ')', 'elif', 'ext', '[', "'type'", ']', '==', "'regex_keep'", ':', 'match', '=', 're', '.', 'search', '(', 'ext', '[', "'pattern'", ']', ',', 's_name', ')', 's_name', '=', 'match', '.', 'group', '(', ')', 'if', 'match', 'else', 's_name', 'else', ':', 'logger', '.', 'error', '(', "'Unrecognised config.fn_clean_exts type: {}'", '.', 'format', '(', 'ext', '[', "'type'", ']', ')', ')', '# Trim off characters at the end of names', 'for', 'chrs', 'in', 'config', '.', 'fn_clean_trim', ':', 'if', 's_name', '.', 'endswith', '(', 'chrs', ')', ':', 's_name', '=', 's_name', '[', ':', '-', 'len', '(', 'chrs', ')', ']', 'if', 's_name', '.', 'startswith', '(', 'chrs', ')', ':', 's_name', '=', 's_name', '[', 'len', '(', 'chrs', ')', ':', ']', '# Prepend sample name with directory', 'if', 'config', '.', 'prepend_dirs', ':', 'sep', '=', 'config', '.', 'prepend_dirs_sep', 'root', '=', 'root', '.', 'lstrip', '(', "'.{}'", '.', 'format', '(', 'os', '.', 'sep', ')', ')', 'dirs', '=', '[', 'd', '.', 'strip', '(', ')', 'for', 'd', 'in', 'root', '.', 'split', '(', 'os', '.', 'sep', ')', 'if', 'd', '.', 'strip', '(', ')', '!=', "''", ']', 'if', 'config', '.', 'prepend_dirs_depth', '!=', '0', ':', 'd_idx', '=', 'config', '.', 'prepend_dirs_depth', '*', '-', '1', 'if', 'config', '.', 'prepend_dirs_depth', '>', '0', ':', 'dirs', '=', 'dirs', '[', 'd_idx', ':', ']', 'else', ':', 'dirs', '=', 'dirs', '[', ':', 'd_idx', ']', 'if', 'len', '(', 'dirs', ')', '>', '0', ':', 's_name', '=', '"{}{}{}"', '.', 'format', '(', 'sep', '.', 'join', '(', 'dirs', ')', ',', 'sep', ',', 's_name', ')', '# Remove trailing whitespace', 's_name', '=', 's_name', '.', 'strip', '(', ')', 'if', 's_name', '==', "''", ':', 's_name', '=', 's_name_original', 'return', 's_name']
Helper function to take a long file name and strip it back to a clean sample name. Somewhat arbitrary. :param s_name: The sample name to clean :param root: The directory path that this file is within :config.prepend_dirs: boolean, whether to prepend dir name to s_name :return: The cleaned sample name, ready to be used
['Helper', 'function', 'to', 'take', 'a', 'long', 'file', 'name', 'and', 'strip', 'it', 'back', 'to', 'a', 'clean', 'sample', 'name', '.', 'Somewhat', 'arbitrary', '.', ':', 'param', 's_name', ':', 'The', 'sample', 'name', 'to', 'clean', ':', 'param', 'root', ':', 'The', 'directory', 'path', 'that', 'this', 'file', 'is', 'within', ':', 'config', '.', 'prepend_dirs', ':', 'boolean', 'whether', 'to', 'prepend', 'dir', 'name', 'to', 's_name', ':', 'return', ':', 'The', 'cleaned', 'sample', 'name', 'ready', 'to', 'be', 'used']
train
https://github.com/ewels/MultiQC/blob/2037d6322b2554146a74efbf869156ad20d4c4ec/multiqc/modules/base_module.py#L195-L252
7,063
buzzfeed/caliendo
caliendo/db/flatfiles.py
insert_io
def insert_io( args ): """ Inserts a method's i/o into the datastore :param dict args: A dictionary of the hash, stack, packet_num, methodname, args, and returnval :rtype None: """ global CACHE_ load_cache() hash = args['hash'] record_used('cache', hash) packet_num = args['packet_num'] if hash not in CACHE_['cache']: CACHE_['cache'][hash] = {} CACHE_['cache'][hash][packet_num] = pickle.dumps(args, PPROT) write_out()
python
def insert_io( args ): """ Inserts a method's i/o into the datastore :param dict args: A dictionary of the hash, stack, packet_num, methodname, args, and returnval :rtype None: """ global CACHE_ load_cache() hash = args['hash'] record_used('cache', hash) packet_num = args['packet_num'] if hash not in CACHE_['cache']: CACHE_['cache'][hash] = {} CACHE_['cache'][hash][packet_num] = pickle.dumps(args, PPROT) write_out()
['def', 'insert_io', '(', 'args', ')', ':', 'global', 'CACHE_', 'load_cache', '(', ')', 'hash', '=', 'args', '[', "'hash'", ']', 'record_used', '(', "'cache'", ',', 'hash', ')', 'packet_num', '=', 'args', '[', "'packet_num'", ']', 'if', 'hash', 'not', 'in', 'CACHE_', '[', "'cache'", ']', ':', 'CACHE_', '[', "'cache'", ']', '[', 'hash', ']', '=', '{', '}', 'CACHE_', '[', "'cache'", ']', '[', 'hash', ']', '[', 'packet_num', ']', '=', 'pickle', '.', 'dumps', '(', 'args', ',', 'PPROT', ')', 'write_out', '(', ')']
Inserts a method's i/o into the datastore :param dict args: A dictionary of the hash, stack, packet_num, methodname, args, and returnval :rtype None:
['Inserts', 'a', 'method', 's', 'i', '/', 'o', 'into', 'the', 'datastore']
train
https://github.com/buzzfeed/caliendo/blob/1628a10f7782ad67c0422b5cbc9bf4979ac40abc/caliendo/db/flatfiles.py#L64-L80
7,064
pybel/pybel
src/pybel/struct/mutation/induction/paths.py
get_random_path
def get_random_path(graph) -> List[BaseEntity]: """Get a random path from the graph as a list of nodes. :param pybel.BELGraph graph: A BEL graph """ wg = graph.to_undirected() nodes = wg.nodes() def pick_random_pair() -> Tuple[BaseEntity, BaseEntity]: """Get a pair of random nodes.""" return random.sample(nodes, k=2) source, target = pick_random_pair() tries = 0 sentinel_tries = 5 while not nx.has_path(wg, source, target) and tries < sentinel_tries: tries += 1 source, target = pick_random_pair() if tries == sentinel_tries: return [source] return nx.shortest_path(wg, source=source, target=target)
python
def get_random_path(graph) -> List[BaseEntity]: """Get a random path from the graph as a list of nodes. :param pybel.BELGraph graph: A BEL graph """ wg = graph.to_undirected() nodes = wg.nodes() def pick_random_pair() -> Tuple[BaseEntity, BaseEntity]: """Get a pair of random nodes.""" return random.sample(nodes, k=2) source, target = pick_random_pair() tries = 0 sentinel_tries = 5 while not nx.has_path(wg, source, target) and tries < sentinel_tries: tries += 1 source, target = pick_random_pair() if tries == sentinel_tries: return [source] return nx.shortest_path(wg, source=source, target=target)
['def', 'get_random_path', '(', 'graph', ')', '->', 'List', '[', 'BaseEntity', ']', ':', 'wg', '=', 'graph', '.', 'to_undirected', '(', ')', 'nodes', '=', 'wg', '.', 'nodes', '(', ')', 'def', 'pick_random_pair', '(', ')', '->', 'Tuple', '[', 'BaseEntity', ',', 'BaseEntity', ']', ':', '"""Get a pair of random nodes."""', 'return', 'random', '.', 'sample', '(', 'nodes', ',', 'k', '=', '2', ')', 'source', ',', 'target', '=', 'pick_random_pair', '(', ')', 'tries', '=', '0', 'sentinel_tries', '=', '5', 'while', 'not', 'nx', '.', 'has_path', '(', 'wg', ',', 'source', ',', 'target', ')', 'and', 'tries', '<', 'sentinel_tries', ':', 'tries', '+=', '1', 'source', ',', 'target', '=', 'pick_random_pair', '(', ')', 'if', 'tries', '==', 'sentinel_tries', ':', 'return', '[', 'source', ']', 'return', 'nx', '.', 'shortest_path', '(', 'wg', ',', 'source', '=', 'source', ',', 'target', '=', 'target', ')']
Get a random path from the graph as a list of nodes. :param pybel.BELGraph graph: A BEL graph
['Get', 'a', 'random', 'path', 'from', 'the', 'graph', 'as', 'a', 'list', 'of', 'nodes', '.']
train
https://github.com/pybel/pybel/blob/c8a7a1bdae4c475fa2a8c77f3a9a5f6d79556ca0/src/pybel/struct/mutation/induction/paths.py#L114-L139
7,065
roboogle/gtkmvc3
gtkmvco/gtkmvc3/adapters/default.py
remove_adapter
def remove_adapter(widget_class, flavour=None): """Removes the given widget class information from the default set of adapters. If widget_class had been previously added by using add_adapter, the added adapter will be removed, restoring possibly previusly existing adapter(s). Notice that this function will remove only *one* adapter about given wiget_class (the first found in order), even if many are currently stored. @param flavour has to be used when the entry was added with a particular flavour. Returns True if one adapter was removed, False if no adapter was removed.""" for it,tu in enumerate(__def_adapter): if (widget_class == tu[WIDGET] and flavour == tu[FLAVOUR]): del __def_adapter[it] return True return False
python
def remove_adapter(widget_class, flavour=None): """Removes the given widget class information from the default set of adapters. If widget_class had been previously added by using add_adapter, the added adapter will be removed, restoring possibly previusly existing adapter(s). Notice that this function will remove only *one* adapter about given wiget_class (the first found in order), even if many are currently stored. @param flavour has to be used when the entry was added with a particular flavour. Returns True if one adapter was removed, False if no adapter was removed.""" for it,tu in enumerate(__def_adapter): if (widget_class == tu[WIDGET] and flavour == tu[FLAVOUR]): del __def_adapter[it] return True return False
['def', 'remove_adapter', '(', 'widget_class', ',', 'flavour', '=', 'None', ')', ':', 'for', 'it', ',', 'tu', 'in', 'enumerate', '(', '__def_adapter', ')', ':', 'if', '(', 'widget_class', '==', 'tu', '[', 'WIDGET', ']', 'and', 'flavour', '==', 'tu', '[', 'FLAVOUR', ']', ')', ':', 'del', '__def_adapter', '[', 'it', ']', 'return', 'True', 'return', 'False']
Removes the given widget class information from the default set of adapters. If widget_class had been previously added by using add_adapter, the added adapter will be removed, restoring possibly previusly existing adapter(s). Notice that this function will remove only *one* adapter about given wiget_class (the first found in order), even if many are currently stored. @param flavour has to be used when the entry was added with a particular flavour. Returns True if one adapter was removed, False if no adapter was removed.
['Removes', 'the', 'given', 'widget', 'class', 'information', 'from', 'the', 'default', 'set', 'of', 'adapters', '.']
train
https://github.com/roboogle/gtkmvc3/blob/63405fd8d2056be26af49103b13a8d5e57fe4dff/gtkmvco/gtkmvc3/adapters/default.py#L106-L126
7,066
mitodl/PyLmod
pylmod/gradebook.py
GradeBook.create_assignment
def create_assignment( # pylint: disable=too-many-arguments self, name, short_name, weight, max_points, due_date_str, gradebook_id='', **kwargs ): """Create a new assignment. Create a new assignment. By default, assignments are created under the `Uncategorized` category. Args: name (str): descriptive assignment name, i.e. ``new NUMERIC SIMPLE ASSIGNMENT`` short_name (str): short name of assignment, one word of no more than 5 characters, i.e. ``SAnew`` weight (str): floating point value for weight, i.e. ``1.0`` max_points (str): floating point value for maximum point total, i.e. ``100.0`` due_date_str (str): due date as string in ``mm-dd-yyyy`` format, i.e. ``08-21-2011`` gradebook_id (str): unique identifier for gradebook, i.e. ``2314`` kwargs (dict): dictionary containing additional parameters, i.e. ``graderVisible``, ``totalAverage``, and ``categoryId``. For example: .. code-block:: python { u'graderVisible': True, u'totalAverage': None u'categoryId': 1007964, } Raises: requests.RequestException: Exception connection error ValueError: Unable to decode response content Returns: dict: dictionary containing ``data``, ``status`` and ``message`` for example: .. code-block:: python { u'data': { u'assignmentId': 18490492, u'categoryId': 1293820, u'description': u'', u'dueDate': 1312171200000, u'dueDateString': u'08-01-2011', u'gradebookId': 1293808, u'graderVisible': False, u'gradingSchemeId': 18490493, u'gradingSchemeType': u'NUMERIC', u'isComposite': False, u'isHomework': False, u'maxPointsTotal': 100.0, u'name': u'new NUMERIC SIMPLE ASSIGNMENT', u'numStudentGradesToBeApproved': 0, u'numStudentsToBeGraded': 614, u'shortName': u'SAnew', u'userDeleted': False, u'weight': 1.0 }, u'message': u'assignment is created successfully', u'status': 1 } """ data = { 'name': name, 'shortName': short_name, 'weight': weight, 'graderVisible': False, 'gradingSchemeType': 'NUMERIC', 'gradebookId': gradebook_id or self.gradebook_id, 'maxPointsTotal': max_points, 'dueDateString': due_date_str } data.update(kwargs) log.info("Creating assignment %s", name) response = self.post('assignment', data) log.debug('Received response data: %s', response) return response
python
def create_assignment( # pylint: disable=too-many-arguments self, name, short_name, weight, max_points, due_date_str, gradebook_id='', **kwargs ): """Create a new assignment. Create a new assignment. By default, assignments are created under the `Uncategorized` category. Args: name (str): descriptive assignment name, i.e. ``new NUMERIC SIMPLE ASSIGNMENT`` short_name (str): short name of assignment, one word of no more than 5 characters, i.e. ``SAnew`` weight (str): floating point value for weight, i.e. ``1.0`` max_points (str): floating point value for maximum point total, i.e. ``100.0`` due_date_str (str): due date as string in ``mm-dd-yyyy`` format, i.e. ``08-21-2011`` gradebook_id (str): unique identifier for gradebook, i.e. ``2314`` kwargs (dict): dictionary containing additional parameters, i.e. ``graderVisible``, ``totalAverage``, and ``categoryId``. For example: .. code-block:: python { u'graderVisible': True, u'totalAverage': None u'categoryId': 1007964, } Raises: requests.RequestException: Exception connection error ValueError: Unable to decode response content Returns: dict: dictionary containing ``data``, ``status`` and ``message`` for example: .. code-block:: python { u'data': { u'assignmentId': 18490492, u'categoryId': 1293820, u'description': u'', u'dueDate': 1312171200000, u'dueDateString': u'08-01-2011', u'gradebookId': 1293808, u'graderVisible': False, u'gradingSchemeId': 18490493, u'gradingSchemeType': u'NUMERIC', u'isComposite': False, u'isHomework': False, u'maxPointsTotal': 100.0, u'name': u'new NUMERIC SIMPLE ASSIGNMENT', u'numStudentGradesToBeApproved': 0, u'numStudentsToBeGraded': 614, u'shortName': u'SAnew', u'userDeleted': False, u'weight': 1.0 }, u'message': u'assignment is created successfully', u'status': 1 } """ data = { 'name': name, 'shortName': short_name, 'weight': weight, 'graderVisible': False, 'gradingSchemeType': 'NUMERIC', 'gradebookId': gradebook_id or self.gradebook_id, 'maxPointsTotal': max_points, 'dueDateString': due_date_str } data.update(kwargs) log.info("Creating assignment %s", name) response = self.post('assignment', data) log.debug('Received response data: %s', response) return response
['def', 'create_assignment', '(', '# pylint: disable=too-many-arguments', 'self', ',', 'name', ',', 'short_name', ',', 'weight', ',', 'max_points', ',', 'due_date_str', ',', 'gradebook_id', '=', "''", ',', '*', '*', 'kwargs', ')', ':', 'data', '=', '{', "'name'", ':', 'name', ',', "'shortName'", ':', 'short_name', ',', "'weight'", ':', 'weight', ',', "'graderVisible'", ':', 'False', ',', "'gradingSchemeType'", ':', "'NUMERIC'", ',', "'gradebookId'", ':', 'gradebook_id', 'or', 'self', '.', 'gradebook_id', ',', "'maxPointsTotal'", ':', 'max_points', ',', "'dueDateString'", ':', 'due_date_str', '}', 'data', '.', 'update', '(', 'kwargs', ')', 'log', '.', 'info', '(', '"Creating assignment %s"', ',', 'name', ')', 'response', '=', 'self', '.', 'post', '(', "'assignment'", ',', 'data', ')', 'log', '.', 'debug', '(', "'Received response data: %s'", ',', 'response', ')', 'return', 'response']
Create a new assignment. Create a new assignment. By default, assignments are created under the `Uncategorized` category. Args: name (str): descriptive assignment name, i.e. ``new NUMERIC SIMPLE ASSIGNMENT`` short_name (str): short name of assignment, one word of no more than 5 characters, i.e. ``SAnew`` weight (str): floating point value for weight, i.e. ``1.0`` max_points (str): floating point value for maximum point total, i.e. ``100.0`` due_date_str (str): due date as string in ``mm-dd-yyyy`` format, i.e. ``08-21-2011`` gradebook_id (str): unique identifier for gradebook, i.e. ``2314`` kwargs (dict): dictionary containing additional parameters, i.e. ``graderVisible``, ``totalAverage``, and ``categoryId``. For example: .. code-block:: python { u'graderVisible': True, u'totalAverage': None u'categoryId': 1007964, } Raises: requests.RequestException: Exception connection error ValueError: Unable to decode response content Returns: dict: dictionary containing ``data``, ``status`` and ``message`` for example: .. code-block:: python { u'data': { u'assignmentId': 18490492, u'categoryId': 1293820, u'description': u'', u'dueDate': 1312171200000, u'dueDateString': u'08-01-2011', u'gradebookId': 1293808, u'graderVisible': False, u'gradingSchemeId': 18490493, u'gradingSchemeType': u'NUMERIC', u'isComposite': False, u'isHomework': False, u'maxPointsTotal': 100.0, u'name': u'new NUMERIC SIMPLE ASSIGNMENT', u'numStudentGradesToBeApproved': 0, u'numStudentsToBeGraded': 614, u'shortName': u'SAnew', u'userDeleted': False, u'weight': 1.0 }, u'message': u'assignment is created successfully', u'status': 1 }
['Create', 'a', 'new', 'assignment', '.']
train
https://github.com/mitodl/PyLmod/blob/b798b86c33d1eb615e7cd4f3457b5c15da1d86e0/pylmod/gradebook.py#L335-L425
7,067
molmod/molmod
molmod/graphs.py
RingPattern.get_new_edges
def get_new_edges(self, level): """Get new edges from the pattern graph for the graph search algorithm The level argument denotes the distance of the new edges from the starting vertex in the pattern graph. """ if level == 0: edges0 = [(0, 1), (0, 2)] elif level >= (self.max_size-1)//2: edges0 = [] else: l2 = level*2 edges0 = [(l2-1, l2+1), (l2, l2+2)] return edges0, []
python
def get_new_edges(self, level): """Get new edges from the pattern graph for the graph search algorithm The level argument denotes the distance of the new edges from the starting vertex in the pattern graph. """ if level == 0: edges0 = [(0, 1), (0, 2)] elif level >= (self.max_size-1)//2: edges0 = [] else: l2 = level*2 edges0 = [(l2-1, l2+1), (l2, l2+2)] return edges0, []
['def', 'get_new_edges', '(', 'self', ',', 'level', ')', ':', 'if', 'level', '==', '0', ':', 'edges0', '=', '[', '(', '0', ',', '1', ')', ',', '(', '0', ',', '2', ')', ']', 'elif', 'level', '>=', '(', 'self', '.', 'max_size', '-', '1', ')', '//', '2', ':', 'edges0', '=', '[', ']', 'else', ':', 'l2', '=', 'level', '*', '2', 'edges0', '=', '[', '(', 'l2', '-', '1', ',', 'l2', '+', '1', ')', ',', '(', 'l2', ',', 'l2', '+', '2', ')', ']', 'return', 'edges0', ',', '[', ']']
Get new edges from the pattern graph for the graph search algorithm The level argument denotes the distance of the new edges from the starting vertex in the pattern graph.
['Get', 'new', 'edges', 'from', 'the', 'pattern', 'graph', 'for', 'the', 'graph', 'search', 'algorithm']
train
https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/graphs.py#L1429-L1442
7,068
saltstack/salt
salt/modules/firewalld.py
list_services
def list_services(zone=None, permanent=True): ''' List services added for zone as a space separated list. If zone is omitted, default zone will be used. CLI Example: .. code-block:: bash salt '*' firewalld.list_services List a specific zone .. code-block:: bash salt '*' firewalld.list_services my_zone ''' if zone: cmd = '--zone={0} --list-services'.format(zone) else: cmd = '--list-services' if permanent: cmd += ' --permanent' return __firewall_cmd(cmd).split()
python
def list_services(zone=None, permanent=True): ''' List services added for zone as a space separated list. If zone is omitted, default zone will be used. CLI Example: .. code-block:: bash salt '*' firewalld.list_services List a specific zone .. code-block:: bash salt '*' firewalld.list_services my_zone ''' if zone: cmd = '--zone={0} --list-services'.format(zone) else: cmd = '--list-services' if permanent: cmd += ' --permanent' return __firewall_cmd(cmd).split()
['def', 'list_services', '(', 'zone', '=', 'None', ',', 'permanent', '=', 'True', ')', ':', 'if', 'zone', ':', 'cmd', '=', "'--zone={0} --list-services'", '.', 'format', '(', 'zone', ')', 'else', ':', 'cmd', '=', "'--list-services'", 'if', 'permanent', ':', 'cmd', '+=', "' --permanent'", 'return', '__firewall_cmd', '(', 'cmd', ')', '.', 'split', '(', ')']
List services added for zone as a space separated list. If zone is omitted, default zone will be used. CLI Example: .. code-block:: bash salt '*' firewalld.list_services List a specific zone .. code-block:: bash salt '*' firewalld.list_services my_zone
['List', 'services', 'added', 'for', 'zone', 'as', 'a', 'space', 'separated', 'list', '.', 'If', 'zone', 'is', 'omitted', 'default', 'zone', 'will', 'be', 'used', '.']
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/firewalld.py#L354-L379
7,069
ploneintranet/ploneintranet.workspace
src/ploneintranet/workspace/browser/tiles/sidebar.py
Sidebar.children
def children(self): """ returns a list of dicts of items in the current context """ items = [] catalog = self.context.portal_catalog current_path = '/'.join(self.context.getPhysicalPath()) sidebar_search = self.request.get('sidebar-search', None) if sidebar_search: st = '%s*' % sidebar_search # XXX plone only allows * as postfix. # With solr we might want to do real substr results = catalog.searchResults(SearchableText=st, path=current_path) else: results = self.context.getFolderContents() for item in results: # Do some checks to set the right classes for icons and candy desc = ( item['Description'] and 'has-description' or 'has-no-description' ) content_type = TYPE_MAP.get(item['portal_type'], 'none') mime_type = '' # XXX: will be needed later for grouping by mimetyp # typ can be user, folder, date and mime typish typ = 'folder' # XXX: This needs to get dynamic later url = item.getURL() ptool = api.portal.get_tool('portal_properties') view_action_types = \ ptool.site_properties.typesUseViewActionInListings if content_type in FOLDERISH_TYPES: dpi = ( "source: #workspace-documents; " "target: #workspace-documents" ) url = url + '/@@sidebar.default#workspace-documents' content_type = 'group' else: if item['portal_type'] in view_action_types: url = "%s/view" % url dpi = ( "target: #document-body; " "source: #document-body; " "history: record" ) content_type = 'document' cls = 'item %s type-%s %s' % (content_type, typ, desc) items.append({ 'id': item['getId'], 'cls': cls, 'title': item['Title'], 'description': item['Description'], 'url': url, 'type': TYPE_MAP.get(item['portal_type'], 'none'), 'mime-type': mime_type, 'dpi': dpi}) return items
python
def children(self): """ returns a list of dicts of items in the current context """ items = [] catalog = self.context.portal_catalog current_path = '/'.join(self.context.getPhysicalPath()) sidebar_search = self.request.get('sidebar-search', None) if sidebar_search: st = '%s*' % sidebar_search # XXX plone only allows * as postfix. # With solr we might want to do real substr results = catalog.searchResults(SearchableText=st, path=current_path) else: results = self.context.getFolderContents() for item in results: # Do some checks to set the right classes for icons and candy desc = ( item['Description'] and 'has-description' or 'has-no-description' ) content_type = TYPE_MAP.get(item['portal_type'], 'none') mime_type = '' # XXX: will be needed later for grouping by mimetyp # typ can be user, folder, date and mime typish typ = 'folder' # XXX: This needs to get dynamic later url = item.getURL() ptool = api.portal.get_tool('portal_properties') view_action_types = \ ptool.site_properties.typesUseViewActionInListings if content_type in FOLDERISH_TYPES: dpi = ( "source: #workspace-documents; " "target: #workspace-documents" ) url = url + '/@@sidebar.default#workspace-documents' content_type = 'group' else: if item['portal_type'] in view_action_types: url = "%s/view" % url dpi = ( "target: #document-body; " "source: #document-body; " "history: record" ) content_type = 'document' cls = 'item %s type-%s %s' % (content_type, typ, desc) items.append({ 'id': item['getId'], 'cls': cls, 'title': item['Title'], 'description': item['Description'], 'url': url, 'type': TYPE_MAP.get(item['portal_type'], 'none'), 'mime-type': mime_type, 'dpi': dpi}) return items
['def', 'children', '(', 'self', ')', ':', 'items', '=', '[', ']', 'catalog', '=', 'self', '.', 'context', '.', 'portal_catalog', 'current_path', '=', "'/'", '.', 'join', '(', 'self', '.', 'context', '.', 'getPhysicalPath', '(', ')', ')', 'sidebar_search', '=', 'self', '.', 'request', '.', 'get', '(', "'sidebar-search'", ',', 'None', ')', 'if', 'sidebar_search', ':', 'st', '=', "'%s*'", '%', 'sidebar_search', '# XXX plone only allows * as postfix.', '# With solr we might want to do real substr', 'results', '=', 'catalog', '.', 'searchResults', '(', 'SearchableText', '=', 'st', ',', 'path', '=', 'current_path', ')', 'else', ':', 'results', '=', 'self', '.', 'context', '.', 'getFolderContents', '(', ')', 'for', 'item', 'in', 'results', ':', '# Do some checks to set the right classes for icons and candy', 'desc', '=', '(', 'item', '[', "'Description'", ']', 'and', "'has-description'", 'or', "'has-no-description'", ')', 'content_type', '=', 'TYPE_MAP', '.', 'get', '(', 'item', '[', "'portal_type'", ']', ',', "'none'", ')', 'mime_type', '=', "''", '# XXX: will be needed later for grouping by mimetyp', '# typ can be user, folder, date and mime typish', 'typ', '=', "'folder'", '# XXX: This needs to get dynamic later', 'url', '=', 'item', '.', 'getURL', '(', ')', 'ptool', '=', 'api', '.', 'portal', '.', 'get_tool', '(', "'portal_properties'", ')', 'view_action_types', '=', 'ptool', '.', 'site_properties', '.', 'typesUseViewActionInListings', 'if', 'content_type', 'in', 'FOLDERISH_TYPES', ':', 'dpi', '=', '(', '"source: #workspace-documents; "', '"target: #workspace-documents"', ')', 'url', '=', 'url', '+', "'/@@sidebar.default#workspace-documents'", 'content_type', '=', "'group'", 'else', ':', 'if', 'item', '[', "'portal_type'", ']', 'in', 'view_action_types', ':', 'url', '=', '"%s/view"', '%', 'url', 'dpi', '=', '(', '"target: #document-body; "', '"source: #document-body; "', '"history: record"', ')', 'content_type', '=', "'document'", 'cls', '=', "'item %s type-%s %s'", '%', '(', 'content_type', ',', 'typ', ',', 'desc', ')', 'items', '.', 'append', '(', '{', "'id'", ':', 'item', '[', "'getId'", ']', ',', "'cls'", ':', 'cls', ',', "'title'", ':', 'item', '[', "'Title'", ']', ',', "'description'", ':', 'item', '[', "'Description'", ']', ',', "'url'", ':', 'url', ',', "'type'", ':', 'TYPE_MAP', '.', 'get', '(', 'item', '[', "'portal_type'", ']', ',', "'none'", ')', ',', "'mime-type'", ':', 'mime_type', ',', "'dpi'", ':', 'dpi', '}', ')', 'return', 'items']
returns a list of dicts of items in the current context
['returns', 'a', 'list', 'of', 'dicts', 'of', 'items', 'in', 'the', 'current', 'context']
train
https://github.com/ploneintranet/ploneintranet.workspace/blob/a4fc7a5c61f9c6d4d4ad25478ff5250f342ffbba/src/ploneintranet/workspace/browser/tiles/sidebar.py#L223-L286
7,070
pyviz/holoviews
holoviews/plotting/plot.py
GenericElementPlot.get_extents
def get_extents(self, element, ranges, range_type='combined', xdim=None, ydim=None, zdim=None): """ Gets the extents for the axes from the current Element. The globally computed ranges can optionally override the extents. The extents are computed by combining the data ranges, extents and dimension ranges. Each of these can be obtained individually by setting the range_type to one of: * 'data': Just the data ranges * 'extents': Element.extents * 'soft': Dimension.soft_range values * 'hard': Dimension.range values To obtain the combined range, which includes range padding the default may be used: * 'combined': All the range types combined and padding applied This allows Overlay plots to obtain each range and combine them appropriately for all the objects in the overlay. """ num = 6 if self.projection == '3d' else 4 if self.apply_extents and range_type in ('combined', 'extents'): norm_opts = self.lookup_options(element, 'norm').options if norm_opts.get('framewise', False) or self.dynamic: extents = element.extents else: extent_list = self.hmap.traverse(lambda x: x.extents, [Element]) extents = util.max_extents(extent_list, self.projection == '3d') else: extents = (np.NaN,) * num if range_type == 'extents': return extents if self.apply_ranges: range_extents = self._get_range_extents(element, ranges, range_type, xdim, ydim, zdim) else: range_extents = (np.NaN,) * num if getattr(self, 'shared_axes', False) and self.subplot: combined = util.max_extents([range_extents, extents], self.projection == '3d') else: max_extent = [] for l1, l2 in zip(range_extents, extents): if isfinite(l2): max_extent.append(l2) else: max_extent.append(l1) combined = tuple(max_extent) if self.projection == '3d': x0, y0, z0, x1, y1, z1 = combined else: x0, y0, x1, y1 = combined x0, x1 = util.dimension_range(x0, x1, self.xlim, (None, None)) y0, y1 = util.dimension_range(y0, y1, self.ylim, (None, None)) if self.projection == '3d': z0, z1 = util.dimension_range(z0, z1, self.zlim, (None, None)) return (x0, y0, z0, x1, y1, z1) return (x0, y0, x1, y1)
python
def get_extents(self, element, ranges, range_type='combined', xdim=None, ydim=None, zdim=None): """ Gets the extents for the axes from the current Element. The globally computed ranges can optionally override the extents. The extents are computed by combining the data ranges, extents and dimension ranges. Each of these can be obtained individually by setting the range_type to one of: * 'data': Just the data ranges * 'extents': Element.extents * 'soft': Dimension.soft_range values * 'hard': Dimension.range values To obtain the combined range, which includes range padding the default may be used: * 'combined': All the range types combined and padding applied This allows Overlay plots to obtain each range and combine them appropriately for all the objects in the overlay. """ num = 6 if self.projection == '3d' else 4 if self.apply_extents and range_type in ('combined', 'extents'): norm_opts = self.lookup_options(element, 'norm').options if norm_opts.get('framewise', False) or self.dynamic: extents = element.extents else: extent_list = self.hmap.traverse(lambda x: x.extents, [Element]) extents = util.max_extents(extent_list, self.projection == '3d') else: extents = (np.NaN,) * num if range_type == 'extents': return extents if self.apply_ranges: range_extents = self._get_range_extents(element, ranges, range_type, xdim, ydim, zdim) else: range_extents = (np.NaN,) * num if getattr(self, 'shared_axes', False) and self.subplot: combined = util.max_extents([range_extents, extents], self.projection == '3d') else: max_extent = [] for l1, l2 in zip(range_extents, extents): if isfinite(l2): max_extent.append(l2) else: max_extent.append(l1) combined = tuple(max_extent) if self.projection == '3d': x0, y0, z0, x1, y1, z1 = combined else: x0, y0, x1, y1 = combined x0, x1 = util.dimension_range(x0, x1, self.xlim, (None, None)) y0, y1 = util.dimension_range(y0, y1, self.ylim, (None, None)) if self.projection == '3d': z0, z1 = util.dimension_range(z0, z1, self.zlim, (None, None)) return (x0, y0, z0, x1, y1, z1) return (x0, y0, x1, y1)
['def', 'get_extents', '(', 'self', ',', 'element', ',', 'ranges', ',', 'range_type', '=', "'combined'", ',', 'xdim', '=', 'None', ',', 'ydim', '=', 'None', ',', 'zdim', '=', 'None', ')', ':', 'num', '=', '6', 'if', 'self', '.', 'projection', '==', "'3d'", 'else', '4', 'if', 'self', '.', 'apply_extents', 'and', 'range_type', 'in', '(', "'combined'", ',', "'extents'", ')', ':', 'norm_opts', '=', 'self', '.', 'lookup_options', '(', 'element', ',', "'norm'", ')', '.', 'options', 'if', 'norm_opts', '.', 'get', '(', "'framewise'", ',', 'False', ')', 'or', 'self', '.', 'dynamic', ':', 'extents', '=', 'element', '.', 'extents', 'else', ':', 'extent_list', '=', 'self', '.', 'hmap', '.', 'traverse', '(', 'lambda', 'x', ':', 'x', '.', 'extents', ',', '[', 'Element', ']', ')', 'extents', '=', 'util', '.', 'max_extents', '(', 'extent_list', ',', 'self', '.', 'projection', '==', "'3d'", ')', 'else', ':', 'extents', '=', '(', 'np', '.', 'NaN', ',', ')', '*', 'num', 'if', 'range_type', '==', "'extents'", ':', 'return', 'extents', 'if', 'self', '.', 'apply_ranges', ':', 'range_extents', '=', 'self', '.', '_get_range_extents', '(', 'element', ',', 'ranges', ',', 'range_type', ',', 'xdim', ',', 'ydim', ',', 'zdim', ')', 'else', ':', 'range_extents', '=', '(', 'np', '.', 'NaN', ',', ')', '*', 'num', 'if', 'getattr', '(', 'self', ',', "'shared_axes'", ',', 'False', ')', 'and', 'self', '.', 'subplot', ':', 'combined', '=', 'util', '.', 'max_extents', '(', '[', 'range_extents', ',', 'extents', ']', ',', 'self', '.', 'projection', '==', "'3d'", ')', 'else', ':', 'max_extent', '=', '[', ']', 'for', 'l1', ',', 'l2', 'in', 'zip', '(', 'range_extents', ',', 'extents', ')', ':', 'if', 'isfinite', '(', 'l2', ')', ':', 'max_extent', '.', 'append', '(', 'l2', ')', 'else', ':', 'max_extent', '.', 'append', '(', 'l1', ')', 'combined', '=', 'tuple', '(', 'max_extent', ')', 'if', 'self', '.', 'projection', '==', "'3d'", ':', 'x0', ',', 'y0', ',', 'z0', ',', 'x1', ',', 'y1', ',', 'z1', '=', 'combined', 'else', ':', 'x0', ',', 'y0', ',', 'x1', ',', 'y1', '=', 'combined', 'x0', ',', 'x1', '=', 'util', '.', 'dimension_range', '(', 'x0', ',', 'x1', ',', 'self', '.', 'xlim', ',', '(', 'None', ',', 'None', ')', ')', 'y0', ',', 'y1', '=', 'util', '.', 'dimension_range', '(', 'y0', ',', 'y1', ',', 'self', '.', 'ylim', ',', '(', 'None', ',', 'None', ')', ')', 'if', 'self', '.', 'projection', '==', "'3d'", ':', 'z0', ',', 'z1', '=', 'util', '.', 'dimension_range', '(', 'z0', ',', 'z1', ',', 'self', '.', 'zlim', ',', '(', 'None', ',', 'None', ')', ')', 'return', '(', 'x0', ',', 'y0', ',', 'z0', ',', 'x1', ',', 'y1', ',', 'z1', ')', 'return', '(', 'x0', ',', 'y0', ',', 'x1', ',', 'y1', ')']
Gets the extents for the axes from the current Element. The globally computed ranges can optionally override the extents. The extents are computed by combining the data ranges, extents and dimension ranges. Each of these can be obtained individually by setting the range_type to one of: * 'data': Just the data ranges * 'extents': Element.extents * 'soft': Dimension.soft_range values * 'hard': Dimension.range values To obtain the combined range, which includes range padding the default may be used: * 'combined': All the range types combined and padding applied This allows Overlay plots to obtain each range and combine them appropriately for all the objects in the overlay.
['Gets', 'the', 'extents', 'for', 'the', 'axes', 'from', 'the', 'current', 'Element', '.', 'The', 'globally', 'computed', 'ranges', 'can', 'optionally', 'override', 'the', 'extents', '.']
train
https://github.com/pyviz/holoviews/blob/ae0dd2f3de448b0ca5e9065aabd6ef8d84c7e655/holoviews/plotting/plot.py#L983-L1045
7,071
brandon-rhodes/python-sgp4
sgp4/io.py
twoline2rv
def twoline2rv(longstr1, longstr2, whichconst, afspc_mode=False): """Return a Satellite imported from two lines of TLE data. Provide the two TLE lines as strings `longstr1` and `longstr2`, and select which standard set of gravitational constants you want by providing `gravity_constants`: `sgp4.earth_gravity.wgs72` - Standard WGS 72 model `sgp4.earth_gravity.wgs84` - More recent WGS 84 model `sgp4.earth_gravity.wgs72old` - Legacy support for old SGP4 behavior Normally, computations are made using various recent improvements to the algorithm. If you want to turn some of these off and go back into "afspc" mode, then set `afspc_mode` to `True`. """ deg2rad = pi / 180.0; # 0.0174532925199433 xpdotp = 1440.0 / (2.0 *pi); # 229.1831180523293 tumin = whichconst.tumin satrec = Satellite() satrec.error = 0; satrec.whichconst = whichconst # Python extension: remembers its consts line = longstr1.rstrip() # try/except is not well supported by Numba if (len(line) >= 64 and line.startswith('1 ') and line[8] == ' ' and line[23] == '.' and line[32] == ' ' and line[34] == '.' and line[43] == ' ' and line[52] == ' ' and line[61] == ' ' and line[63] == ' '): _saved_satnum = satrec.satnum = int(line[2:7]) # classification = line[7] or 'U' # intldesg = line[9:17] two_digit_year = int(line[18:20]) satrec.epochdays = float(line[20:32]) satrec.ndot = float(line[33:43]) satrec.nddot = float(line[44] + '.' + line[45:50]) nexp = int(line[50:52]) satrec.bstar = float(line[53] + '.' + line[54:59]) ibexp = int(line[59:61]) # numb = int(line[62]) # elnum = int(line[64:68]) else: raise ValueError(error_message.format(1, LINE1, line)) line = longstr2.rstrip() if (len(line) >= 69 and line.startswith('2 ') and line[7] == ' ' and line[11] == '.' and line[16] == ' ' and line[20] == '.' and line[25] == ' ' and line[33] == ' ' and line[37] == '.' and line[42] == ' ' and line[46] == '.' and line[51] == ' '): satrec.satnum = int(line[2:7]) if _saved_satnum != satrec.satnum: raise ValueError('Object numbers in lines 1 and 2 do not match') satrec.inclo = float(line[8:16]) satrec.nodeo = float(line[17:25]) satrec.ecco = float('0.' + line[26:33].replace(' ', '0')) satrec.argpo = float(line[34:42]) satrec.mo = float(line[43:51]) satrec.no = float(line[52:63]) #revnum = line[63:68] #except (AssertionError, IndexError, ValueError): else: raise ValueError(error_message.format(2, LINE2, line)) # ---- find no, ndot, nddot ---- satrec.no = satrec.no / xpdotp; # rad/min satrec.nddot= satrec.nddot * pow(10.0, nexp); satrec.bstar= satrec.bstar * pow(10.0, ibexp); # ---- convert to sgp4 units ---- satrec.a = pow( satrec.no*tumin , (-2.0/3.0) ); satrec.ndot = satrec.ndot / (xpdotp*1440.0); # ? * minperday satrec.nddot= satrec.nddot / (xpdotp*1440.0*1440); # ---- find standard orbital elements ---- satrec.inclo = satrec.inclo * deg2rad; satrec.nodeo = satrec.nodeo * deg2rad; satrec.argpo = satrec.argpo * deg2rad; satrec.mo = satrec.mo * deg2rad; satrec.alta = satrec.a*(1.0 + satrec.ecco) - 1.0; satrec.altp = satrec.a*(1.0 - satrec.ecco) - 1.0; """ // ---------------------------------------------------------------- // find sgp4epoch time of element set // remember that sgp4 uses units of days from 0 jan 1950 (sgp4epoch) // and minutes from the epoch (time) // ---------------------------------------------------------------- // ---------------- temp fix for years from 1957-2056 ------------------- // --------- correct fix will occur when year is 4-digit in tle --------- """ if two_digit_year < 57: year = two_digit_year + 2000; else: year = two_digit_year + 1900; mon,day,hr,minute,sec = days2mdhms(year, satrec.epochdays); sec_whole, sec_fraction = divmod(sec, 1.0) satrec.epochyr = year satrec.jdsatepoch = jday(year,mon,day,hr,minute,sec); satrec.epoch = datetime(year, mon, day, hr, minute, int(sec_whole), int(sec_fraction * 1000000.0 // 1.0)) # ---------------- initialize the orbit at sgp4epoch ------------------- sgp4init(whichconst, afspc_mode, satrec.satnum, satrec.jdsatepoch-2433281.5, satrec.bstar, satrec.ecco, satrec.argpo, satrec.inclo, satrec.mo, satrec.no, satrec.nodeo, satrec) return satrec
python
def twoline2rv(longstr1, longstr2, whichconst, afspc_mode=False): """Return a Satellite imported from two lines of TLE data. Provide the two TLE lines as strings `longstr1` and `longstr2`, and select which standard set of gravitational constants you want by providing `gravity_constants`: `sgp4.earth_gravity.wgs72` - Standard WGS 72 model `sgp4.earth_gravity.wgs84` - More recent WGS 84 model `sgp4.earth_gravity.wgs72old` - Legacy support for old SGP4 behavior Normally, computations are made using various recent improvements to the algorithm. If you want to turn some of these off and go back into "afspc" mode, then set `afspc_mode` to `True`. """ deg2rad = pi / 180.0; # 0.0174532925199433 xpdotp = 1440.0 / (2.0 *pi); # 229.1831180523293 tumin = whichconst.tumin satrec = Satellite() satrec.error = 0; satrec.whichconst = whichconst # Python extension: remembers its consts line = longstr1.rstrip() # try/except is not well supported by Numba if (len(line) >= 64 and line.startswith('1 ') and line[8] == ' ' and line[23] == '.' and line[32] == ' ' and line[34] == '.' and line[43] == ' ' and line[52] == ' ' and line[61] == ' ' and line[63] == ' '): _saved_satnum = satrec.satnum = int(line[2:7]) # classification = line[7] or 'U' # intldesg = line[9:17] two_digit_year = int(line[18:20]) satrec.epochdays = float(line[20:32]) satrec.ndot = float(line[33:43]) satrec.nddot = float(line[44] + '.' + line[45:50]) nexp = int(line[50:52]) satrec.bstar = float(line[53] + '.' + line[54:59]) ibexp = int(line[59:61]) # numb = int(line[62]) # elnum = int(line[64:68]) else: raise ValueError(error_message.format(1, LINE1, line)) line = longstr2.rstrip() if (len(line) >= 69 and line.startswith('2 ') and line[7] == ' ' and line[11] == '.' and line[16] == ' ' and line[20] == '.' and line[25] == ' ' and line[33] == ' ' and line[37] == '.' and line[42] == ' ' and line[46] == '.' and line[51] == ' '): satrec.satnum = int(line[2:7]) if _saved_satnum != satrec.satnum: raise ValueError('Object numbers in lines 1 and 2 do not match') satrec.inclo = float(line[8:16]) satrec.nodeo = float(line[17:25]) satrec.ecco = float('0.' + line[26:33].replace(' ', '0')) satrec.argpo = float(line[34:42]) satrec.mo = float(line[43:51]) satrec.no = float(line[52:63]) #revnum = line[63:68] #except (AssertionError, IndexError, ValueError): else: raise ValueError(error_message.format(2, LINE2, line)) # ---- find no, ndot, nddot ---- satrec.no = satrec.no / xpdotp; # rad/min satrec.nddot= satrec.nddot * pow(10.0, nexp); satrec.bstar= satrec.bstar * pow(10.0, ibexp); # ---- convert to sgp4 units ---- satrec.a = pow( satrec.no*tumin , (-2.0/3.0) ); satrec.ndot = satrec.ndot / (xpdotp*1440.0); # ? * minperday satrec.nddot= satrec.nddot / (xpdotp*1440.0*1440); # ---- find standard orbital elements ---- satrec.inclo = satrec.inclo * deg2rad; satrec.nodeo = satrec.nodeo * deg2rad; satrec.argpo = satrec.argpo * deg2rad; satrec.mo = satrec.mo * deg2rad; satrec.alta = satrec.a*(1.0 + satrec.ecco) - 1.0; satrec.altp = satrec.a*(1.0 - satrec.ecco) - 1.0; """ // ---------------------------------------------------------------- // find sgp4epoch time of element set // remember that sgp4 uses units of days from 0 jan 1950 (sgp4epoch) // and minutes from the epoch (time) // ---------------------------------------------------------------- // ---------------- temp fix for years from 1957-2056 ------------------- // --------- correct fix will occur when year is 4-digit in tle --------- """ if two_digit_year < 57: year = two_digit_year + 2000; else: year = two_digit_year + 1900; mon,day,hr,minute,sec = days2mdhms(year, satrec.epochdays); sec_whole, sec_fraction = divmod(sec, 1.0) satrec.epochyr = year satrec.jdsatepoch = jday(year,mon,day,hr,minute,sec); satrec.epoch = datetime(year, mon, day, hr, minute, int(sec_whole), int(sec_fraction * 1000000.0 // 1.0)) # ---------------- initialize the orbit at sgp4epoch ------------------- sgp4init(whichconst, afspc_mode, satrec.satnum, satrec.jdsatepoch-2433281.5, satrec.bstar, satrec.ecco, satrec.argpo, satrec.inclo, satrec.mo, satrec.no, satrec.nodeo, satrec) return satrec
['def', 'twoline2rv', '(', 'longstr1', ',', 'longstr2', ',', 'whichconst', ',', 'afspc_mode', '=', 'False', ')', ':', 'deg2rad', '=', 'pi', '/', '180.0', '# 0.0174532925199433', 'xpdotp', '=', '1440.0', '/', '(', '2.0', '*', 'pi', ')', '# 229.1831180523293', 'tumin', '=', 'whichconst', '.', 'tumin', 'satrec', '=', 'Satellite', '(', ')', 'satrec', '.', 'error', '=', '0', 'satrec', '.', 'whichconst', '=', 'whichconst', '# Python extension: remembers its consts', 'line', '=', 'longstr1', '.', 'rstrip', '(', ')', '# try/except is not well supported by Numba', 'if', '(', 'len', '(', 'line', ')', '>=', '64', 'and', 'line', '.', 'startswith', '(', "'1 '", ')', 'and', 'line', '[', '8', ']', '==', "' '", 'and', 'line', '[', '23', ']', '==', "'.'", 'and', 'line', '[', '32', ']', '==', "' '", 'and', 'line', '[', '34', ']', '==', "'.'", 'and', 'line', '[', '43', ']', '==', "' '", 'and', 'line', '[', '52', ']', '==', "' '", 'and', 'line', '[', '61', ']', '==', "' '", 'and', 'line', '[', '63', ']', '==', "' '", ')', ':', '_saved_satnum', '=', 'satrec', '.', 'satnum', '=', 'int', '(', 'line', '[', '2', ':', '7', ']', ')', "# classification = line[7] or 'U'", '# intldesg = line[9:17]', 'two_digit_year', '=', 'int', '(', 'line', '[', '18', ':', '20', ']', ')', 'satrec', '.', 'epochdays', '=', 'float', '(', 'line', '[', '20', ':', '32', ']', ')', 'satrec', '.', 'ndot', '=', 'float', '(', 'line', '[', '33', ':', '43', ']', ')', 'satrec', '.', 'nddot', '=', 'float', '(', 'line', '[', '44', ']', '+', "'.'", '+', 'line', '[', '45', ':', '50', ']', ')', 'nexp', '=', 'int', '(', 'line', '[', '50', ':', '52', ']', ')', 'satrec', '.', 'bstar', '=', 'float', '(', 'line', '[', '53', ']', '+', "'.'", '+', 'line', '[', '54', ':', '59', ']', ')', 'ibexp', '=', 'int', '(', 'line', '[', '59', ':', '61', ']', ')', '# numb = int(line[62])', '# elnum = int(line[64:68])', 'else', ':', 'raise', 'ValueError', '(', 'error_message', '.', 'format', '(', '1', ',', 'LINE1', ',', 'line', ')', ')', 'line', '=', 'longstr2', '.', 'rstrip', '(', ')', 'if', '(', 'len', '(', 'line', ')', '>=', '69', 'and', 'line', '.', 'startswith', '(', "'2 '", ')', 'and', 'line', '[', '7', ']', '==', "' '", 'and', 'line', '[', '11', ']', '==', "'.'", 'and', 'line', '[', '16', ']', '==', "' '", 'and', 'line', '[', '20', ']', '==', "'.'", 'and', 'line', '[', '25', ']', '==', "' '", 'and', 'line', '[', '33', ']', '==', "' '", 'and', 'line', '[', '37', ']', '==', "'.'", 'and', 'line', '[', '42', ']', '==', "' '", 'and', 'line', '[', '46', ']', '==', "'.'", 'and', 'line', '[', '51', ']', '==', "' '", ')', ':', 'satrec', '.', 'satnum', '=', 'int', '(', 'line', '[', '2', ':', '7', ']', ')', 'if', '_saved_satnum', '!=', 'satrec', '.', 'satnum', ':', 'raise', 'ValueError', '(', "'Object numbers in lines 1 and 2 do not match'", ')', 'satrec', '.', 'inclo', '=', 'float', '(', 'line', '[', '8', ':', '16', ']', ')', 'satrec', '.', 'nodeo', '=', 'float', '(', 'line', '[', '17', ':', '25', ']', ')', 'satrec', '.', 'ecco', '=', 'float', '(', "'0.'", '+', 'line', '[', '26', ':', '33', ']', '.', 'replace', '(', "' '", ',', "'0'", ')', ')', 'satrec', '.', 'argpo', '=', 'float', '(', 'line', '[', '34', ':', '42', ']', ')', 'satrec', '.', 'mo', '=', 'float', '(', 'line', '[', '43', ':', '51', ']', ')', 'satrec', '.', 'no', '=', 'float', '(', 'line', '[', '52', ':', '63', ']', ')', '#revnum = line[63:68]', '#except (AssertionError, IndexError, ValueError):', 'else', ':', 'raise', 'ValueError', '(', 'error_message', '.', 'format', '(', '2', ',', 'LINE2', ',', 'line', ')', ')', '# ---- find no, ndot, nddot ----', 'satrec', '.', 'no', '=', 'satrec', '.', 'no', '/', 'xpdotp', '# rad/min', 'satrec', '.', 'nddot', '=', 'satrec', '.', 'nddot', '*', 'pow', '(', '10.0', ',', 'nexp', ')', 'satrec', '.', 'bstar', '=', 'satrec', '.', 'bstar', '*', 'pow', '(', '10.0', ',', 'ibexp', ')', '# ---- convert to sgp4 units ----', 'satrec', '.', 'a', '=', 'pow', '(', 'satrec', '.', 'no', '*', 'tumin', ',', '(', '-', '2.0', '/', '3.0', ')', ')', 'satrec', '.', 'ndot', '=', 'satrec', '.', 'ndot', '/', '(', 'xpdotp', '*', '1440.0', ')', '# ? * minperday', 'satrec', '.', 'nddot', '=', 'satrec', '.', 'nddot', '/', '(', 'xpdotp', '*', '1440.0', '*', '1440', ')', '# ---- find standard orbital elements ----', 'satrec', '.', 'inclo', '=', 'satrec', '.', 'inclo', '*', 'deg2rad', 'satrec', '.', 'nodeo', '=', 'satrec', '.', 'nodeo', '*', 'deg2rad', 'satrec', '.', 'argpo', '=', 'satrec', '.', 'argpo', '*', 'deg2rad', 'satrec', '.', 'mo', '=', 'satrec', '.', 'mo', '*', 'deg2rad', 'satrec', '.', 'alta', '=', 'satrec', '.', 'a', '*', '(', '1.0', '+', 'satrec', '.', 'ecco', ')', '-', '1.0', 'satrec', '.', 'altp', '=', 'satrec', '.', 'a', '*', '(', '1.0', '-', 'satrec', '.', 'ecco', ')', '-', '1.0', '"""\n // ----------------------------------------------------------------\n // find sgp4epoch time of element set\n // remember that sgp4 uses units of days from 0 jan 1950 (sgp4epoch)\n // and minutes from the epoch (time)\n // ----------------------------------------------------------------\n\n // ---------------- temp fix for years from 1957-2056 -------------------\n // --------- correct fix will occur when year is 4-digit in tle ---------\n """', 'if', 'two_digit_year', '<', '57', ':', 'year', '=', 'two_digit_year', '+', '2000', 'else', ':', 'year', '=', 'two_digit_year', '+', '1900', 'mon', ',', 'day', ',', 'hr', ',', 'minute', ',', 'sec', '=', 'days2mdhms', '(', 'year', ',', 'satrec', '.', 'epochdays', ')', 'sec_whole', ',', 'sec_fraction', '=', 'divmod', '(', 'sec', ',', '1.0', ')', 'satrec', '.', 'epochyr', '=', 'year', 'satrec', '.', 'jdsatepoch', '=', 'jday', '(', 'year', ',', 'mon', ',', 'day', ',', 'hr', ',', 'minute', ',', 'sec', ')', 'satrec', '.', 'epoch', '=', 'datetime', '(', 'year', ',', 'mon', ',', 'day', ',', 'hr', ',', 'minute', ',', 'int', '(', 'sec_whole', ')', ',', 'int', '(', 'sec_fraction', '*', '1000000.0', '//', '1.0', ')', ')', '# ---------------- initialize the orbit at sgp4epoch -------------------', 'sgp4init', '(', 'whichconst', ',', 'afspc_mode', ',', 'satrec', '.', 'satnum', ',', 'satrec', '.', 'jdsatepoch', '-', '2433281.5', ',', 'satrec', '.', 'bstar', ',', 'satrec', '.', 'ecco', ',', 'satrec', '.', 'argpo', ',', 'satrec', '.', 'inclo', ',', 'satrec', '.', 'mo', ',', 'satrec', '.', 'no', ',', 'satrec', '.', 'nodeo', ',', 'satrec', ')', 'return', 'satrec']
Return a Satellite imported from two lines of TLE data. Provide the two TLE lines as strings `longstr1` and `longstr2`, and select which standard set of gravitational constants you want by providing `gravity_constants`: `sgp4.earth_gravity.wgs72` - Standard WGS 72 model `sgp4.earth_gravity.wgs84` - More recent WGS 84 model `sgp4.earth_gravity.wgs72old` - Legacy support for old SGP4 behavior Normally, computations are made using various recent improvements to the algorithm. If you want to turn some of these off and go back into "afspc" mode, then set `afspc_mode` to `True`.
['Return', 'a', 'Satellite', 'imported', 'from', 'two', 'lines', 'of', 'TLE', 'data', '.']
train
https://github.com/brandon-rhodes/python-sgp4/blob/a1e19e32831d6814b3ab34f55b39b8520d291c4e/sgp4/io.py#L102-L232
7,072
sentinel-hub/sentinelhub-py
sentinelhub/ogc.py
WebFeatureService._parse_tile_url
def _parse_tile_url(tile_url): """ Extracts tile name, data and AWS index from tile URL :param tile_url: Location of tile at AWS :type: tile_url: str :return: Tuple in a form (tile_name, date, aws_index) :rtype: (str, str, int) """ props = tile_url.rsplit('/', 7) return ''.join(props[1:4]), '-'.join(props[4:7]), int(props[7])
python
def _parse_tile_url(tile_url): """ Extracts tile name, data and AWS index from tile URL :param tile_url: Location of tile at AWS :type: tile_url: str :return: Tuple in a form (tile_name, date, aws_index) :rtype: (str, str, int) """ props = tile_url.rsplit('/', 7) return ''.join(props[1:4]), '-'.join(props[4:7]), int(props[7])
['def', '_parse_tile_url', '(', 'tile_url', ')', ':', 'props', '=', 'tile_url', '.', 'rsplit', '(', "'/'", ',', '7', ')', 'return', "''", '.', 'join', '(', 'props', '[', '1', ':', '4', ']', ')', ',', "'-'", '.', 'join', '(', 'props', '[', '4', ':', '7', ']', ')', ',', 'int', '(', 'props', '[', '7', ']', ')']
Extracts tile name, data and AWS index from tile URL :param tile_url: Location of tile at AWS :type: tile_url: str :return: Tuple in a form (tile_name, date, aws_index) :rtype: (str, str, int)
['Extracts', 'tile', 'name', 'data', 'and', 'AWS', 'index', 'from', 'tile', 'URL']
train
https://github.com/sentinel-hub/sentinelhub-py/blob/08a83b7f1e289187159a643336995d8369860fea/sentinelhub/ogc.py#L587-L596
7,073
duniter/duniter-python-api
duniterpy/api/endpoint.py
endpoint
def endpoint(value: Any) -> Any: """ Convert a endpoint string to the corresponding Endpoint instance type :param value: Endpoint string or subclass :return: """ if issubclass(type(value), Endpoint): return value elif isinstance(value, str): for api, cls in MANAGED_API.items(): if value.startswith(api + " "): return cls.from_inline(value) return UnknownEndpoint.from_inline(value) else: raise TypeError("Cannot convert {0} to endpoint".format(value))
python
def endpoint(value: Any) -> Any: """ Convert a endpoint string to the corresponding Endpoint instance type :param value: Endpoint string or subclass :return: """ if issubclass(type(value), Endpoint): return value elif isinstance(value, str): for api, cls in MANAGED_API.items(): if value.startswith(api + " "): return cls.from_inline(value) return UnknownEndpoint.from_inline(value) else: raise TypeError("Cannot convert {0} to endpoint".format(value))
['def', 'endpoint', '(', 'value', ':', 'Any', ')', '->', 'Any', ':', 'if', 'issubclass', '(', 'type', '(', 'value', ')', ',', 'Endpoint', ')', ':', 'return', 'value', 'elif', 'isinstance', '(', 'value', ',', 'str', ')', ':', 'for', 'api', ',', 'cls', 'in', 'MANAGED_API', '.', 'items', '(', ')', ':', 'if', 'value', '.', 'startswith', '(', 'api', '+', '" "', ')', ':', 'return', 'cls', '.', 'from_inline', '(', 'value', ')', 'return', 'UnknownEndpoint', '.', 'from_inline', '(', 'value', ')', 'else', ':', 'raise', 'TypeError', '(', '"Cannot convert {0} to endpoint"', '.', 'format', '(', 'value', ')', ')']
Convert a endpoint string to the corresponding Endpoint instance type :param value: Endpoint string or subclass :return:
['Convert', 'a', 'endpoint', 'string', 'to', 'the', 'corresponding', 'Endpoint', 'instance', 'type']
train
https://github.com/duniter/duniter-python-api/blob/3a1e5d61a2f72f5afaf29d010c6cf4dff3648165/duniterpy/api/endpoint.py#L547-L562
7,074
btimby/fulltext
fulltext/data/winmake.py
clean
def clean(): """Deletes dev files""" rm("$testfn*") rm("*.bak") rm("*.core") rm("*.egg-info") rm("*.orig") rm("*.pyc") rm("*.pyd") rm("*.pyo") rm("*.rej") rm("*.so") rm("*.~") rm("*__pycache__") rm(".coverage") rm(".tox") rm(".coverage") rm("build") rm("dist") rm("docs/_build") rm("htmlcov") rm("tmp") rm("venv")
python
def clean(): """Deletes dev files""" rm("$testfn*") rm("*.bak") rm("*.core") rm("*.egg-info") rm("*.orig") rm("*.pyc") rm("*.pyd") rm("*.pyo") rm("*.rej") rm("*.so") rm("*.~") rm("*__pycache__") rm(".coverage") rm(".tox") rm(".coverage") rm("build") rm("dist") rm("docs/_build") rm("htmlcov") rm("tmp") rm("venv")
['def', 'clean', '(', ')', ':', 'rm', '(', '"$testfn*"', ')', 'rm', '(', '"*.bak"', ')', 'rm', '(', '"*.core"', ')', 'rm', '(', '"*.egg-info"', ')', 'rm', '(', '"*.orig"', ')', 'rm', '(', '"*.pyc"', ')', 'rm', '(', '"*.pyd"', ')', 'rm', '(', '"*.pyo"', ')', 'rm', '(', '"*.rej"', ')', 'rm', '(', '"*.so"', ')', 'rm', '(', '"*.~"', ')', 'rm', '(', '"*__pycache__"', ')', 'rm', '(', '".coverage"', ')', 'rm', '(', '".tox"', ')', 'rm', '(', '".coverage"', ')', 'rm', '(', '"build"', ')', 'rm', '(', '"dist"', ')', 'rm', '(', '"docs/_build"', ')', 'rm', '(', '"htmlcov"', ')', 'rm', '(', '"tmp"', ')', 'rm', '(', '"venv"', ')']
Deletes dev files
['Deletes', 'dev', 'files']
train
https://github.com/btimby/fulltext/blob/9234cc1e2099209430e20317649549026de283ce/fulltext/data/winmake.py#L200-L222
7,075
benhoff/pluginmanager
pluginmanager/module_manager.py
ModuleManager.load_modules
def load_modules(self, filepaths): """ Loads the modules from their `filepaths`. A filepath may be a directory filepath if there is an `__init__.py` file in the directory. If a filepath errors, the exception will be caught and logged in the logger. Returns a list of modules. """ # removes filepaths from processed if they are not in sys.modules self._update_loaded_modules() filepaths = util.return_set(filepaths) modules = [] for filepath in filepaths: filepath = self._clean_filepath(filepath) # check to see if already processed and move onto next if so if self._processed_filepath(filepath): continue module_name = util.get_module_name(filepath) plugin_module_name = util.create_unique_module_name(module_name) try: module = load_source(plugin_module_name, filepath) # Catch all exceptions b/c loader will return errors # within the code itself, such as Syntax, NameErrors, etc. except Exception: exc_info = sys.exc_info() self._log.error(msg=self._error_string.format(filepath), exc_info=exc_info) continue self.loaded_modules.add(module.__name__) modules.append(module) self.processed_filepaths[module.__name__] = filepath return modules
python
def load_modules(self, filepaths): """ Loads the modules from their `filepaths`. A filepath may be a directory filepath if there is an `__init__.py` file in the directory. If a filepath errors, the exception will be caught and logged in the logger. Returns a list of modules. """ # removes filepaths from processed if they are not in sys.modules self._update_loaded_modules() filepaths = util.return_set(filepaths) modules = [] for filepath in filepaths: filepath = self._clean_filepath(filepath) # check to see if already processed and move onto next if so if self._processed_filepath(filepath): continue module_name = util.get_module_name(filepath) plugin_module_name = util.create_unique_module_name(module_name) try: module = load_source(plugin_module_name, filepath) # Catch all exceptions b/c loader will return errors # within the code itself, such as Syntax, NameErrors, etc. except Exception: exc_info = sys.exc_info() self._log.error(msg=self._error_string.format(filepath), exc_info=exc_info) continue self.loaded_modules.add(module.__name__) modules.append(module) self.processed_filepaths[module.__name__] = filepath return modules
['def', 'load_modules', '(', 'self', ',', 'filepaths', ')', ':', '# removes filepaths from processed if they are not in sys.modules', 'self', '.', '_update_loaded_modules', '(', ')', 'filepaths', '=', 'util', '.', 'return_set', '(', 'filepaths', ')', 'modules', '=', '[', ']', 'for', 'filepath', 'in', 'filepaths', ':', 'filepath', '=', 'self', '.', '_clean_filepath', '(', 'filepath', ')', '# check to see if already processed and move onto next if so', 'if', 'self', '.', '_processed_filepath', '(', 'filepath', ')', ':', 'continue', 'module_name', '=', 'util', '.', 'get_module_name', '(', 'filepath', ')', 'plugin_module_name', '=', 'util', '.', 'create_unique_module_name', '(', 'module_name', ')', 'try', ':', 'module', '=', 'load_source', '(', 'plugin_module_name', ',', 'filepath', ')', '# Catch all exceptions b/c loader will return errors', '# within the code itself, such as Syntax, NameErrors, etc.', 'except', 'Exception', ':', 'exc_info', '=', 'sys', '.', 'exc_info', '(', ')', 'self', '.', '_log', '.', 'error', '(', 'msg', '=', 'self', '.', '_error_string', '.', 'format', '(', 'filepath', ')', ',', 'exc_info', '=', 'exc_info', ')', 'continue', 'self', '.', 'loaded_modules', '.', 'add', '(', 'module', '.', '__name__', ')', 'modules', '.', 'append', '(', 'module', ')', 'self', '.', 'processed_filepaths', '[', 'module', '.', '__name__', ']', '=', 'filepath', 'return', 'modules']
Loads the modules from their `filepaths`. A filepath may be a directory filepath if there is an `__init__.py` file in the directory. If a filepath errors, the exception will be caught and logged in the logger. Returns a list of modules.
['Loads', 'the', 'modules', 'from', 'their', 'filepaths', '.', 'A', 'filepath', 'may', 'be', 'a', 'directory', 'filepath', 'if', 'there', 'is', 'an', '__init__', '.', 'py', 'file', 'in', 'the', 'directory', '.']
train
https://github.com/benhoff/pluginmanager/blob/a8a184f9ebfbb521703492cb88c1dbda4cd04c06/pluginmanager/module_manager.py#L45-L84
7,076
limpyd/redis-limpyd
limpyd/fields.py
RedisField.attached_to_model
def attached_to_model(self): """Tells if the current field is the one attached to the model, not instance""" try: if not bool(self._model): return False except AttributeError: return False else: try: return not bool(self._instance) except AttributeError: return True
python
def attached_to_model(self): """Tells if the current field is the one attached to the model, not instance""" try: if not bool(self._model): return False except AttributeError: return False else: try: return not bool(self._instance) except AttributeError: return True
['def', 'attached_to_model', '(', 'self', ')', ':', 'try', ':', 'if', 'not', 'bool', '(', 'self', '.', '_model', ')', ':', 'return', 'False', 'except', 'AttributeError', ':', 'return', 'False', 'else', ':', 'try', ':', 'return', 'not', 'bool', '(', 'self', '.', '_instance', ')', 'except', 'AttributeError', ':', 'return', 'True']
Tells if the current field is the one attached to the model, not instance
['Tells', 'if', 'the', 'current', 'field', 'is', 'the', 'one', 'attached', 'to', 'the', 'model', 'not', 'instance']
train
https://github.com/limpyd/redis-limpyd/blob/3c745dde1390a0bd09690b77a089dcc08c6c7e43/limpyd/fields.py#L426-L437
7,077
ailionx/cloudflare-ddns
cloudflare_ddns/cloudflare.py
CloudFlare.create_record
def create_record(self, dns_type, name, content, **kwargs): """ Create a dns record :param dns_type: :param name: :param content: :param kwargs: :return: """ data = { 'type': dns_type, 'name': name, 'content': content } if kwargs.get('ttl') and kwargs['ttl'] != 1: data['ttl'] = kwargs['ttl'] if kwargs.get('proxied') is True: data['proxied'] = True else: data['proxied'] = False content = self.request( self.api_url + self.zone['id'] + '/dns_records', 'post', data=data ) print('DNS record successfully created') return content['result']
python
def create_record(self, dns_type, name, content, **kwargs): """ Create a dns record :param dns_type: :param name: :param content: :param kwargs: :return: """ data = { 'type': dns_type, 'name': name, 'content': content } if kwargs.get('ttl') and kwargs['ttl'] != 1: data['ttl'] = kwargs['ttl'] if kwargs.get('proxied') is True: data['proxied'] = True else: data['proxied'] = False content = self.request( self.api_url + self.zone['id'] + '/dns_records', 'post', data=data ) print('DNS record successfully created') return content['result']
['def', 'create_record', '(', 'self', ',', 'dns_type', ',', 'name', ',', 'content', ',', '*', '*', 'kwargs', ')', ':', 'data', '=', '{', "'type'", ':', 'dns_type', ',', "'name'", ':', 'name', ',', "'content'", ':', 'content', '}', 'if', 'kwargs', '.', 'get', '(', "'ttl'", ')', 'and', 'kwargs', '[', "'ttl'", ']', '!=', '1', ':', 'data', '[', "'ttl'", ']', '=', 'kwargs', '[', "'ttl'", ']', 'if', 'kwargs', '.', 'get', '(', "'proxied'", ')', 'is', 'True', ':', 'data', '[', "'proxied'", ']', '=', 'True', 'else', ':', 'data', '[', "'proxied'", ']', '=', 'False', 'content', '=', 'self', '.', 'request', '(', 'self', '.', 'api_url', '+', 'self', '.', 'zone', '[', "'id'", ']', '+', "'/dns_records'", ',', "'post'", ',', 'data', '=', 'data', ')', 'print', '(', "'DNS record successfully created'", ')', 'return', 'content', '[', "'result'", ']']
Create a dns record :param dns_type: :param name: :param content: :param kwargs: :return:
['Create', 'a', 'dns', 'record', ':', 'param', 'dns_type', ':', ':', 'param', 'name', ':', ':', 'param', 'content', ':', ':', 'param', 'kwargs', ':', ':', 'return', ':']
train
https://github.com/ailionx/cloudflare-ddns/blob/e4808b8314e447f69fab77b5bd3880846e59adbe/cloudflare_ddns/cloudflare.py#L136-L162
7,078
phac-nml/sistr_cmd
sistr/src/parsers.py
fasta_format_check
def fasta_format_check(fasta_path, logger): """ Check that a file is valid FASTA format. - First non-blank line needs to begin with a '>' header character. - Sequence can only contain valid IUPAC nucleotide characters Args: fasta_str (str): FASTA file contents string Raises: Exception: If invalid FASTA format """ header_count = 0 line_count = 1 nt_count = 0 with open(fasta_path) as f: for l in f: l = l.strip() if l == '': continue if l[0] == '>': header_count += 1 continue if header_count == 0 and l[0] != '>': error_msg = 'First non-blank line (L:{line_count}) does not contain FASTA header. Line beginning with ">" expected.' \ .format(line_count=line_count) logger.error(error_msg) raise Exception(error_msg) non_nucleotide_chars_in_line = set(l) - VALID_NUCLEOTIDES if len(non_nucleotide_chars_in_line) > 0: error_msg = 'Line {line} contains the following non-nucleotide characters: {non_nt_chars}' \ .format(line=line_count, non_nt_chars=', '.join([x for x in non_nucleotide_chars_in_line])) logger.error(error_msg) raise Exception(error_msg) nt_count += len(l) line_count += 1 if nt_count == 0: error_msg = 'File "{}" does not contain any nucleotide sequence.'.format(fasta_path) logger.error(error_msg) raise Exception(error_msg) logger.info('Valid FASTA format "{}" ({} bp)'.format(fasta_path, nt_count))
python
def fasta_format_check(fasta_path, logger): """ Check that a file is valid FASTA format. - First non-blank line needs to begin with a '>' header character. - Sequence can only contain valid IUPAC nucleotide characters Args: fasta_str (str): FASTA file contents string Raises: Exception: If invalid FASTA format """ header_count = 0 line_count = 1 nt_count = 0 with open(fasta_path) as f: for l in f: l = l.strip() if l == '': continue if l[0] == '>': header_count += 1 continue if header_count == 0 and l[0] != '>': error_msg = 'First non-blank line (L:{line_count}) does not contain FASTA header. Line beginning with ">" expected.' \ .format(line_count=line_count) logger.error(error_msg) raise Exception(error_msg) non_nucleotide_chars_in_line = set(l) - VALID_NUCLEOTIDES if len(non_nucleotide_chars_in_line) > 0: error_msg = 'Line {line} contains the following non-nucleotide characters: {non_nt_chars}' \ .format(line=line_count, non_nt_chars=', '.join([x for x in non_nucleotide_chars_in_line])) logger.error(error_msg) raise Exception(error_msg) nt_count += len(l) line_count += 1 if nt_count == 0: error_msg = 'File "{}" does not contain any nucleotide sequence.'.format(fasta_path) logger.error(error_msg) raise Exception(error_msg) logger.info('Valid FASTA format "{}" ({} bp)'.format(fasta_path, nt_count))
['def', 'fasta_format_check', '(', 'fasta_path', ',', 'logger', ')', ':', 'header_count', '=', '0', 'line_count', '=', '1', 'nt_count', '=', '0', 'with', 'open', '(', 'fasta_path', ')', 'as', 'f', ':', 'for', 'l', 'in', 'f', ':', 'l', '=', 'l', '.', 'strip', '(', ')', 'if', 'l', '==', "''", ':', 'continue', 'if', 'l', '[', '0', ']', '==', "'>'", ':', 'header_count', '+=', '1', 'continue', 'if', 'header_count', '==', '0', 'and', 'l', '[', '0', ']', '!=', "'>'", ':', 'error_msg', '=', '\'First non-blank line (L:{line_count}) does not contain FASTA header. Line beginning with ">" expected.\'', '.', 'format', '(', 'line_count', '=', 'line_count', ')', 'logger', '.', 'error', '(', 'error_msg', ')', 'raise', 'Exception', '(', 'error_msg', ')', 'non_nucleotide_chars_in_line', '=', 'set', '(', 'l', ')', '-', 'VALID_NUCLEOTIDES', 'if', 'len', '(', 'non_nucleotide_chars_in_line', ')', '>', '0', ':', 'error_msg', '=', "'Line {line} contains the following non-nucleotide characters: {non_nt_chars}'", '.', 'format', '(', 'line', '=', 'line_count', ',', 'non_nt_chars', '=', "', '", '.', 'join', '(', '[', 'x', 'for', 'x', 'in', 'non_nucleotide_chars_in_line', ']', ')', ')', 'logger', '.', 'error', '(', 'error_msg', ')', 'raise', 'Exception', '(', 'error_msg', ')', 'nt_count', '+=', 'len', '(', 'l', ')', 'line_count', '+=', '1', 'if', 'nt_count', '==', '0', ':', 'error_msg', '=', '\'File "{}" does not contain any nucleotide sequence.\'', '.', 'format', '(', 'fasta_path', ')', 'logger', '.', 'error', '(', 'error_msg', ')', 'raise', 'Exception', '(', 'error_msg', ')', 'logger', '.', 'info', '(', '\'Valid FASTA format "{}" ({} bp)\'', '.', 'format', '(', 'fasta_path', ',', 'nt_count', ')', ')']
Check that a file is valid FASTA format. - First non-blank line needs to begin with a '>' header character. - Sequence can only contain valid IUPAC nucleotide characters Args: fasta_str (str): FASTA file contents string Raises: Exception: If invalid FASTA format
['Check', 'that', 'a', 'file', 'is', 'valid', 'FASTA', 'format', '.']
train
https://github.com/phac-nml/sistr_cmd/blob/4630fae72439723b354a94b94fbe76ad2f9f6295/sistr/src/parsers.py#L64-L110
7,079
PmagPy/PmagPy
programs/demag_gui.py
Demag_GUI.user_warning
def user_warning(self, message, caption='Warning!'): """ Shows a dialog that warns the user about some action Parameters ---------- message : message to display to user caption : title for dialog (default: "Warning!") Returns ------- continue_bool : True or False """ dlg = wx.MessageDialog(self, message, caption, wx.OK | wx.CANCEL | wx.ICON_WARNING) if self.show_dlg(dlg) == wx.ID_OK: continue_bool = True else: continue_bool = False dlg.Destroy() return continue_bool
python
def user_warning(self, message, caption='Warning!'): """ Shows a dialog that warns the user about some action Parameters ---------- message : message to display to user caption : title for dialog (default: "Warning!") Returns ------- continue_bool : True or False """ dlg = wx.MessageDialog(self, message, caption, wx.OK | wx.CANCEL | wx.ICON_WARNING) if self.show_dlg(dlg) == wx.ID_OK: continue_bool = True else: continue_bool = False dlg.Destroy() return continue_bool
['def', 'user_warning', '(', 'self', ',', 'message', ',', 'caption', '=', "'Warning!'", ')', ':', 'dlg', '=', 'wx', '.', 'MessageDialog', '(', 'self', ',', 'message', ',', 'caption', ',', 'wx', '.', 'OK', '|', 'wx', '.', 'CANCEL', '|', 'wx', '.', 'ICON_WARNING', ')', 'if', 'self', '.', 'show_dlg', '(', 'dlg', ')', '==', 'wx', '.', 'ID_OK', ':', 'continue_bool', '=', 'True', 'else', ':', 'continue_bool', '=', 'False', 'dlg', '.', 'Destroy', '(', ')', 'return', 'continue_bool']
Shows a dialog that warns the user about some action Parameters ---------- message : message to display to user caption : title for dialog (default: "Warning!") Returns ------- continue_bool : True or False
['Shows', 'a', 'dialog', 'that', 'warns', 'the', 'user', 'about', 'some', 'action']
train
https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/demag_gui.py#L5142-L5162
7,080
Tygs/ww
src/ww/tools/iterables.py
iterslice
def iterslice(iterable, start=0, stop=None, step=1): # type: (Iterable[T], int, int, int) -> Iterable[T] """ Like itertools.islice, but accept int and callables. If `start` is a callable, start the slice after the first time start(item) == True. If `stop` is a callable, stop the slice after the first time stop(item) == True. """ if step < 0: raise ValueError("The step can not be negative: '%s' given" % step) if not isinstance(start, int): # [Callable:Callable] if not isinstance(stop, int) and stop: return stops_when(starts_when(iterable, start), stop) # [Callable:int] return starts_when(itertools.islice(iterable, None, stop, step), start) # [int:Callable] if not isinstance(stop, int) and stop: return stops_when(itertools.islice(iterable, start, None, step), stop) # [int:int] return itertools.islice(iterable, start, stop, step)
python
def iterslice(iterable, start=0, stop=None, step=1): # type: (Iterable[T], int, int, int) -> Iterable[T] """ Like itertools.islice, but accept int and callables. If `start` is a callable, start the slice after the first time start(item) == True. If `stop` is a callable, stop the slice after the first time stop(item) == True. """ if step < 0: raise ValueError("The step can not be negative: '%s' given" % step) if not isinstance(start, int): # [Callable:Callable] if not isinstance(stop, int) and stop: return stops_when(starts_when(iterable, start), stop) # [Callable:int] return starts_when(itertools.islice(iterable, None, stop, step), start) # [int:Callable] if not isinstance(stop, int) and stop: return stops_when(itertools.islice(iterable, start, None, step), stop) # [int:int] return itertools.islice(iterable, start, stop, step)
['def', 'iterslice', '(', 'iterable', ',', 'start', '=', '0', ',', 'stop', '=', 'None', ',', 'step', '=', '1', ')', ':', '# type: (Iterable[T], int, int, int) -> Iterable[T]', 'if', 'step', '<', '0', ':', 'raise', 'ValueError', '(', '"The step can not be negative: \'%s\' given"', '%', 'step', ')', 'if', 'not', 'isinstance', '(', 'start', ',', 'int', ')', ':', '# [Callable:Callable]', 'if', 'not', 'isinstance', '(', 'stop', ',', 'int', ')', 'and', 'stop', ':', 'return', 'stops_when', '(', 'starts_when', '(', 'iterable', ',', 'start', ')', ',', 'stop', ')', '# [Callable:int]', 'return', 'starts_when', '(', 'itertools', '.', 'islice', '(', 'iterable', ',', 'None', ',', 'stop', ',', 'step', ')', ',', 'start', ')', '# [int:Callable]', 'if', 'not', 'isinstance', '(', 'stop', ',', 'int', ')', 'and', 'stop', ':', 'return', 'stops_when', '(', 'itertools', '.', 'islice', '(', 'iterable', ',', 'start', ',', 'None', ',', 'step', ')', ',', 'stop', ')', '# [int:int]', 'return', 'itertools', '.', 'islice', '(', 'iterable', ',', 'start', ',', 'stop', ',', 'step', ')']
Like itertools.islice, but accept int and callables. If `start` is a callable, start the slice after the first time start(item) == True. If `stop` is a callable, stop the slice after the first time stop(item) == True.
['Like', 'itertools', '.', 'islice', 'but', 'accept', 'int', 'and', 'callables', '.']
train
https://github.com/Tygs/ww/blob/6a4b85141c9b74026abe8f3fa9bc7021f3c99fd4/src/ww/tools/iterables.py#L265-L293
7,081
materialsproject/pymatgen
pymatgen/analysis/elasticity/elastic.py
find_eq_stress
def find_eq_stress(strains, stresses, tol=1e-10): """ Finds stress corresponding to zero strain state in stress-strain list Args: strains (Nx3x3 array-like): array corresponding to strains stresses (Nx3x3 array-like): array corresponding to stresses tol (float): tolerance to find zero strain state """ stress_array = np.array(stresses) strain_array = np.array(strains) eq_stress = stress_array[np.all(abs(strain_array)<tol, axis=(1,2))] if eq_stress.size != 0: all_same = (abs(eq_stress - eq_stress[0]) < 1e-8).all() if len(eq_stress) > 1 and not all_same: raise ValueError("Multiple stresses found for equilibrium strain" " state, please specify equilibrium stress or " " remove extraneous stresses.") eq_stress = eq_stress[0] else: warnings.warn("No eq state found, returning zero voigt stress") eq_stress = Stress(np.zeros((3, 3))) return eq_stress
python
def find_eq_stress(strains, stresses, tol=1e-10): """ Finds stress corresponding to zero strain state in stress-strain list Args: strains (Nx3x3 array-like): array corresponding to strains stresses (Nx3x3 array-like): array corresponding to stresses tol (float): tolerance to find zero strain state """ stress_array = np.array(stresses) strain_array = np.array(strains) eq_stress = stress_array[np.all(abs(strain_array)<tol, axis=(1,2))] if eq_stress.size != 0: all_same = (abs(eq_stress - eq_stress[0]) < 1e-8).all() if len(eq_stress) > 1 and not all_same: raise ValueError("Multiple stresses found for equilibrium strain" " state, please specify equilibrium stress or " " remove extraneous stresses.") eq_stress = eq_stress[0] else: warnings.warn("No eq state found, returning zero voigt stress") eq_stress = Stress(np.zeros((3, 3))) return eq_stress
['def', 'find_eq_stress', '(', 'strains', ',', 'stresses', ',', 'tol', '=', '1e-10', ')', ':', 'stress_array', '=', 'np', '.', 'array', '(', 'stresses', ')', 'strain_array', '=', 'np', '.', 'array', '(', 'strains', ')', 'eq_stress', '=', 'stress_array', '[', 'np', '.', 'all', '(', 'abs', '(', 'strain_array', ')', '<', 'tol', ',', 'axis', '=', '(', '1', ',', '2', ')', ')', ']', 'if', 'eq_stress', '.', 'size', '!=', '0', ':', 'all_same', '=', '(', 'abs', '(', 'eq_stress', '-', 'eq_stress', '[', '0', ']', ')', '<', '1e-8', ')', '.', 'all', '(', ')', 'if', 'len', '(', 'eq_stress', ')', '>', '1', 'and', 'not', 'all_same', ':', 'raise', 'ValueError', '(', '"Multiple stresses found for equilibrium strain"', '" state, please specify equilibrium stress or "', '" remove extraneous stresses."', ')', 'eq_stress', '=', 'eq_stress', '[', '0', ']', 'else', ':', 'warnings', '.', 'warn', '(', '"No eq state found, returning zero voigt stress"', ')', 'eq_stress', '=', 'Stress', '(', 'np', '.', 'zeros', '(', '(', '3', ',', '3', ')', ')', ')', 'return', 'eq_stress']
Finds stress corresponding to zero strain state in stress-strain list Args: strains (Nx3x3 array-like): array corresponding to strains stresses (Nx3x3 array-like): array corresponding to stresses tol (float): tolerance to find zero strain state
['Finds', 'stress', 'corresponding', 'to', 'zero', 'strain', 'state', 'in', 'stress', '-', 'strain', 'list']
train
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/analysis/elasticity/elastic.py#L890-L913
7,082
djgagne/hagelslag
hagelslag/processing/STObject.py
STObject.calc_attribute_statistic
def calc_attribute_statistic(self, attribute, statistic, time): """ Calculate statistics based on the values of an attribute. The following statistics are supported: mean, max, min, std, ptp (range), median, skew (mean - median), and percentile_(percentile value). Args: attribute: Attribute extracted from model grid statistic: Name of statistic being used. time: timestep of the object being investigated Returns: The value of the statistic """ ti = np.where(self.times == time)[0][0] ma = np.where(self.masks[ti].ravel() == 1) if statistic in ['mean', 'max', 'min', 'std', 'ptp']: stat_val = getattr(self.attributes[attribute][ti].ravel()[ma], statistic)() elif statistic == 'median': stat_val = np.median(self.attributes[attribute][ti].ravel()[ma]) elif statistic == "skew": stat_val = np.mean(self.attributes[attribute][ti].ravel()[ma]) - \ np.median(self.attributes[attribute][ti].ravel()[ma]) elif 'percentile' in statistic: per = int(statistic.split("_")[1]) stat_val = np.percentile(self.attributes[attribute][ti].ravel()[ma], per) elif 'dt' in statistic: stat_name = statistic[:-3] if ti == 0: stat_val = 0 else: stat_val = self.calc_attribute_statistic(attribute, stat_name, time) \ - self.calc_attribute_statistic(attribute, stat_name, time - 1) else: stat_val = np.nan return stat_val
python
def calc_attribute_statistic(self, attribute, statistic, time): """ Calculate statistics based on the values of an attribute. The following statistics are supported: mean, max, min, std, ptp (range), median, skew (mean - median), and percentile_(percentile value). Args: attribute: Attribute extracted from model grid statistic: Name of statistic being used. time: timestep of the object being investigated Returns: The value of the statistic """ ti = np.where(self.times == time)[0][0] ma = np.where(self.masks[ti].ravel() == 1) if statistic in ['mean', 'max', 'min', 'std', 'ptp']: stat_val = getattr(self.attributes[attribute][ti].ravel()[ma], statistic)() elif statistic == 'median': stat_val = np.median(self.attributes[attribute][ti].ravel()[ma]) elif statistic == "skew": stat_val = np.mean(self.attributes[attribute][ti].ravel()[ma]) - \ np.median(self.attributes[attribute][ti].ravel()[ma]) elif 'percentile' in statistic: per = int(statistic.split("_")[1]) stat_val = np.percentile(self.attributes[attribute][ti].ravel()[ma], per) elif 'dt' in statistic: stat_name = statistic[:-3] if ti == 0: stat_val = 0 else: stat_val = self.calc_attribute_statistic(attribute, stat_name, time) \ - self.calc_attribute_statistic(attribute, stat_name, time - 1) else: stat_val = np.nan return stat_val
['def', 'calc_attribute_statistic', '(', 'self', ',', 'attribute', ',', 'statistic', ',', 'time', ')', ':', 'ti', '=', 'np', '.', 'where', '(', 'self', '.', 'times', '==', 'time', ')', '[', '0', ']', '[', '0', ']', 'ma', '=', 'np', '.', 'where', '(', 'self', '.', 'masks', '[', 'ti', ']', '.', 'ravel', '(', ')', '==', '1', ')', 'if', 'statistic', 'in', '[', "'mean'", ',', "'max'", ',', "'min'", ',', "'std'", ',', "'ptp'", ']', ':', 'stat_val', '=', 'getattr', '(', 'self', '.', 'attributes', '[', 'attribute', ']', '[', 'ti', ']', '.', 'ravel', '(', ')', '[', 'ma', ']', ',', 'statistic', ')', '(', ')', 'elif', 'statistic', '==', "'median'", ':', 'stat_val', '=', 'np', '.', 'median', '(', 'self', '.', 'attributes', '[', 'attribute', ']', '[', 'ti', ']', '.', 'ravel', '(', ')', '[', 'ma', ']', ')', 'elif', 'statistic', '==', '"skew"', ':', 'stat_val', '=', 'np', '.', 'mean', '(', 'self', '.', 'attributes', '[', 'attribute', ']', '[', 'ti', ']', '.', 'ravel', '(', ')', '[', 'ma', ']', ')', '-', 'np', '.', 'median', '(', 'self', '.', 'attributes', '[', 'attribute', ']', '[', 'ti', ']', '.', 'ravel', '(', ')', '[', 'ma', ']', ')', 'elif', "'percentile'", 'in', 'statistic', ':', 'per', '=', 'int', '(', 'statistic', '.', 'split', '(', '"_"', ')', '[', '1', ']', ')', 'stat_val', '=', 'np', '.', 'percentile', '(', 'self', '.', 'attributes', '[', 'attribute', ']', '[', 'ti', ']', '.', 'ravel', '(', ')', '[', 'ma', ']', ',', 'per', ')', 'elif', "'dt'", 'in', 'statistic', ':', 'stat_name', '=', 'statistic', '[', ':', '-', '3', ']', 'if', 'ti', '==', '0', ':', 'stat_val', '=', '0', 'else', ':', 'stat_val', '=', 'self', '.', 'calc_attribute_statistic', '(', 'attribute', ',', 'stat_name', ',', 'time', ')', '-', 'self', '.', 'calc_attribute_statistic', '(', 'attribute', ',', 'stat_name', ',', 'time', '-', '1', ')', 'else', ':', 'stat_val', '=', 'np', '.', 'nan', 'return', 'stat_val']
Calculate statistics based on the values of an attribute. The following statistics are supported: mean, max, min, std, ptp (range), median, skew (mean - median), and percentile_(percentile value). Args: attribute: Attribute extracted from model grid statistic: Name of statistic being used. time: timestep of the object being investigated Returns: The value of the statistic
['Calculate', 'statistics', 'based', 'on', 'the', 'values', 'of', 'an', 'attribute', '.', 'The', 'following', 'statistics', 'are', 'supported', ':', 'mean', 'max', 'min', 'std', 'ptp', '(', 'range', ')', 'median', 'skew', '(', 'mean', '-', 'median', ')', 'and', 'percentile_', '(', 'percentile', 'value', ')', '.']
train
https://github.com/djgagne/hagelslag/blob/6fb6c3df90bf4867e13a97d3460b14471d107df1/hagelslag/processing/STObject.py#L385-L419
7,083
hyperledger/sawtooth-core
cli/sawtooth_cli/parent_parsers.py
base_http_parser
def base_http_parser(): """Creates a parser with arguments specific to sending an HTTP request to the REST API. Returns: {ArgumentParser}: Base parser with default HTTP args """ base_parser = ArgumentParser(add_help=False) base_parser.add_argument( '--url', type=str, help="identify the URL of the validator's REST API " "(default: http://localhost:8008)") base_parser.add_argument( '-u', '--user', type=str, metavar='USERNAME[:PASSWORD]', help='specify the user to authorize request') return base_parser
python
def base_http_parser(): """Creates a parser with arguments specific to sending an HTTP request to the REST API. Returns: {ArgumentParser}: Base parser with default HTTP args """ base_parser = ArgumentParser(add_help=False) base_parser.add_argument( '--url', type=str, help="identify the URL of the validator's REST API " "(default: http://localhost:8008)") base_parser.add_argument( '-u', '--user', type=str, metavar='USERNAME[:PASSWORD]', help='specify the user to authorize request') return base_parser
['def', 'base_http_parser', '(', ')', ':', 'base_parser', '=', 'ArgumentParser', '(', 'add_help', '=', 'False', ')', 'base_parser', '.', 'add_argument', '(', "'--url'", ',', 'type', '=', 'str', ',', 'help', '=', '"identify the URL of the validator\'s REST API "', '"(default: http://localhost:8008)"', ')', 'base_parser', '.', 'add_argument', '(', "'-u'", ',', "'--user'", ',', 'type', '=', 'str', ',', 'metavar', '=', "'USERNAME[:PASSWORD]'", ',', 'help', '=', "'specify the user to authorize request'", ')', 'return', 'base_parser']
Creates a parser with arguments specific to sending an HTTP request to the REST API. Returns: {ArgumentParser}: Base parser with default HTTP args
['Creates', 'a', 'parser', 'with', 'arguments', 'specific', 'to', 'sending', 'an', 'HTTP', 'request', 'to', 'the', 'REST', 'API', '.']
train
https://github.com/hyperledger/sawtooth-core/blob/8cf473bc2207e51f02bd182d825158a57d72b098/cli/sawtooth_cli/parent_parsers.py#L19-L39
7,084
tjcsl/cslbot
cslbot/helpers/babble.py
build_markov
def build_markov(cursor, cmdchar, ctrlchan, speaker=None, initial_run=False, debug=False): """Builds a markov dictionary.""" if initial_run: cursor.query(Babble_last).delete() lastrow = cursor.query(Babble_last).first() if not lastrow: lastrow = Babble_last(last=0) cursor.add(lastrow) t = time.time() # for debug messages = get_messages(cursor, cmdchar, ctrlchan, speaker, lastrow.last) # FIXME: count can be too low if speaker is not None curr = messages[-1].id if messages else None markov = generate_markov(cursor, 1, messages, initial_run) markov2 = generate_markov(cursor, 2, messages, initial_run) if debug: print('Generated markov in %f' % (time.time() - t)) t = time.time() data, count_data = build_rows(cursor, 1, markov, initial_run) data2, count_data2 = build_rows(cursor, 2, markov2, initial_run) if debug: print('Rows built in %f' % (time.time() - t)) if initial_run: t = time.time() # for debug delete_tables(cursor) if debug: print('Tables deleted in %f' % (time.time() - t)) t = time.time() # for debug if initial_run and cursor.bind.dialect.name == 'postgresql': postgres_hack(cursor, 1, data) postgres_hack(cursor, 2, data2) else: data = [{'source': x[0], 'target': x[1], 'key': x[2], 'word': x[3], 'freq': x[4]} for x in data] cursor.bulk_insert_mappings(Babble, data) data2 = [{'source': x[0], 'target': x[1], 'key': x[2], 'word': x[3], 'freq': x[4]} for x in data2] cursor.bulk_insert_mappings(Babble2, data2) cursor.bulk_insert_mappings(Babble_count, count_data) cursor.bulk_insert_mappings(Babble_count, count_data2) if debug: print('Inserted rows in %f' % (time.time() - t)) if curr is not None: lastrow.last = curr if initial_run: if debug: t = time.time() key_index = Index('ix_babble_key', Babble.key) key_index2 = Index('ix_babble2_key', Babble2.key) key_index.create(cursor.connection()) key_index2.create(cursor.connection()) if debug: print('Created index in %f' % (time.time() - t)) t = time.time() # for debug cursor.commit() if debug: print('Commited in %f' % (time.time() - t))
python
def build_markov(cursor, cmdchar, ctrlchan, speaker=None, initial_run=False, debug=False): """Builds a markov dictionary.""" if initial_run: cursor.query(Babble_last).delete() lastrow = cursor.query(Babble_last).first() if not lastrow: lastrow = Babble_last(last=0) cursor.add(lastrow) t = time.time() # for debug messages = get_messages(cursor, cmdchar, ctrlchan, speaker, lastrow.last) # FIXME: count can be too low if speaker is not None curr = messages[-1].id if messages else None markov = generate_markov(cursor, 1, messages, initial_run) markov2 = generate_markov(cursor, 2, messages, initial_run) if debug: print('Generated markov in %f' % (time.time() - t)) t = time.time() data, count_data = build_rows(cursor, 1, markov, initial_run) data2, count_data2 = build_rows(cursor, 2, markov2, initial_run) if debug: print('Rows built in %f' % (time.time() - t)) if initial_run: t = time.time() # for debug delete_tables(cursor) if debug: print('Tables deleted in %f' % (time.time() - t)) t = time.time() # for debug if initial_run and cursor.bind.dialect.name == 'postgresql': postgres_hack(cursor, 1, data) postgres_hack(cursor, 2, data2) else: data = [{'source': x[0], 'target': x[1], 'key': x[2], 'word': x[3], 'freq': x[4]} for x in data] cursor.bulk_insert_mappings(Babble, data) data2 = [{'source': x[0], 'target': x[1], 'key': x[2], 'word': x[3], 'freq': x[4]} for x in data2] cursor.bulk_insert_mappings(Babble2, data2) cursor.bulk_insert_mappings(Babble_count, count_data) cursor.bulk_insert_mappings(Babble_count, count_data2) if debug: print('Inserted rows in %f' % (time.time() - t)) if curr is not None: lastrow.last = curr if initial_run: if debug: t = time.time() key_index = Index('ix_babble_key', Babble.key) key_index2 = Index('ix_babble2_key', Babble2.key) key_index.create(cursor.connection()) key_index2.create(cursor.connection()) if debug: print('Created index in %f' % (time.time() - t)) t = time.time() # for debug cursor.commit() if debug: print('Commited in %f' % (time.time() - t))
['def', 'build_markov', '(', 'cursor', ',', 'cmdchar', ',', 'ctrlchan', ',', 'speaker', '=', 'None', ',', 'initial_run', '=', 'False', ',', 'debug', '=', 'False', ')', ':', 'if', 'initial_run', ':', 'cursor', '.', 'query', '(', 'Babble_last', ')', '.', 'delete', '(', ')', 'lastrow', '=', 'cursor', '.', 'query', '(', 'Babble_last', ')', '.', 'first', '(', ')', 'if', 'not', 'lastrow', ':', 'lastrow', '=', 'Babble_last', '(', 'last', '=', '0', ')', 'cursor', '.', 'add', '(', 'lastrow', ')', 't', '=', 'time', '.', 'time', '(', ')', '# for debug', 'messages', '=', 'get_messages', '(', 'cursor', ',', 'cmdchar', ',', 'ctrlchan', ',', 'speaker', ',', 'lastrow', '.', 'last', ')', '# FIXME: count can be too low if speaker is not None', 'curr', '=', 'messages', '[', '-', '1', ']', '.', 'id', 'if', 'messages', 'else', 'None', 'markov', '=', 'generate_markov', '(', 'cursor', ',', '1', ',', 'messages', ',', 'initial_run', ')', 'markov2', '=', 'generate_markov', '(', 'cursor', ',', '2', ',', 'messages', ',', 'initial_run', ')', 'if', 'debug', ':', 'print', '(', "'Generated markov in %f'", '%', '(', 'time', '.', 'time', '(', ')', '-', 't', ')', ')', 't', '=', 'time', '.', 'time', '(', ')', 'data', ',', 'count_data', '=', 'build_rows', '(', 'cursor', ',', '1', ',', 'markov', ',', 'initial_run', ')', 'data2', ',', 'count_data2', '=', 'build_rows', '(', 'cursor', ',', '2', ',', 'markov2', ',', 'initial_run', ')', 'if', 'debug', ':', 'print', '(', "'Rows built in %f'", '%', '(', 'time', '.', 'time', '(', ')', '-', 't', ')', ')', 'if', 'initial_run', ':', 't', '=', 'time', '.', 'time', '(', ')', '# for debug', 'delete_tables', '(', 'cursor', ')', 'if', 'debug', ':', 'print', '(', "'Tables deleted in %f'", '%', '(', 'time', '.', 'time', '(', ')', '-', 't', ')', ')', 't', '=', 'time', '.', 'time', '(', ')', '# for debug', 'if', 'initial_run', 'and', 'cursor', '.', 'bind', '.', 'dialect', '.', 'name', '==', "'postgresql'", ':', 'postgres_hack', '(', 'cursor', ',', '1', ',', 'data', ')', 'postgres_hack', '(', 'cursor', ',', '2', ',', 'data2', ')', 'else', ':', 'data', '=', '[', '{', "'source'", ':', 'x', '[', '0', ']', ',', "'target'", ':', 'x', '[', '1', ']', ',', "'key'", ':', 'x', '[', '2', ']', ',', "'word'", ':', 'x', '[', '3', ']', ',', "'freq'", ':', 'x', '[', '4', ']', '}', 'for', 'x', 'in', 'data', ']', 'cursor', '.', 'bulk_insert_mappings', '(', 'Babble', ',', 'data', ')', 'data2', '=', '[', '{', "'source'", ':', 'x', '[', '0', ']', ',', "'target'", ':', 'x', '[', '1', ']', ',', "'key'", ':', 'x', '[', '2', ']', ',', "'word'", ':', 'x', '[', '3', ']', ',', "'freq'", ':', 'x', '[', '4', ']', '}', 'for', 'x', 'in', 'data2', ']', 'cursor', '.', 'bulk_insert_mappings', '(', 'Babble2', ',', 'data2', ')', 'cursor', '.', 'bulk_insert_mappings', '(', 'Babble_count', ',', 'count_data', ')', 'cursor', '.', 'bulk_insert_mappings', '(', 'Babble_count', ',', 'count_data2', ')', 'if', 'debug', ':', 'print', '(', "'Inserted rows in %f'", '%', '(', 'time', '.', 'time', '(', ')', '-', 't', ')', ')', 'if', 'curr', 'is', 'not', 'None', ':', 'lastrow', '.', 'last', '=', 'curr', 'if', 'initial_run', ':', 'if', 'debug', ':', 't', '=', 'time', '.', 'time', '(', ')', 'key_index', '=', 'Index', '(', "'ix_babble_key'", ',', 'Babble', '.', 'key', ')', 'key_index2', '=', 'Index', '(', "'ix_babble2_key'", ',', 'Babble2', '.', 'key', ')', 'key_index', '.', 'create', '(', 'cursor', '.', 'connection', '(', ')', ')', 'key_index2', '.', 'create', '(', 'cursor', '.', 'connection', '(', ')', ')', 'if', 'debug', ':', 'print', '(', "'Created index in %f'", '%', '(', 'time', '.', 'time', '(', ')', '-', 't', ')', ')', 't', '=', 'time', '.', 'time', '(', ')', '# for debug', 'cursor', '.', 'commit', '(', ')', 'if', 'debug', ':', 'print', '(', "'Commited in %f'", '%', '(', 'time', '.', 'time', '(', ')', '-', 't', ')', ')']
Builds a markov dictionary.
['Builds', 'a', 'markov', 'dictionary', '.']
train
https://github.com/tjcsl/cslbot/blob/aebe07be47141f61d7c180706bddfb707f19b2b5/cslbot/helpers/babble.py#L153-L206
7,085
vnmabus/dcor
dcor/_pairwise.py
pairwise
def pairwise(function, x, y=None, **kwargs): """ pairwise(function, x, y=None, *, pool=None, is_symmetric=None, **kwargs) Computes a dependency measure between each pair of elements. Parameters ---------- function: Dependency measure function. x: iterable of array_like First list of random vectors. The columns of each vector correspond with the individual random variables while the rows are individual instances of the random vector. y: array_like Second list of random vectors. The columns of each vector correspond with the individual random variables while the rows are individual instances of the random vector. If None, the :math:`x` array is used. pool: object implementing multiprocessing.Pool interface Pool of processes/threads used to delegate computations. is_symmetric: bool or None If True, the dependency function is assumed to be symmetric. If False, it is assumed non-symmetric. If None (the default value), the attribute :code:`is_symmetric` of the function object is inspected to determine if the function is symmetric. If this attribute is absent, the function is assumed to not be symmetric. kwargs: dictionary Additional options necessary. Returns ------- numpy ndarray A :math:`n \times m` matrix where the :math:`(i, j)`-th entry is the dependency between :math:`x[i]` and :math:`y[j]`. Examples -------- >>> import numpy as np >>> import dcor >>> a = [np.array([[1, 1], ... [2, 4], ... [3, 8], ... [4, 16]]), ... np.array([[9, 10], ... [11, 12], ... [13, 14], ... [15, 16]]) ... ] >>> b = [np.array([[0, 1], ... [3, 1], ... [6, 2], ... [9, 3]]), ... np.array([[5, 1], ... [8, 1], ... [13, 1], ... [21, 1]]) ... ] >>> dcor.pairwise(dcor.distance_covariance, a) array([[4.61229635, 3.35991482], [3.35991482, 2.54950976]]) >>> dcor.pairwise(dcor.distance_correlation, a, b) array([[0.98182263, 0.99901855], [0.99989466, 0.98320103]]) A pool object can be used to improve performance for a large number of computations: >>> import multiprocessing >>> pool = multiprocessing.Pool() >>> dcor.pairwise(dcor.distance_correlation, a, b, pool=pool) array([[0.98182263, 0.99901855], [0.99989466, 0.98320103]]) It is possible to force to consider that the function is symmetric or not (useful only if :math:`y` is :code:`None`): >>> dcor.pairwise(dcor.distance_covariance, a, is_symmetric=True) array([[4.61229635, 3.35991482], [3.35991482, 2.54950976]]) >>> dcor.pairwise(dcor.distance_covariance, a, is_symmetric=False) array([[4.61229635, 3.35991482], [3.35991482, 2.54950976]]) """ return _pairwise_imp(function, x, y, **kwargs)
python
def pairwise(function, x, y=None, **kwargs): """ pairwise(function, x, y=None, *, pool=None, is_symmetric=None, **kwargs) Computes a dependency measure between each pair of elements. Parameters ---------- function: Dependency measure function. x: iterable of array_like First list of random vectors. The columns of each vector correspond with the individual random variables while the rows are individual instances of the random vector. y: array_like Second list of random vectors. The columns of each vector correspond with the individual random variables while the rows are individual instances of the random vector. If None, the :math:`x` array is used. pool: object implementing multiprocessing.Pool interface Pool of processes/threads used to delegate computations. is_symmetric: bool or None If True, the dependency function is assumed to be symmetric. If False, it is assumed non-symmetric. If None (the default value), the attribute :code:`is_symmetric` of the function object is inspected to determine if the function is symmetric. If this attribute is absent, the function is assumed to not be symmetric. kwargs: dictionary Additional options necessary. Returns ------- numpy ndarray A :math:`n \times m` matrix where the :math:`(i, j)`-th entry is the dependency between :math:`x[i]` and :math:`y[j]`. Examples -------- >>> import numpy as np >>> import dcor >>> a = [np.array([[1, 1], ... [2, 4], ... [3, 8], ... [4, 16]]), ... np.array([[9, 10], ... [11, 12], ... [13, 14], ... [15, 16]]) ... ] >>> b = [np.array([[0, 1], ... [3, 1], ... [6, 2], ... [9, 3]]), ... np.array([[5, 1], ... [8, 1], ... [13, 1], ... [21, 1]]) ... ] >>> dcor.pairwise(dcor.distance_covariance, a) array([[4.61229635, 3.35991482], [3.35991482, 2.54950976]]) >>> dcor.pairwise(dcor.distance_correlation, a, b) array([[0.98182263, 0.99901855], [0.99989466, 0.98320103]]) A pool object can be used to improve performance for a large number of computations: >>> import multiprocessing >>> pool = multiprocessing.Pool() >>> dcor.pairwise(dcor.distance_correlation, a, b, pool=pool) array([[0.98182263, 0.99901855], [0.99989466, 0.98320103]]) It is possible to force to consider that the function is symmetric or not (useful only if :math:`y` is :code:`None`): >>> dcor.pairwise(dcor.distance_covariance, a, is_symmetric=True) array([[4.61229635, 3.35991482], [3.35991482, 2.54950976]]) >>> dcor.pairwise(dcor.distance_covariance, a, is_symmetric=False) array([[4.61229635, 3.35991482], [3.35991482, 2.54950976]]) """ return _pairwise_imp(function, x, y, **kwargs)
['def', 'pairwise', '(', 'function', ',', 'x', ',', 'y', '=', 'None', ',', '*', '*', 'kwargs', ')', ':', 'return', '_pairwise_imp', '(', 'function', ',', 'x', ',', 'y', ',', '*', '*', 'kwargs', ')']
pairwise(function, x, y=None, *, pool=None, is_symmetric=None, **kwargs) Computes a dependency measure between each pair of elements. Parameters ---------- function: Dependency measure function. x: iterable of array_like First list of random vectors. The columns of each vector correspond with the individual random variables while the rows are individual instances of the random vector. y: array_like Second list of random vectors. The columns of each vector correspond with the individual random variables while the rows are individual instances of the random vector. If None, the :math:`x` array is used. pool: object implementing multiprocessing.Pool interface Pool of processes/threads used to delegate computations. is_symmetric: bool or None If True, the dependency function is assumed to be symmetric. If False, it is assumed non-symmetric. If None (the default value), the attribute :code:`is_symmetric` of the function object is inspected to determine if the function is symmetric. If this attribute is absent, the function is assumed to not be symmetric. kwargs: dictionary Additional options necessary. Returns ------- numpy ndarray A :math:`n \times m` matrix where the :math:`(i, j)`-th entry is the dependency between :math:`x[i]` and :math:`y[j]`. Examples -------- >>> import numpy as np >>> import dcor >>> a = [np.array([[1, 1], ... [2, 4], ... [3, 8], ... [4, 16]]), ... np.array([[9, 10], ... [11, 12], ... [13, 14], ... [15, 16]]) ... ] >>> b = [np.array([[0, 1], ... [3, 1], ... [6, 2], ... [9, 3]]), ... np.array([[5, 1], ... [8, 1], ... [13, 1], ... [21, 1]]) ... ] >>> dcor.pairwise(dcor.distance_covariance, a) array([[4.61229635, 3.35991482], [3.35991482, 2.54950976]]) >>> dcor.pairwise(dcor.distance_correlation, a, b) array([[0.98182263, 0.99901855], [0.99989466, 0.98320103]]) A pool object can be used to improve performance for a large number of computations: >>> import multiprocessing >>> pool = multiprocessing.Pool() >>> dcor.pairwise(dcor.distance_correlation, a, b, pool=pool) array([[0.98182263, 0.99901855], [0.99989466, 0.98320103]]) It is possible to force to consider that the function is symmetric or not (useful only if :math:`y` is :code:`None`): >>> dcor.pairwise(dcor.distance_covariance, a, is_symmetric=True) array([[4.61229635, 3.35991482], [3.35991482, 2.54950976]]) >>> dcor.pairwise(dcor.distance_covariance, a, is_symmetric=False) array([[4.61229635, 3.35991482], [3.35991482, 2.54950976]])
['pairwise', '(', 'function', 'x', 'y', '=', 'None', '*', 'pool', '=', 'None', 'is_symmetric', '=', 'None', '**', 'kwargs', ')']
train
https://github.com/vnmabus/dcor/blob/b0ff1273c0a52efdabdfdadefc7ff2a49def7e8d/dcor/_pairwise.py#L10-L94
7,086
collectiveacuity/labPack
labpack/records/time.py
labDT.fromISO
def fromISO(cls, iso_string): ''' a method for constructing a labDT object from a timezone aware ISO string :param iso_string: string with date and time info in ISO format :return: labDT object ''' # validate input title = 'ISO time input for labDT.fromISO' isopattern = re.compile('\d{4}-?\d{2}-?\d{2}[\s|T].*') if not isopattern.search(iso_string): raise ValueError('\n%s is not a valid ISO string.' % title) python_datetime = dTparser.parse(iso_string) if not python_datetime.tzinfo: raise ValueError('\n%s must have timezone info.' % title) # construct labDT from parsed string dT = python_datetime.astimezone(pytz.utc) dt_kwargs = { 'year': dT.year, 'month': dT.month, 'day': dT.day, 'hour': dT.hour, 'minute': dT.minute, 'second': dT.second, 'microsecond': dT.microsecond, 'tzinfo': dT.tzinfo } return labDT(**dt_kwargs)
python
def fromISO(cls, iso_string): ''' a method for constructing a labDT object from a timezone aware ISO string :param iso_string: string with date and time info in ISO format :return: labDT object ''' # validate input title = 'ISO time input for labDT.fromISO' isopattern = re.compile('\d{4}-?\d{2}-?\d{2}[\s|T].*') if not isopattern.search(iso_string): raise ValueError('\n%s is not a valid ISO string.' % title) python_datetime = dTparser.parse(iso_string) if not python_datetime.tzinfo: raise ValueError('\n%s must have timezone info.' % title) # construct labDT from parsed string dT = python_datetime.astimezone(pytz.utc) dt_kwargs = { 'year': dT.year, 'month': dT.month, 'day': dT.day, 'hour': dT.hour, 'minute': dT.minute, 'second': dT.second, 'microsecond': dT.microsecond, 'tzinfo': dT.tzinfo } return labDT(**dt_kwargs)
['def', 'fromISO', '(', 'cls', ',', 'iso_string', ')', ':', '# validate input\r', 'title', '=', "'ISO time input for labDT.fromISO'", 'isopattern', '=', 're', '.', 'compile', '(', "'\\d{4}-?\\d{2}-?\\d{2}[\\s|T].*'", ')', 'if', 'not', 'isopattern', '.', 'search', '(', 'iso_string', ')', ':', 'raise', 'ValueError', '(', "'\\n%s is not a valid ISO string.'", '%', 'title', ')', 'python_datetime', '=', 'dTparser', '.', 'parse', '(', 'iso_string', ')', 'if', 'not', 'python_datetime', '.', 'tzinfo', ':', 'raise', 'ValueError', '(', "'\\n%s must have timezone info.'", '%', 'title', ')', '# construct labDT from parsed string\r', 'dT', '=', 'python_datetime', '.', 'astimezone', '(', 'pytz', '.', 'utc', ')', 'dt_kwargs', '=', '{', "'year'", ':', 'dT', '.', 'year', ',', "'month'", ':', 'dT', '.', 'month', ',', "'day'", ':', 'dT', '.', 'day', ',', "'hour'", ':', 'dT', '.', 'hour', ',', "'minute'", ':', 'dT', '.', 'minute', ',', "'second'", ':', 'dT', '.', 'second', ',', "'microsecond'", ':', 'dT', '.', 'microsecond', ',', "'tzinfo'", ':', 'dT', '.', 'tzinfo', '}', 'return', 'labDT', '(', '*', '*', 'dt_kwargs', ')']
a method for constructing a labDT object from a timezone aware ISO string :param iso_string: string with date and time info in ISO format :return: labDT object
['a', 'method', 'for', 'constructing', 'a', 'labDT', 'object', 'from', 'a', 'timezone', 'aware', 'ISO', 'string', ':', 'param', 'iso_string', ':', 'string', 'with', 'date', 'and', 'time', 'info', 'in', 'ISO', 'format', ':', 'return', ':', 'labDT', 'object']
train
https://github.com/collectiveacuity/labPack/blob/52949ece35e72e3cc308f54d9ffa6bfbd96805b8/labpack/records/time.py#L209-L238
7,087
Microsoft/nni
src/sdk/pynni/nni/networkmorphism_tuner/layers.py
layer_description_extractor
def layer_description_extractor(layer, node_to_id): '''get layer description. ''' layer_input = layer.input layer_output = layer.output if layer_input is not None: if isinstance(layer_input, Iterable): layer_input = list(map(lambda x: node_to_id[x], layer_input)) else: layer_input = node_to_id[layer_input] if layer_output is not None: layer_output = node_to_id[layer_output] if isinstance(layer, StubConv): return ( type(layer).__name__, layer_input, layer_output, layer.input_channel, layer.filters, layer.kernel_size, layer.stride, layer.padding, ) elif isinstance(layer, (StubDense,)): return [ type(layer).__name__, layer_input, layer_output, layer.input_units, layer.units, ] elif isinstance(layer, (StubBatchNormalization,)): return (type(layer).__name__, layer_input, layer_output, layer.num_features) elif isinstance(layer, (StubDropout,)): return (type(layer).__name__, layer_input, layer_output, layer.rate) elif isinstance(layer, StubPooling): return ( type(layer).__name__, layer_input, layer_output, layer.kernel_size, layer.stride, layer.padding, ) else: return (type(layer).__name__, layer_input, layer_output)
python
def layer_description_extractor(layer, node_to_id): '''get layer description. ''' layer_input = layer.input layer_output = layer.output if layer_input is not None: if isinstance(layer_input, Iterable): layer_input = list(map(lambda x: node_to_id[x], layer_input)) else: layer_input = node_to_id[layer_input] if layer_output is not None: layer_output = node_to_id[layer_output] if isinstance(layer, StubConv): return ( type(layer).__name__, layer_input, layer_output, layer.input_channel, layer.filters, layer.kernel_size, layer.stride, layer.padding, ) elif isinstance(layer, (StubDense,)): return [ type(layer).__name__, layer_input, layer_output, layer.input_units, layer.units, ] elif isinstance(layer, (StubBatchNormalization,)): return (type(layer).__name__, layer_input, layer_output, layer.num_features) elif isinstance(layer, (StubDropout,)): return (type(layer).__name__, layer_input, layer_output, layer.rate) elif isinstance(layer, StubPooling): return ( type(layer).__name__, layer_input, layer_output, layer.kernel_size, layer.stride, layer.padding, ) else: return (type(layer).__name__, layer_input, layer_output)
['def', 'layer_description_extractor', '(', 'layer', ',', 'node_to_id', ')', ':', 'layer_input', '=', 'layer', '.', 'input', 'layer_output', '=', 'layer', '.', 'output', 'if', 'layer_input', 'is', 'not', 'None', ':', 'if', 'isinstance', '(', 'layer_input', ',', 'Iterable', ')', ':', 'layer_input', '=', 'list', '(', 'map', '(', 'lambda', 'x', ':', 'node_to_id', '[', 'x', ']', ',', 'layer_input', ')', ')', 'else', ':', 'layer_input', '=', 'node_to_id', '[', 'layer_input', ']', 'if', 'layer_output', 'is', 'not', 'None', ':', 'layer_output', '=', 'node_to_id', '[', 'layer_output', ']', 'if', 'isinstance', '(', 'layer', ',', 'StubConv', ')', ':', 'return', '(', 'type', '(', 'layer', ')', '.', '__name__', ',', 'layer_input', ',', 'layer_output', ',', 'layer', '.', 'input_channel', ',', 'layer', '.', 'filters', ',', 'layer', '.', 'kernel_size', ',', 'layer', '.', 'stride', ',', 'layer', '.', 'padding', ',', ')', 'elif', 'isinstance', '(', 'layer', ',', '(', 'StubDense', ',', ')', ')', ':', 'return', '[', 'type', '(', 'layer', ')', '.', '__name__', ',', 'layer_input', ',', 'layer_output', ',', 'layer', '.', 'input_units', ',', 'layer', '.', 'units', ',', ']', 'elif', 'isinstance', '(', 'layer', ',', '(', 'StubBatchNormalization', ',', ')', ')', ':', 'return', '(', 'type', '(', 'layer', ')', '.', '__name__', ',', 'layer_input', ',', 'layer_output', ',', 'layer', '.', 'num_features', ')', 'elif', 'isinstance', '(', 'layer', ',', '(', 'StubDropout', ',', ')', ')', ':', 'return', '(', 'type', '(', 'layer', ')', '.', '__name__', ',', 'layer_input', ',', 'layer_output', ',', 'layer', '.', 'rate', ')', 'elif', 'isinstance', '(', 'layer', ',', 'StubPooling', ')', ':', 'return', '(', 'type', '(', 'layer', ')', '.', '__name__', ',', 'layer_input', ',', 'layer_output', ',', 'layer', '.', 'kernel_size', ',', 'layer', '.', 'stride', ',', 'layer', '.', 'padding', ',', ')', 'else', ':', 'return', '(', 'type', '(', 'layer', ')', '.', '__name__', ',', 'layer_input', ',', 'layer_output', ')']
get layer description.
['get', 'layer', 'description', '.']
train
https://github.com/Microsoft/nni/blob/c7cc8db32da8d2ec77a382a55089f4e17247ce41/src/sdk/pynni/nni/networkmorphism_tuner/layers.py#L613-L661
7,088
InfoAgeTech/django-core
django_core/auth/views.py
AuthorizationTokenRequiredViewMixin.get_authorization
def get_authorization(self, **kwargs): """Gets the authorization object for the view.""" if self.authorization is not None: return self.authorization auth_class = self.get_authorization_class() auth_user = self.get_authorization_user() auth_kwargs = { 'token': self.get_authorization_token(**kwargs) } if auth_user and auth_user.is_authenticated(): auth_kwargs['created_user'] = self.get_authorization_user() self.authorization = auth_class.objects.get_by_token_or_404( **auth_kwargs ) return self.authorization
python
def get_authorization(self, **kwargs): """Gets the authorization object for the view.""" if self.authorization is not None: return self.authorization auth_class = self.get_authorization_class() auth_user = self.get_authorization_user() auth_kwargs = { 'token': self.get_authorization_token(**kwargs) } if auth_user and auth_user.is_authenticated(): auth_kwargs['created_user'] = self.get_authorization_user() self.authorization = auth_class.objects.get_by_token_or_404( **auth_kwargs ) return self.authorization
['def', 'get_authorization', '(', 'self', ',', '*', '*', 'kwargs', ')', ':', 'if', 'self', '.', 'authorization', 'is', 'not', 'None', ':', 'return', 'self', '.', 'authorization', 'auth_class', '=', 'self', '.', 'get_authorization_class', '(', ')', 'auth_user', '=', 'self', '.', 'get_authorization_user', '(', ')', 'auth_kwargs', '=', '{', "'token'", ':', 'self', '.', 'get_authorization_token', '(', '*', '*', 'kwargs', ')', '}', 'if', 'auth_user', 'and', 'auth_user', '.', 'is_authenticated', '(', ')', ':', 'auth_kwargs', '[', "'created_user'", ']', '=', 'self', '.', 'get_authorization_user', '(', ')', 'self', '.', 'authorization', '=', 'auth_class', '.', 'objects', '.', 'get_by_token_or_404', '(', '*', '*', 'auth_kwargs', ')', 'return', 'self', '.', 'authorization']
Gets the authorization object for the view.
['Gets', 'the', 'authorization', 'object', 'for', 'the', 'view', '.']
train
https://github.com/InfoAgeTech/django-core/blob/9664a145473b75120bf71e1644e9c8086e7e8955/django_core/auth/views.py#L31-L48
7,089
wandb/client
wandb/apis/file_stream.py
FileStreamApi._handle_response
def _handle_response(self, response): """Logs dropped chunks and updates dynamic settings""" if isinstance(response, Exception): logging.error("dropped chunk %s" % response) elif response.json().get("limits"): parsed = response.json() self._api.dynamic_settings.update(parsed["limits"])
python
def _handle_response(self, response): """Logs dropped chunks and updates dynamic settings""" if isinstance(response, Exception): logging.error("dropped chunk %s" % response) elif response.json().get("limits"): parsed = response.json() self._api.dynamic_settings.update(parsed["limits"])
['def', '_handle_response', '(', 'self', ',', 'response', ')', ':', 'if', 'isinstance', '(', 'response', ',', 'Exception', ')', ':', 'logging', '.', 'error', '(', '"dropped chunk %s"', '%', 'response', ')', 'elif', 'response', '.', 'json', '(', ')', '.', 'get', '(', '"limits"', ')', ':', 'parsed', '=', 'response', '.', 'json', '(', ')', 'self', '.', '_api', '.', 'dynamic_settings', '.', 'update', '(', 'parsed', '[', '"limits"', ']', ')']
Logs dropped chunks and updates dynamic settings
['Logs', 'dropped', 'chunks', 'and', 'updates', 'dynamic', 'settings']
train
https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/apis/file_stream.py#L179-L185
7,090
tilezen/tilequeue
tilequeue/rawr.py
SqsQueue.done
def done(self, msg_handle): """acknowledge completion of message""" self.sqs_client.delete_message( QueueUrl=self.queue_url, ReceiptHandle=msg_handle.handle, )
python
def done(self, msg_handle): """acknowledge completion of message""" self.sqs_client.delete_message( QueueUrl=self.queue_url, ReceiptHandle=msg_handle.handle, )
['def', 'done', '(', 'self', ',', 'msg_handle', ')', ':', 'self', '.', 'sqs_client', '.', 'delete_message', '(', 'QueueUrl', '=', 'self', '.', 'queue_url', ',', 'ReceiptHandle', '=', 'msg_handle', '.', 'handle', ',', ')']
acknowledge completion of message
['acknowledge', 'completion', 'of', 'message']
train
https://github.com/tilezen/tilequeue/blob/d7b9484ab92e246eb2773949c784ebb37c731e28/tilequeue/rawr.py#L129-L134
7,091
scarface-4711/denonavr
denonavr/denonavr.py
DenonAVR._update_avr
def _update_avr(self): """ Get the latest status information from device. Method queries device via HTTP and updates instance attributes. Returns "True" on success and "False" on fail. This method is for pre 2016 AVR(-X) devices """ # Set all tags to be evaluated relevant_tags = {"Power": None, "InputFuncSelect": None, "Mute": None, "MasterVolume": None} # Sound mode information only available in main zone if self._zone == "Main" and self._support_sound_mode: relevant_tags["selectSurround"] = None relevant_tags["SurrMode"] = None # Get status XML from Denon receiver via HTTP try: root = self.get_status_xml(self._urls.status) except ValueError: pass except requests.exceptions.RequestException: # On timeout and connection error, the device is probably off self._power = POWER_OFF else: # Get the tags from this XML relevant_tags = self._get_status_from_xml_tags(root, relevant_tags) # Second option to update variables from different source if relevant_tags and self._power != POWER_OFF: try: root = self.get_status_xml(self._urls.mainzone) except (ValueError, requests.exceptions.RequestException): pass else: # Get the tags from this XML relevant_tags = self._get_status_from_xml_tags(root, relevant_tags) # Error message if still some variables are not updated yet if relevant_tags and self._power != POWER_OFF: _LOGGER.error("Missing status information from XML of %s for: %s", self._zone, ", ".join(relevant_tags.keys())) # Set state and media image URL based on current source # and power status if (self._power == POWER_ON) and ( self._input_func in self._playing_func_list): if self._update_media_data(): pass else: _LOGGER.error( "Update of media data for source %s in %s failed", self._input_func, self._zone) elif self._power == POWER_ON: self._state = STATE_ON self._title = None self._artist = None self._album = None self._band = None self._frequency = None self._station = None self._image_url = None else: self._state = STATE_OFF self._title = None self._artist = None self._album = None self._band = None self._frequency = None self._station = None # Get/update sources list if current source is not known yet if (self._input_func not in self._input_func_list and self._input_func is not None): if self._update_input_func_list(): _LOGGER.info("List of input functions refreshed.") # If input function is still not known, create new entry. if (self._input_func not in self._input_func_list and self._input_func is not None): inputfunc = self._input_func self._input_func_list_rev[inputfunc] = inputfunc self._input_func_list[inputfunc] = inputfunc else: _LOGGER.error(( "Input function list for Denon receiver at host %s " "could not be updated."), self._host) # Finished return True
python
def _update_avr(self): """ Get the latest status information from device. Method queries device via HTTP and updates instance attributes. Returns "True" on success and "False" on fail. This method is for pre 2016 AVR(-X) devices """ # Set all tags to be evaluated relevant_tags = {"Power": None, "InputFuncSelect": None, "Mute": None, "MasterVolume": None} # Sound mode information only available in main zone if self._zone == "Main" and self._support_sound_mode: relevant_tags["selectSurround"] = None relevant_tags["SurrMode"] = None # Get status XML from Denon receiver via HTTP try: root = self.get_status_xml(self._urls.status) except ValueError: pass except requests.exceptions.RequestException: # On timeout and connection error, the device is probably off self._power = POWER_OFF else: # Get the tags from this XML relevant_tags = self._get_status_from_xml_tags(root, relevant_tags) # Second option to update variables from different source if relevant_tags and self._power != POWER_OFF: try: root = self.get_status_xml(self._urls.mainzone) except (ValueError, requests.exceptions.RequestException): pass else: # Get the tags from this XML relevant_tags = self._get_status_from_xml_tags(root, relevant_tags) # Error message if still some variables are not updated yet if relevant_tags and self._power != POWER_OFF: _LOGGER.error("Missing status information from XML of %s for: %s", self._zone, ", ".join(relevant_tags.keys())) # Set state and media image URL based on current source # and power status if (self._power == POWER_ON) and ( self._input_func in self._playing_func_list): if self._update_media_data(): pass else: _LOGGER.error( "Update of media data for source %s in %s failed", self._input_func, self._zone) elif self._power == POWER_ON: self._state = STATE_ON self._title = None self._artist = None self._album = None self._band = None self._frequency = None self._station = None self._image_url = None else: self._state = STATE_OFF self._title = None self._artist = None self._album = None self._band = None self._frequency = None self._station = None # Get/update sources list if current source is not known yet if (self._input_func not in self._input_func_list and self._input_func is not None): if self._update_input_func_list(): _LOGGER.info("List of input functions refreshed.") # If input function is still not known, create new entry. if (self._input_func not in self._input_func_list and self._input_func is not None): inputfunc = self._input_func self._input_func_list_rev[inputfunc] = inputfunc self._input_func_list[inputfunc] = inputfunc else: _LOGGER.error(( "Input function list for Denon receiver at host %s " "could not be updated."), self._host) # Finished return True
['def', '_update_avr', '(', 'self', ')', ':', '# Set all tags to be evaluated', 'relevant_tags', '=', '{', '"Power"', ':', 'None', ',', '"InputFuncSelect"', ':', 'None', ',', '"Mute"', ':', 'None', ',', '"MasterVolume"', ':', 'None', '}', '# Sound mode information only available in main zone', 'if', 'self', '.', '_zone', '==', '"Main"', 'and', 'self', '.', '_support_sound_mode', ':', 'relevant_tags', '[', '"selectSurround"', ']', '=', 'None', 'relevant_tags', '[', '"SurrMode"', ']', '=', 'None', '# Get status XML from Denon receiver via HTTP', 'try', ':', 'root', '=', 'self', '.', 'get_status_xml', '(', 'self', '.', '_urls', '.', 'status', ')', 'except', 'ValueError', ':', 'pass', 'except', 'requests', '.', 'exceptions', '.', 'RequestException', ':', '# On timeout and connection error, the device is probably off', 'self', '.', '_power', '=', 'POWER_OFF', 'else', ':', '# Get the tags from this XML', 'relevant_tags', '=', 'self', '.', '_get_status_from_xml_tags', '(', 'root', ',', 'relevant_tags', ')', '# Second option to update variables from different source', 'if', 'relevant_tags', 'and', 'self', '.', '_power', '!=', 'POWER_OFF', ':', 'try', ':', 'root', '=', 'self', '.', 'get_status_xml', '(', 'self', '.', '_urls', '.', 'mainzone', ')', 'except', '(', 'ValueError', ',', 'requests', '.', 'exceptions', '.', 'RequestException', ')', ':', 'pass', 'else', ':', '# Get the tags from this XML', 'relevant_tags', '=', 'self', '.', '_get_status_from_xml_tags', '(', 'root', ',', 'relevant_tags', ')', '# Error message if still some variables are not updated yet', 'if', 'relevant_tags', 'and', 'self', '.', '_power', '!=', 'POWER_OFF', ':', '_LOGGER', '.', 'error', '(', '"Missing status information from XML of %s for: %s"', ',', 'self', '.', '_zone', ',', '", "', '.', 'join', '(', 'relevant_tags', '.', 'keys', '(', ')', ')', ')', '# Set state and media image URL based on current source', '# and power status', 'if', '(', 'self', '.', '_power', '==', 'POWER_ON', ')', 'and', '(', 'self', '.', '_input_func', 'in', 'self', '.', '_playing_func_list', ')', ':', 'if', 'self', '.', '_update_media_data', '(', ')', ':', 'pass', 'else', ':', '_LOGGER', '.', 'error', '(', '"Update of media data for source %s in %s failed"', ',', 'self', '.', '_input_func', ',', 'self', '.', '_zone', ')', 'elif', 'self', '.', '_power', '==', 'POWER_ON', ':', 'self', '.', '_state', '=', 'STATE_ON', 'self', '.', '_title', '=', 'None', 'self', '.', '_artist', '=', 'None', 'self', '.', '_album', '=', 'None', 'self', '.', '_band', '=', 'None', 'self', '.', '_frequency', '=', 'None', 'self', '.', '_station', '=', 'None', 'self', '.', '_image_url', '=', 'None', 'else', ':', 'self', '.', '_state', '=', 'STATE_OFF', 'self', '.', '_title', '=', 'None', 'self', '.', '_artist', '=', 'None', 'self', '.', '_album', '=', 'None', 'self', '.', '_band', '=', 'None', 'self', '.', '_frequency', '=', 'None', 'self', '.', '_station', '=', 'None', '# Get/update sources list if current source is not known yet', 'if', '(', 'self', '.', '_input_func', 'not', 'in', 'self', '.', '_input_func_list', 'and', 'self', '.', '_input_func', 'is', 'not', 'None', ')', ':', 'if', 'self', '.', '_update_input_func_list', '(', ')', ':', '_LOGGER', '.', 'info', '(', '"List of input functions refreshed."', ')', '# If input function is still not known, create new entry.', 'if', '(', 'self', '.', '_input_func', 'not', 'in', 'self', '.', '_input_func_list', 'and', 'self', '.', '_input_func', 'is', 'not', 'None', ')', ':', 'inputfunc', '=', 'self', '.', '_input_func', 'self', '.', '_input_func_list_rev', '[', 'inputfunc', ']', '=', 'inputfunc', 'self', '.', '_input_func_list', '[', 'inputfunc', ']', '=', 'inputfunc', 'else', ':', '_LOGGER', '.', 'error', '(', '(', '"Input function list for Denon receiver at host %s "', '"could not be updated."', ')', ',', 'self', '.', '_host', ')', '# Finished', 'return', 'True']
Get the latest status information from device. Method queries device via HTTP and updates instance attributes. Returns "True" on success and "False" on fail. This method is for pre 2016 AVR(-X) devices
['Get', 'the', 'latest', 'status', 'information', 'from', 'device', '.']
train
https://github.com/scarface-4711/denonavr/blob/59a136e27b43cb1d1e140cf67705087b3aa377cd/denonavr/denonavr.py#L410-L501
7,092
miquelo/resort
packages/resort/component/glassfish.py
JDBCConnectionPool.insert
def insert(self, context): """ Create connection pool. :param resort.engine.execution.Context context: Current execution context. """ status_code, msg = self.__endpoint.post( "/resources/jdbc-connection-pool", data={ "id": self.__name, "resType": self.__res_type, "datasourceClassname": self.__ds_classname, "property": props_value(self.__props) } ) self.__available = True
python
def insert(self, context): """ Create connection pool. :param resort.engine.execution.Context context: Current execution context. """ status_code, msg = self.__endpoint.post( "/resources/jdbc-connection-pool", data={ "id": self.__name, "resType": self.__res_type, "datasourceClassname": self.__ds_classname, "property": props_value(self.__props) } ) self.__available = True
['def', 'insert', '(', 'self', ',', 'context', ')', ':', 'status_code', ',', 'msg', '=', 'self', '.', '__endpoint', '.', 'post', '(', '"/resources/jdbc-connection-pool"', ',', 'data', '=', '{', '"id"', ':', 'self', '.', '__name', ',', '"resType"', ':', 'self', '.', '__res_type', ',', '"datasourceClassname"', ':', 'self', '.', '__ds_classname', ',', '"property"', ':', 'props_value', '(', 'self', '.', '__props', ')', '}', ')', 'self', '.', '__available', '=', 'True']
Create connection pool. :param resort.engine.execution.Context context: Current execution context.
['Create', 'connection', 'pool', '.', ':', 'param', 'resort', '.', 'engine', '.', 'execution', '.', 'Context', 'context', ':', 'Current', 'execution', 'context', '.']
train
https://github.com/miquelo/resort/blob/097a25d3257c91a75c194fd44c2797ab356f85dd/packages/resort/component/glassfish.py#L783-L801
7,093
coursera-dl/coursera-dl
coursera/api.py
CourseraOnDemand.extract_links_from_peer_assignment
def extract_links_from_peer_assignment(self, element_id): """ Return a dictionary with links to supplement files (pdf, csv, zip, ipynb, html and so on) extracted from peer assignment. @param element_id: Element ID to extract files from. @type element_id: str @return: @see CourseraOnDemand._extract_links_from_text """ logging.debug( 'Gathering supplement URLs for element_id <%s>.', element_id) try: # Assignment text (instructions) contains asset tags which describe # supplementary files. text = ''.join(self._extract_peer_assignment_text(element_id)) if not text: return {} supplement_links = self._extract_links_from_text(text) instructions = (IN_MEMORY_MARKER + self._markup_to_html(text), 'peer_assignment_instructions') extend_supplement_links( supplement_links, {IN_MEMORY_EXTENSION: [instructions]}) return supplement_links except requests.exceptions.HTTPError as exception: logging.error('Could not download peer assignment %s: %s', element_id, exception) if is_debug_run(): logging.exception('Could not download peer assignment %s: %s', element_id, exception) return None
python
def extract_links_from_peer_assignment(self, element_id): """ Return a dictionary with links to supplement files (pdf, csv, zip, ipynb, html and so on) extracted from peer assignment. @param element_id: Element ID to extract files from. @type element_id: str @return: @see CourseraOnDemand._extract_links_from_text """ logging.debug( 'Gathering supplement URLs for element_id <%s>.', element_id) try: # Assignment text (instructions) contains asset tags which describe # supplementary files. text = ''.join(self._extract_peer_assignment_text(element_id)) if not text: return {} supplement_links = self._extract_links_from_text(text) instructions = (IN_MEMORY_MARKER + self._markup_to_html(text), 'peer_assignment_instructions') extend_supplement_links( supplement_links, {IN_MEMORY_EXTENSION: [instructions]}) return supplement_links except requests.exceptions.HTTPError as exception: logging.error('Could not download peer assignment %s: %s', element_id, exception) if is_debug_run(): logging.exception('Could not download peer assignment %s: %s', element_id, exception) return None
['def', 'extract_links_from_peer_assignment', '(', 'self', ',', 'element_id', ')', ':', 'logging', '.', 'debug', '(', "'Gathering supplement URLs for element_id <%s>.'", ',', 'element_id', ')', 'try', ':', '# Assignment text (instructions) contains asset tags which describe', '# supplementary files.', 'text', '=', "''", '.', 'join', '(', 'self', '.', '_extract_peer_assignment_text', '(', 'element_id', ')', ')', 'if', 'not', 'text', ':', 'return', '{', '}', 'supplement_links', '=', 'self', '.', '_extract_links_from_text', '(', 'text', ')', 'instructions', '=', '(', 'IN_MEMORY_MARKER', '+', 'self', '.', '_markup_to_html', '(', 'text', ')', ',', "'peer_assignment_instructions'", ')', 'extend_supplement_links', '(', 'supplement_links', ',', '{', 'IN_MEMORY_EXTENSION', ':', '[', 'instructions', ']', '}', ')', 'return', 'supplement_links', 'except', 'requests', '.', 'exceptions', '.', 'HTTPError', 'as', 'exception', ':', 'logging', '.', 'error', '(', "'Could not download peer assignment %s: %s'", ',', 'element_id', ',', 'exception', ')', 'if', 'is_debug_run', '(', ')', ':', 'logging', '.', 'exception', '(', "'Could not download peer assignment %s: %s'", ',', 'element_id', ',', 'exception', ')', 'return', 'None']
Return a dictionary with links to supplement files (pdf, csv, zip, ipynb, html and so on) extracted from peer assignment. @param element_id: Element ID to extract files from. @type element_id: str @return: @see CourseraOnDemand._extract_links_from_text
['Return', 'a', 'dictionary', 'with', 'links', 'to', 'supplement', 'files', '(', 'pdf', 'csv', 'zip', 'ipynb', 'html', 'and', 'so', 'on', ')', 'extracted', 'from', 'peer', 'assignment', '.']
train
https://github.com/coursera-dl/coursera-dl/blob/9b434bcf3c4011bf3181429fe674633ae5fb7d4d/coursera/api.py#L1204-L1236
7,094
openego/eDisGo
edisgo/grid/network.py
TimeSeriesControl._worst_case_generation
def _worst_case_generation(self, worst_case_scale_factors, modes): """ Define worst case generation time series for fluctuating and dispatchable generators. Parameters ---------- worst_case_scale_factors : dict Scale factors defined in config file 'config_timeseries.cfg'. Scale factors describe actual power to nominal power ratio of in worst-case scenarios. modes : list List with worst-cases to generate time series for. Can be 'feedin_case', 'load_case' or both. """ self.timeseries.generation_fluctuating = pd.DataFrame( {'solar': [worst_case_scale_factors[ '{}_feedin_pv'.format(mode)] for mode in modes], 'wind': [worst_case_scale_factors[ '{}_feedin_other'.format(mode)] for mode in modes]}, index=self.timeseries.timeindex) self.timeseries.generation_dispatchable = pd.DataFrame( {'other': [worst_case_scale_factors[ '{}_feedin_other'.format(mode)] for mode in modes]}, index=self.timeseries.timeindex)
python
def _worst_case_generation(self, worst_case_scale_factors, modes): """ Define worst case generation time series for fluctuating and dispatchable generators. Parameters ---------- worst_case_scale_factors : dict Scale factors defined in config file 'config_timeseries.cfg'. Scale factors describe actual power to nominal power ratio of in worst-case scenarios. modes : list List with worst-cases to generate time series for. Can be 'feedin_case', 'load_case' or both. """ self.timeseries.generation_fluctuating = pd.DataFrame( {'solar': [worst_case_scale_factors[ '{}_feedin_pv'.format(mode)] for mode in modes], 'wind': [worst_case_scale_factors[ '{}_feedin_other'.format(mode)] for mode in modes]}, index=self.timeseries.timeindex) self.timeseries.generation_dispatchable = pd.DataFrame( {'other': [worst_case_scale_factors[ '{}_feedin_other'.format(mode)] for mode in modes]}, index=self.timeseries.timeindex)
['def', '_worst_case_generation', '(', 'self', ',', 'worst_case_scale_factors', ',', 'modes', ')', ':', 'self', '.', 'timeseries', '.', 'generation_fluctuating', '=', 'pd', '.', 'DataFrame', '(', '{', "'solar'", ':', '[', 'worst_case_scale_factors', '[', "'{}_feedin_pv'", '.', 'format', '(', 'mode', ')', ']', 'for', 'mode', 'in', 'modes', ']', ',', "'wind'", ':', '[', 'worst_case_scale_factors', '[', "'{}_feedin_other'", '.', 'format', '(', 'mode', ')', ']', 'for', 'mode', 'in', 'modes', ']', '}', ',', 'index', '=', 'self', '.', 'timeseries', '.', 'timeindex', ')', 'self', '.', 'timeseries', '.', 'generation_dispatchable', '=', 'pd', '.', 'DataFrame', '(', '{', "'other'", ':', '[', 'worst_case_scale_factors', '[', "'{}_feedin_other'", '.', 'format', '(', 'mode', ')', ']', 'for', 'mode', 'in', 'modes', ']', '}', ',', 'index', '=', 'self', '.', 'timeseries', '.', 'timeindex', ')']
Define worst case generation time series for fluctuating and dispatchable generators. Parameters ---------- worst_case_scale_factors : dict Scale factors defined in config file 'config_timeseries.cfg'. Scale factors describe actual power to nominal power ratio of in worst-case scenarios. modes : list List with worst-cases to generate time series for. Can be 'feedin_case', 'load_case' or both.
['Define', 'worst', 'case', 'generation', 'time', 'series', 'for', 'fluctuating', 'and', 'dispatchable', 'generators', '.']
train
https://github.com/openego/eDisGo/blob/e6245bdaf236f9c49dbda5a18c1c458290f41e2b/edisgo/grid/network.py#L1444-L1471
7,095
KnightHawk3/Hummingbird
hummingbird/__init__.py
Hummingbird._query_
def _query_(self, path, method, params={}): """Used internally for requests. :param str path: The path to hit. :param str method: The method to use, either `'GET'` or `'POST.` :param dict data: The optional paramters to the `GET` or the data to `POST`. :returns: Requests object -- Requires you to handle the status codes yourself. """ if method == "POST": url = '{API_URL}{API_PATH}'.format(API_URL=self.api_url, API_PATH=path) r = requests.post(url, data=json.dumps(params), headers=self.headers) return r elif method == "GET": url = '{API_URL}{API_PATH}'.format(API_URL=self.api_url, API_PATH=path) r = requests.get(url, params=params, headers=self.headers) return r
python
def _query_(self, path, method, params={}): """Used internally for requests. :param str path: The path to hit. :param str method: The method to use, either `'GET'` or `'POST.` :param dict data: The optional paramters to the `GET` or the data to `POST`. :returns: Requests object -- Requires you to handle the status codes yourself. """ if method == "POST": url = '{API_URL}{API_PATH}'.format(API_URL=self.api_url, API_PATH=path) r = requests.post(url, data=json.dumps(params), headers=self.headers) return r elif method == "GET": url = '{API_URL}{API_PATH}'.format(API_URL=self.api_url, API_PATH=path) r = requests.get(url, params=params, headers=self.headers) return r
['def', '_query_', '(', 'self', ',', 'path', ',', 'method', ',', 'params', '=', '{', '}', ')', ':', 'if', 'method', '==', '"POST"', ':', 'url', '=', "'{API_URL}{API_PATH}'", '.', 'format', '(', 'API_URL', '=', 'self', '.', 'api_url', ',', 'API_PATH', '=', 'path', ')', 'r', '=', 'requests', '.', 'post', '(', 'url', ',', 'data', '=', 'json', '.', 'dumps', '(', 'params', ')', ',', 'headers', '=', 'self', '.', 'headers', ')', 'return', 'r', 'elif', 'method', '==', '"GET"', ':', 'url', '=', "'{API_URL}{API_PATH}'", '.', 'format', '(', 'API_URL', '=', 'self', '.', 'api_url', ',', 'API_PATH', '=', 'path', ')', 'r', '=', 'requests', '.', 'get', '(', 'url', ',', 'params', '=', 'params', ',', 'headers', '=', 'self', '.', 'headers', ')', 'return', 'r']
Used internally for requests. :param str path: The path to hit. :param str method: The method to use, either `'GET'` or `'POST.` :param dict data: The optional paramters to the `GET` or the data to `POST`. :returns: Requests object -- Requires you to handle the status codes yourself.
['Used', 'internally', 'for', 'requests', '.']
train
https://github.com/KnightHawk3/Hummingbird/blob/10b918534b112c95a93f04dd76bfb7479c4f3f21/hummingbird/__init__.py#L27-L50
7,096
bxlab/bx-python
lib/bx_extras/pstat.py
duplicates
def duplicates(inlist): """ Returns duplicate items in the FIRST dimension of the passed list. Usage: duplicates (inlist) """ dups = [] for i in range(len(inlist)): if inlist[i] in inlist[i+1:]: dups.append(inlist[i]) return dups
python
def duplicates(inlist): """ Returns duplicate items in the FIRST dimension of the passed list. Usage: duplicates (inlist) """ dups = [] for i in range(len(inlist)): if inlist[i] in inlist[i+1:]: dups.append(inlist[i]) return dups
['def', 'duplicates', '(', 'inlist', ')', ':', 'dups', '=', '[', ']', 'for', 'i', 'in', 'range', '(', 'len', '(', 'inlist', ')', ')', ':', 'if', 'inlist', '[', 'i', ']', 'in', 'inlist', '[', 'i', '+', '1', ':', ']', ':', 'dups', '.', 'append', '(', 'inlist', '[', 'i', ']', ')', 'return', 'dups']
Returns duplicate items in the FIRST dimension of the passed list. Usage: duplicates (inlist)
['Returns', 'duplicate', 'items', 'in', 'the', 'FIRST', 'dimension', 'of', 'the', 'passed', 'list', '.']
train
https://github.com/bxlab/bx-python/blob/09cb725284803df90a468d910f2274628d8647de/lib/bx_extras/pstat.py#L676-L686
7,097
d0c-s4vage/pfp
pfp/interp.py
PfpInterp._handle_for
def _handle_for(self, node, scope, ctxt, stream): """Handle For nodes :node: TODO :scope: TODO :ctxt: TODO :stream: TODO :returns: TODO """ self._dlog("handling for") if node.init is not None: # perform the init self._handle_node(node.init, scope, ctxt, stream) while node.cond is None or self._handle_node(node.cond, scope, ctxt, stream): if node.stmt is not None: try: # do the for body self._handle_node(node.stmt, scope, ctxt, stream) except errors.InterpBreak as e: break # we still need to interpret the "next" statement, # so just pass except errors.InterpContinue as e: pass if node.next is not None: # do the next statement self._handle_node(node.next, scope, ctxt, stream)
python
def _handle_for(self, node, scope, ctxt, stream): """Handle For nodes :node: TODO :scope: TODO :ctxt: TODO :stream: TODO :returns: TODO """ self._dlog("handling for") if node.init is not None: # perform the init self._handle_node(node.init, scope, ctxt, stream) while node.cond is None or self._handle_node(node.cond, scope, ctxt, stream): if node.stmt is not None: try: # do the for body self._handle_node(node.stmt, scope, ctxt, stream) except errors.InterpBreak as e: break # we still need to interpret the "next" statement, # so just pass except errors.InterpContinue as e: pass if node.next is not None: # do the next statement self._handle_node(node.next, scope, ctxt, stream)
['def', '_handle_for', '(', 'self', ',', 'node', ',', 'scope', ',', 'ctxt', ',', 'stream', ')', ':', 'self', '.', '_dlog', '(', '"handling for"', ')', 'if', 'node', '.', 'init', 'is', 'not', 'None', ':', '# perform the init', 'self', '.', '_handle_node', '(', 'node', '.', 'init', ',', 'scope', ',', 'ctxt', ',', 'stream', ')', 'while', 'node', '.', 'cond', 'is', 'None', 'or', 'self', '.', '_handle_node', '(', 'node', '.', 'cond', ',', 'scope', ',', 'ctxt', ',', 'stream', ')', ':', 'if', 'node', '.', 'stmt', 'is', 'not', 'None', ':', 'try', ':', '# do the for body', 'self', '.', '_handle_node', '(', 'node', '.', 'stmt', ',', 'scope', ',', 'ctxt', ',', 'stream', ')', 'except', 'errors', '.', 'InterpBreak', 'as', 'e', ':', 'break', '# we still need to interpret the "next" statement,', '# so just pass', 'except', 'errors', '.', 'InterpContinue', 'as', 'e', ':', 'pass', 'if', 'node', '.', 'next', 'is', 'not', 'None', ':', '# do the next statement', 'self', '.', '_handle_node', '(', 'node', '.', 'next', ',', 'scope', ',', 'ctxt', ',', 'stream', ')']
Handle For nodes :node: TODO :scope: TODO :ctxt: TODO :stream: TODO :returns: TODO
['Handle', 'For', 'nodes']
train
https://github.com/d0c-s4vage/pfp/blob/32f2d34fdec1c70019fa83c7006d5e3be0f92fcd/pfp/interp.py#L2060-L2090
7,098
gem/oq-engine
openquake/hazardlib/contexts.py
ContextMaker.add_rup_params
def add_rup_params(self, rupture): """ Add .REQUIRES_RUPTURE_PARAMETERS to the rupture """ for param in self.REQUIRES_RUPTURE_PARAMETERS: if param == 'mag': value = rupture.mag elif param == 'strike': value = rupture.surface.get_strike() elif param == 'dip': value = rupture.surface.get_dip() elif param == 'rake': value = rupture.rake elif param == 'ztor': value = rupture.surface.get_top_edge_depth() elif param == 'hypo_lon': value = rupture.hypocenter.longitude elif param == 'hypo_lat': value = rupture.hypocenter.latitude elif param == 'hypo_depth': value = rupture.hypocenter.depth elif param == 'width': value = rupture.surface.get_width() else: raise ValueError('%s requires unknown rupture parameter %r' % (type(self).__name__, param)) setattr(rupture, param, value)
python
def add_rup_params(self, rupture): """ Add .REQUIRES_RUPTURE_PARAMETERS to the rupture """ for param in self.REQUIRES_RUPTURE_PARAMETERS: if param == 'mag': value = rupture.mag elif param == 'strike': value = rupture.surface.get_strike() elif param == 'dip': value = rupture.surface.get_dip() elif param == 'rake': value = rupture.rake elif param == 'ztor': value = rupture.surface.get_top_edge_depth() elif param == 'hypo_lon': value = rupture.hypocenter.longitude elif param == 'hypo_lat': value = rupture.hypocenter.latitude elif param == 'hypo_depth': value = rupture.hypocenter.depth elif param == 'width': value = rupture.surface.get_width() else: raise ValueError('%s requires unknown rupture parameter %r' % (type(self).__name__, param)) setattr(rupture, param, value)
['def', 'add_rup_params', '(', 'self', ',', 'rupture', ')', ':', 'for', 'param', 'in', 'self', '.', 'REQUIRES_RUPTURE_PARAMETERS', ':', 'if', 'param', '==', "'mag'", ':', 'value', '=', 'rupture', '.', 'mag', 'elif', 'param', '==', "'strike'", ':', 'value', '=', 'rupture', '.', 'surface', '.', 'get_strike', '(', ')', 'elif', 'param', '==', "'dip'", ':', 'value', '=', 'rupture', '.', 'surface', '.', 'get_dip', '(', ')', 'elif', 'param', '==', "'rake'", ':', 'value', '=', 'rupture', '.', 'rake', 'elif', 'param', '==', "'ztor'", ':', 'value', '=', 'rupture', '.', 'surface', '.', 'get_top_edge_depth', '(', ')', 'elif', 'param', '==', "'hypo_lon'", ':', 'value', '=', 'rupture', '.', 'hypocenter', '.', 'longitude', 'elif', 'param', '==', "'hypo_lat'", ':', 'value', '=', 'rupture', '.', 'hypocenter', '.', 'latitude', 'elif', 'param', '==', "'hypo_depth'", ':', 'value', '=', 'rupture', '.', 'hypocenter', '.', 'depth', 'elif', 'param', '==', "'width'", ':', 'value', '=', 'rupture', '.', 'surface', '.', 'get_width', '(', ')', 'else', ':', 'raise', 'ValueError', '(', "'%s requires unknown rupture parameter %r'", '%', '(', 'type', '(', 'self', ')', '.', '__name__', ',', 'param', ')', ')', 'setattr', '(', 'rupture', ',', 'param', ',', 'value', ')']
Add .REQUIRES_RUPTURE_PARAMETERS to the rupture
['Add', '.', 'REQUIRES_RUPTURE_PARAMETERS', 'to', 'the', 'rupture']
train
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hazardlib/contexts.py#L146-L172
7,099
StackStorm/pybind
pybind/slxos/v17r_1_01a/interface/ethernet/__init__.py
ethernet._set_link_error_disable
def _set_link_error_disable(self, v, load=False): """ Setter method for link_error_disable, mapped from YANG variable /interface/ethernet/link_error_disable (container) If this variable is read-only (config: false) in the source YANG file, then _set_link_error_disable is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_link_error_disable() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=link_error_disable.link_error_disable, is_container='container', presence=False, yang_name="link-error-disable", rest_name="link-error-disable", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'port link dampening', u'callpoint': u'Pld', u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-incomplete-command': None, u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-pld', defining_module='brocade-pld', yang_type='container', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """link_error_disable must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=link_error_disable.link_error_disable, is_container='container', presence=False, yang_name="link-error-disable", rest_name="link-error-disable", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'port link dampening', u'callpoint': u'Pld', u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-incomplete-command': None, u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-pld', defining_module='brocade-pld', yang_type='container', is_config=True)""", }) self.__link_error_disable = t if hasattr(self, '_set'): self._set()
python
def _set_link_error_disable(self, v, load=False): """ Setter method for link_error_disable, mapped from YANG variable /interface/ethernet/link_error_disable (container) If this variable is read-only (config: false) in the source YANG file, then _set_link_error_disable is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_link_error_disable() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=link_error_disable.link_error_disable, is_container='container', presence=False, yang_name="link-error-disable", rest_name="link-error-disable", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'port link dampening', u'callpoint': u'Pld', u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-incomplete-command': None, u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-pld', defining_module='brocade-pld', yang_type='container', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """link_error_disable must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=link_error_disable.link_error_disable, is_container='container', presence=False, yang_name="link-error-disable", rest_name="link-error-disable", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'port link dampening', u'callpoint': u'Pld', u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-incomplete-command': None, u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-pld', defining_module='brocade-pld', yang_type='container', is_config=True)""", }) self.__link_error_disable = t if hasattr(self, '_set'): self._set()
['def', '_set_link_error_disable', '(', 'self', ',', 'v', ',', 'load', '=', 'False', ')', ':', 'if', 'hasattr', '(', 'v', ',', '"_utype"', ')', ':', 'v', '=', 'v', '.', '_utype', '(', 'v', ')', 'try', ':', 't', '=', 'YANGDynClass', '(', 'v', ',', 'base', '=', 'link_error_disable', '.', 'link_error_disable', ',', 'is_container', '=', "'container'", ',', 'presence', '=', 'False', ',', 'yang_name', '=', '"link-error-disable"', ',', 'rest_name', '=', '"link-error-disable"', ',', 'parent', '=', 'self', ',', 'path_helper', '=', 'self', '.', '_path_helper', ',', 'extmethods', '=', 'self', '.', '_extmethods', ',', 'register_paths', '=', 'True', ',', 'extensions', '=', '{', "u'tailf-common'", ':', '{', "u'info'", ':', "u'port link dampening'", ',', "u'callpoint'", ':', "u'Pld'", ',', "u'cli-compact-syntax'", ':', 'None', ',', "u'cli-sequence-commands'", ':', 'None', ',', "u'cli-incomplete-command'", ':', 'None', ',', "u'cli-full-no'", ':', 'None', '}', '}', ',', 'namespace', '=', "'urn:brocade.com:mgmt:brocade-pld'", ',', 'defining_module', '=', "'brocade-pld'", ',', 'yang_type', '=', "'container'", ',', 'is_config', '=', 'True', ')', 'except', '(', 'TypeError', ',', 'ValueError', ')', ':', 'raise', 'ValueError', '(', '{', "'error-string'", ':', '"""link_error_disable must be of a type compatible with container"""', ',', "'defined-type'", ':', '"container"', ',', "'generated-type'", ':', '"""YANGDynClass(base=link_error_disable.link_error_disable, is_container=\'container\', presence=False, yang_name="link-error-disable", rest_name="link-error-disable", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u\'tailf-common\': {u\'info\': u\'port link dampening\', u\'callpoint\': u\'Pld\', u\'cli-compact-syntax\': None, u\'cli-sequence-commands\': None, u\'cli-incomplete-command\': None, u\'cli-full-no\': None}}, namespace=\'urn:brocade.com:mgmt:brocade-pld\', defining_module=\'brocade-pld\', yang_type=\'container\', is_config=True)"""', ',', '}', ')', 'self', '.', '__link_error_disable', '=', 't', 'if', 'hasattr', '(', 'self', ',', "'_set'", ')', ':', 'self', '.', '_set', '(', ')']
Setter method for link_error_disable, mapped from YANG variable /interface/ethernet/link_error_disable (container) If this variable is read-only (config: false) in the source YANG file, then _set_link_error_disable is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_link_error_disable() directly.
['Setter', 'method', 'for', 'link_error_disable', 'mapped', 'from', 'YANG', 'variable', '/', 'interface', '/', 'ethernet', '/', 'link_error_disable', '(', 'container', ')', 'If', 'this', 'variable', 'is', 'read', '-', 'only', '(', 'config', ':', 'false', ')', 'in', 'the', 'source', 'YANG', 'file', 'then', '_set_link_error_disable', 'is', 'considered', 'as', 'a', 'private', 'method', '.', 'Backends', 'looking', 'to', 'populate', 'this', 'variable', 'should', 'do', 'so', 'via', 'calling', 'thisObj', '.', '_set_link_error_disable', '()', 'directly', '.']
train
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/slxos/v17r_1_01a/interface/ethernet/__init__.py#L1514-L1535