Dataset schema:

column                      dtype          min      max
--------------------------  -------------  -------  -------
Unnamed: 0                  int64          0        10k
repository_name             stringlengths  7        54
func_path_in_repository     stringlengths  5        223
func_name                   stringlengths  1        134
whole_func_string           stringlengths  100      30.3k
language                    stringclasses  1 value
func_code_string            stringlengths  100      30.3k
func_code_tokens            stringlengths  138      33.2k
func_documentation_string   stringlengths  1        15k
func_documentation_tokens   stringlengths  5        5.14k
split_name                  stringclasses  1 value
func_code_url               stringlengths  91       315
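To make the schema above concrete, here is a minimal sketch of loading a CodeSearchNet-style dump with the Hugging Face datasets library; the dataset id and config are assumptions inferred from the column names, not stated by this page.

# Hedged sketch: the dataset id 'code_search_net' and config 'python' are
# assumptions; any dump with the columns above would work the same way.
from datasets import load_dataset

ds = load_dataset('code_search_net', 'python', split='train')
row = ds[0]
print(row['func_name'])
print(row['func_documentation_string'])
print(row['func_code_url'])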
6,100
toomore/goristock
grs/all_portf.py
all_portf.ck_portf_004
def ck_portf_004(self):
    ''' 價走平一個半月。(箱型整理、盤整) '''
    # Price has moved sideways for about a month and a half (box-range consolidation).
    return self.a.SD < 0.25 and self.a.stock_vol[-1] > 1000*1000 and self.a.raw_data[-1] > 10
python
['def', 'ck_portf_004', '(', 'self', ')', ':', 'return', 'self', '.', 'a', '.', 'SD', '<', '0.25', 'and', 'self', '.', 'a', '.', 'stock_vol', '[', '-', '1', ']', '>', '1000', '*', '1000', 'and', 'self', '.', 'a', '.', 'raw_data', '[', '-', '1', ']', '>', '10']
Price moves sideways for about one and a half months. (Box-range consolidation / sideways market.)
['價走平一個半月。(箱型整理、盤整)']
train
https://github.com/toomore/goristock/blob/e61f57f11a626cfbc4afbf66337fd9d1c51e3e71/grs/all_portf.py#L40-L42
6,101
lehins/python-wepay
wepay/calls/subscription.py
Subscription.__modify
def __modify(self, subscription_id, **kwargs):
    """Call documentation: `/subscription/modify
    <https://www.wepay.com/developer/reference/subscription#modify>`_, plus
    extra keyword parameters:

    :keyword str access_token: will be used instead of instance's
        ``access_token``, with ``batch_mode=True`` will set `authorization`
        param to its value.
    :keyword bool batch_mode: turn on/off the batch_mode, see
        :class:`wepay.api.WePay`
    :keyword str batch_reference_id: `reference_id` param for batch call,
        see :class:`wepay.api.WePay`
    :keyword str api_version: WePay API version, see :class:`wepay.api.WePay`
    """
    params = {
        'subscription_id': subscription_id
    }
    return self.make_call(self.__modify, params, kwargs)
python
['def', '__modify', '(', 'self', ',', 'subscription_id', ',', '*', '*', 'kwargs', ')', ':', 'params', '=', '{', "'subscription_id'", ':', 'subscription_id', '}', 'return', 'self', '.', 'make_call', '(', 'self', '.', '__modify', ',', 'params', ',', 'kwargs', ')']
Call documentation: `/subscription/modify <https://www.wepay.com/developer/reference/subscription#modify>`_, plus extra keyword parameters: :keyword str access_token: will be used instead of instance's ``access_token``, with ``batch_mode=True`` will set `authorization` param to its value. :keyword bool batch_mode: turn on/off the batch_mode, see :class:`wepay.api.WePay` :keyword str batch_reference_id: `reference_id` param for batch call, see :class:`wepay.api.WePay` :keyword str api_version: WePay API version, see :class:`wepay.api.WePay`
['Call', 'documentation', ':', '/', 'subscription', '/', 'modify', '<https', ':', '//', 'www', '.', 'wepay', '.', 'com', '/', 'developer', '/', 'reference', '/', 'subscription#modify', '>', '_', 'plus', 'extra', 'keyword', 'parameters', ':', ':', 'keyword', 'str', 'access_token', ':', 'will', 'be', 'used', 'instead', 'of', 'instance', 's', 'access_token', 'with', 'batch_mode', '=', 'True', 'will', 'set', 'authorization', 'param', 'to', 'it', 's', 'value', '.']
train
https://github.com/lehins/python-wepay/blob/414d25a1a8d0ecb22a3ddd1f16c60b805bb52a1f/wepay/calls/subscription.py#L118-L140
6,102
wrobstory/vincent
vincent/data.py
Data.from_mult_iters
def from_mult_iters(cls, name=None, idx=None, **kwargs):
    """Load values from multiple iters

    Parameters
    ----------
    name : string, default None
        Name of the data set. If None (default), the name will be set to
        ``'table'``.
    idx: string, default None
        Iterable to use for the data index
    **kwargs : dict of iterables
        The ``values`` field will contain dictionaries with keys for
        each of the iterables provided. For example,

            d = Data.from_iters(idx='x', x=[0, 1, 5], y=(10, 20, 30))

        would result in ``d`` having a ``values`` field with

            [{'idx': 0, 'col': 'y', 'val': 10},
             {'idx': 1, 'col': 'y', 'val': 20}]

        If the iterables are not the same length, then ValueError is raised.
    """
    if not name:
        name = 'table'

    lengths = [len(v) for v in kwargs.values()]

    if len(set(lengths)) != 1:
        raise ValueError('Iterables must all be same length')

    if not idx:
        raise ValueError('Must provide iter name index reference')

    index = kwargs.pop(idx)
    vega_vals = []
    for k, v in sorted(kwargs.items()):
        for idx, val in zip(index, v):
            value = {}
            value['idx'] = idx
            value['col'] = k
            value['val'] = val
            vega_vals.append(value)

    return cls(name, values=vega_vals)
python
['def', 'from_mult_iters', '(', 'cls', ',', 'name', '=', 'None', ',', 'idx', '=', 'None', ',', '*', '*', 'kwargs', ')', ':', 'if', 'not', 'name', ':', 'name', '=', "'table'", 'lengths', '=', '[', 'len', '(', 'v', ')', 'for', 'v', 'in', 'kwargs', '.', 'values', '(', ')', ']', 'if', 'len', '(', 'set', '(', 'lengths', ')', ')', '!=', '1', ':', 'raise', 'ValueError', '(', "'Iterables must all be same length'", ')', 'if', 'not', 'idx', ':', 'raise', 'ValueError', '(', "'Must provide iter name index reference'", ')', 'index', '=', 'kwargs', '.', 'pop', '(', 'idx', ')', 'vega_vals', '=', '[', ']', 'for', 'k', ',', 'v', 'in', 'sorted', '(', 'kwargs', '.', 'items', '(', ')', ')', ':', 'for', 'idx', ',', 'val', 'in', 'zip', '(', 'index', ',', 'v', ')', ':', 'value', '=', '{', '}', 'value', '[', "'idx'", ']', '=', 'idx', 'value', '[', "'col'", ']', '=', 'k', 'value', '[', "'val'", ']', '=', 'val', 'vega_vals', '.', 'append', '(', 'value', ')', 'return', 'cls', '(', 'name', ',', 'values', '=', 'vega_vals', ')']
Load values from multiple iters Parameters ---------- name : string, default None Name of the data set. If None (default), the name will be set to ``'table'``. idx: string, default None Iterable to use for the data index **kwargs : dict of iterables The ``values`` field will contain dictionaries with keys for each of the iterables provided. For example, d = Data.from_iters(idx='x', x=[0, 1, 5], y=(10, 20, 30)) would result in ``d`` having a ``values`` field with [{'idx': 0, 'col': 'y', 'val': 10}, {'idx': 1, 'col': 'y', 'val': 20}] If the iterables are not the same length, then ValueError is raised.
['Load', 'values', 'from', 'multiple', 'iters']
train
https://github.com/wrobstory/vincent/blob/c5a06e50179015fbb788a7a42e4570ff4467a9e9/vincent/data.py#L294-L339
6,103
mrstephenneal/dirutility
dirutility/gui.py
BackupZipGUI.source
def source(self):
    """Parameters for saving zip backups"""
    with gui.FlexForm(self.title, auto_size_text=True, default_element_size=(40, 1)) as form:
        layout = [
            [gui.Text('Zip Backup utility', size=(30, 1), font=("Helvetica", 30), text_color='blue')],
            [gui.Text('Create a zip backup of a file or directory.', size=(50, 1), font=("Helvetica", 18),
                      text_color='black')],
            [gui.Text('-' * 200)],

            # Source
            [gui.Text('Select source folder', size=(20, 1), font=("Helvetica", 25), auto_size_text=False),
             gui.InputText('', key='source', font=("Helvetica", 20)),
             gui.FolderBrowse()],

            [gui.Submit(), gui.Cancel()]]

        button, values = form.LayoutAndRead(layout)

        if button == 'Submit':
            return values['source']
        else:
            exit()
python
['def', 'source', '(', 'self', ')', ':', 'with', 'gui', '.', 'FlexForm', '(', 'self', '.', 'title', ',', 'auto_size_text', '=', 'True', ',', 'default_element_size', '=', '(', '40', ',', '1', ')', ')', 'as', 'form', ':', 'layout', '=', '[', '[', 'gui', '.', 'Text', '(', "'Zip Backup utility'", ',', 'size', '=', '(', '30', ',', '1', ')', ',', 'font', '=', '(', '"Helvetica"', ',', '30', ')', ',', 'text_color', '=', "'blue'", ')', ']', ',', '[', 'gui', '.', 'Text', '(', "'Create a zip backup of a file or directory.'", ',', 'size', '=', '(', '50', ',', '1', ')', ',', 'font', '=', '(', '"Helvetica"', ',', '18', ')', ',', 'text_color', '=', "'black'", ')', ']', ',', '[', 'gui', '.', 'Text', '(', "'-'", '*', '200', ')', ']', ',', '# Source', '[', 'gui', '.', 'Text', '(', "'Select source folder'", ',', 'size', '=', '(', '20', ',', '1', ')', ',', 'font', '=', '(', '"Helvetica"', ',', '25', ')', ',', 'auto_size_text', '=', 'False', ')', ',', 'gui', '.', 'InputText', '(', "''", ',', 'key', '=', "'source'", ',', 'font', '=', '(', '"Helvetica"', ',', '20', ')', ')', ',', 'gui', '.', 'FolderBrowse', '(', ')', ']', ',', '[', 'gui', '.', 'Submit', '(', ')', ',', 'gui', '.', 'Cancel', '(', ')', ']', ']', 'button', ',', 'values', '=', 'form', '.', 'LayoutAndRead', '(', 'layout', ')', 'if', 'button', '==', "'Submit'", ':', 'return', 'values', '[', "'source'", ']', 'else', ':', 'exit', '(', ')']
Parameters for saving zip backups
['Parameters', 'for', 'saving', 'zip', 'backups']
train
https://github.com/mrstephenneal/dirutility/blob/339378659e2d7e09c53acfc51c5df745bb0cd517/dirutility/gui.py#L123-L143
6,104
bionikspoon/pureyaml
pureyaml/__init__.py
dumps
def dumps(obj, indent=None, default=None, sort_keys=False, **kw):
    """Dump string."""
    return YAMLEncoder(indent=indent, default=default, sort_keys=sort_keys, **kw).encode(obj)
python
['def', 'dumps', '(', 'obj', ',', 'indent', '=', 'None', ',', 'default', '=', 'None', ',', 'sort_keys', '=', 'False', ',', '*', '*', 'kw', ')', ':', 'return', 'YAMLEncoder', '(', 'indent', '=', 'indent', ',', 'default', '=', 'default', ',', 'sort_keys', '=', 'sort_keys', ',', '*', '*', 'kw', ')', '.', 'encode', '(', 'obj', ')']
Dump string.
['Dump', 'string', '.']
train
https://github.com/bionikspoon/pureyaml/blob/784830b907ca14525c4cecdb6ae35306f6f8a877/pureyaml/__init__.py#L47-L49
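A short usage sketch for the `dumps` helper in the record above, assuming pureyaml is installed and exports it at package level (the file shown is pureyaml/__init__.py):

# Hedged usage sketch of pureyaml.dumps.
import pureyaml

print(pureyaml.dumps({'name': 'spam', 'tags': ['a', 'b']}, sort_keys=True))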
6,105
Unidata/siphon
siphon/cdmr/ncstream.py
process_vlen
def process_vlen(data_header, array):
    """Process vlen coming back from NCStream v2.

    This takes the array of values and slices into an object array, with
    entries containing the appropriate pieces of the original array. Sizes
    are controlled by the passed in `data_header`.

    Parameters
    ----------
    data_header : Header
    array : :class:`numpy.ndarray`

    Returns
    -------
    ndarray
        object array containing sub-sequences from the original primitive array

    """
    source = iter(array)
    return np.array([np.fromiter(itertools.islice(source, size), dtype=array.dtype)
                     for size in data_header.vlens])
python
['def', 'process_vlen', '(', 'data_header', ',', 'array', ')', ':', 'source', '=', 'iter', '(', 'array', ')', 'return', 'np', '.', 'array', '(', '[', 'np', '.', 'fromiter', '(', 'itertools', '.', 'islice', '(', 'source', ',', 'size', ')', ',', 'dtype', '=', 'array', '.', 'dtype', ')', 'for', 'size', 'in', 'data_header', '.', 'vlens', ']', ')']
Process vlen coming back from NCStream v2. This takes the array of values and slices into an object array, with entries containing the appropriate pieces of the original array. Sizes are controlled by the passed in `data_header`. Parameters ---------- data_header : Header array : :class:`numpy.ndarray` Returns ------- ndarray object array containing sub-sequences from the original primitive array
['Process', 'vlen', 'coming', 'back', 'from', 'NCStream', 'v2', '.']
train
https://github.com/Unidata/siphon/blob/53fb0d84fbce1c18c8e81c9e68bc81620ee0a6ac/siphon/cdmr/ncstream.py#L201-L221
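To illustrate the slicing pattern in process_vlen above, a self-contained sketch; the real NCStream Header is replaced by a stand-in object whose only assumed field is `vlens`:

import itertools
from types import SimpleNamespace

import numpy as np

# Stand-in for the NCStream data header; only `vlens` (the sub-sequence
# sizes) is assumed here.
header = SimpleNamespace(vlens=[2, 3, 1])
flat = np.array([1, 2, 3, 4, 5, 6])

source = iter(flat)
chunks = np.array([np.fromiter(itertools.islice(source, size), dtype=flat.dtype)
                   for size in header.vlens], dtype=object)
print(chunks)  # [array([1, 2]) array([3, 4, 5]) array([6])]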
6,106
Microsoft/azure-devops-python-api
azure-devops/azure/devops/v5_0/build/build_client.py
BuildClient.add_build_tags
def add_build_tags(self, tags, project, build_id):
    """AddBuildTags.
    Adds tags to a build.
    :param [str] tags: The tags to add.
    :param str project: Project ID or project name
    :param int build_id: The ID of the build.
    :rtype: [str]
    """
    route_values = {}
    if project is not None:
        route_values['project'] = self._serialize.url('project', project, 'str')
    if build_id is not None:
        route_values['buildId'] = self._serialize.url('build_id', build_id, 'int')
    content = self._serialize.body(tags, '[str]')
    response = self._send(http_method='POST',
                          location_id='6e6114b2-8161-44c8-8f6c-c5505782427f',
                          version='5.0',
                          route_values=route_values,
                          content=content)
    return self._deserialize('[str]', self._unwrap_collection(response))
python
['def', 'add_build_tags', '(', 'self', ',', 'tags', ',', 'project', ',', 'build_id', ')', ':', 'route_values', '=', '{', '}', 'if', 'project', 'is', 'not', 'None', ':', 'route_values', '[', "'project'", ']', '=', 'self', '.', '_serialize', '.', 'url', '(', "'project'", ',', 'project', ',', "'str'", ')', 'if', 'build_id', 'is', 'not', 'None', ':', 'route_values', '[', "'buildId'", ']', '=', 'self', '.', '_serialize', '.', 'url', '(', "'build_id'", ',', 'build_id', ',', "'int'", ')', 'content', '=', 'self', '.', '_serialize', '.', 'body', '(', 'tags', ',', "'[str]'", ')', 'response', '=', 'self', '.', '_send', '(', 'http_method', '=', "'POST'", ',', 'location_id', '=', "'6e6114b2-8161-44c8-8f6c-c5505782427f'", ',', 'version', '=', "'5.0'", ',', 'route_values', '=', 'route_values', ',', 'content', '=', 'content', ')', 'return', 'self', '.', '_deserialize', '(', "'[str]'", ',', 'self', '.', '_unwrap_collection', '(', 'response', ')', ')']
AddBuildTags. Adds tags to a build. :param [str] tags: The tags to add. :param str project: Project ID or project name :param int build_id: The ID of the build. :rtype: [str]
['AddBuildTags', '.', 'Adds', 'tags', 'to', 'a', 'build', '.', ':', 'param', '[', 'str', ']', 'tags', ':', 'The', 'tags', 'to', 'add', '.', ':', 'param', 'str', 'project', ':', 'Project', 'ID', 'or', 'project', 'name', ':', 'param', 'int', 'build_id', ':', 'The', 'ID', 'of', 'the', 'build', '.', ':', 'rtype', ':', '[', 'str', ']']
train
https://github.com/Microsoft/azure-devops-python-api/blob/4777ffda2f5052fabbaddb2abe9cb434e0cf1aa8/azure-devops/azure/devops/v5_0/build/build_client.py#L1549-L1568
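A hedged sketch of calling add_build_tags through the azure-devops client; the organization URL, token, project name, and build id below are all placeholders:

# Hedged usage sketch; all credentials and ids are placeholders.
from azure.devops.connection import Connection
from msrest.authentication import BasicAuthentication

credentials = BasicAuthentication('', '<personal-access-token>')
connection = Connection(base_url='https://dev.azure.com/<organization>',
                        creds=credentials)
build_client = connection.clients.get_build_client()

tags = build_client.add_build_tags(['nightly', 'smoke-tested'], '<project>', 1234)
print(tags)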
6,107
DataDog/integrations-core
datadog_checks_base/datadog_checks/base/checks/prometheus/mixins.py
PrometheusScraperMixin.process
def process(self, endpoint, **kwargs):
    """
    Polls the data from prometheus and pushes them as gauges
    `endpoint` is the metrics endpoint to use to poll metrics from Prometheus

    Note that if the instance has a 'tags' attribute, it will be pushed
    automatically as additional custom tags and added to the metrics
    """
    instance = kwargs.get('instance')
    if instance:
        kwargs['custom_tags'] = instance.get('tags', [])
    for metric in self.scrape_metrics(endpoint):
        self.process_metric(metric, **kwargs)
python
['def', 'process', '(', 'self', ',', 'endpoint', ',', '*', '*', 'kwargs', ')', ':', 'instance', '=', 'kwargs', '.', 'get', '(', "'instance'", ')', 'if', 'instance', ':', 'kwargs', '[', "'custom_tags'", ']', '=', 'instance', '.', 'get', '(', "'tags'", ',', '[', ']', ')', 'for', 'metric', 'in', 'self', '.', 'scrape_metrics', '(', 'endpoint', ')', ':', 'self', '.', 'process_metric', '(', 'metric', ',', '*', '*', 'kwargs', ')']
Polls the data from prometheus and pushes them as gauges `endpoint` is the metrics endpoint to use to poll metrics from Prometheus Note that if the instance has a 'tags' attribute, it will be pushed automatically as additional custom tags and added to the metrics
['Polls', 'the', 'data', 'from', 'prometheus', 'and', 'pushes', 'them', 'as', 'gauges', 'endpoint', 'is', 'the', 'metrics', 'endpoint', 'to', 'use', 'to', 'poll', 'metrics', 'from', 'Prometheus']
train
https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/datadog_checks_base/datadog_checks/base/checks/prometheus/mixins.py#L386-L399
6,108
horazont/aioxmpp
aioxmpp/bookmarks/service.py
BookmarkClient.get_bookmarks
def get_bookmarks(self):
    """
    Get the stored bookmarks from the server. Causes signals to be
    fired to reflect the changes.

    :returns: a list of bookmarks
    """
    with (yield from self._lock):
        bookmarks = yield from self._get_bookmarks()
        self._diff_emit_update(bookmarks)
        return bookmarks
python
['def', 'get_bookmarks', '(', 'self', ')', ':', 'with', '(', 'yield', 'from', 'self', '.', '_lock', ')', ':', 'bookmarks', '=', 'yield', 'from', 'self', '.', '_get_bookmarks', '(', ')', 'self', '.', '_diff_emit_update', '(', 'bookmarks', ')', 'return', 'bookmarks']
Get the stored bookmarks from the server. Causes signals to be fired to reflect the changes. :returns: a list of bookmarks
['Get', 'the', 'stored', 'bookmarks', 'from', 'the', 'server', '.', 'Causes', 'signals', 'to', 'be', 'fired', 'to', 'reflect', 'the', 'changes', '.']
train
https://github.com/horazont/aioxmpp/blob/22a68e5e1d23f2a4dee470092adbd4672f9ef061/aioxmpp/bookmarks/service.py#L268-L278
6,109
influxdata/influxdb-python
influxdb/influxdb08/client.py
InfluxDBClient.alter_database_admin
def alter_database_admin(self, username, is_admin):
    """Alter the database admin."""
    url = "db/{0}/users/{1}".format(self._database, username)

    data = {'admin': is_admin}

    self.request(
        url=url,
        method='POST',
        data=data,
        expected_response_code=200
    )

    return True
python
['def', 'alter_database_admin', '(', 'self', ',', 'username', ',', 'is_admin', ')', ':', 'url', '=', '"db/{0}/users/{1}"', '.', 'format', '(', 'self', '.', '_database', ',', 'username', ')', 'data', '=', '{', "'admin'", ':', 'is_admin', '}', 'self', '.', 'request', '(', 'url', '=', 'url', ',', 'method', '=', "'POST'", ',', 'data', '=', 'data', ',', 'expected_response_code', '=', '200', ')', 'return', 'True']
Alter the database admin.
['Alter', 'the', 'database', 'admin', '.']
train
https://github.com/influxdata/influxdb-python/blob/d5d12499f3755199d5eedd8b363450f1cf4073bd/influxdb/influxdb08/client.py#L660-L673
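A usage sketch for the legacy v0.8 client above; the connection details are placeholders:

# Hedged usage sketch of the InfluxDB 0.8 client.
from influxdb.influxdb08 import InfluxDBClient

client = InfluxDBClient(host='localhost', port=8086,
                        username='root', password='root', database='mydb')
client.alter_database_admin('alice', is_admin=True)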
6,110
hotdoc/hotdoc
hotdoc/core/database.py
Database.add_comment
def add_comment(self, comment):
    """
    Add a comment to the database.

    Args:
        comment (hotdoc.core.Comment): comment to add
    """
    if not comment:
        return

    self.__comments[comment.name] = comment
    self.comment_added_signal(self, comment)
python
['def', 'add_comment', '(', 'self', ',', 'comment', ')', ':', 'if', 'not', 'comment', ':', 'return', 'self', '.', '__comments', '[', 'comment', '.', 'name', ']', '=', 'comment', 'self', '.', 'comment_added_signal', '(', 'self', ',', 'comment', ')']
Add a comment to the database. Args: comment (hotdoc.core.Comment): comment to add
['Add', 'a', 'comment', 'to', 'the', 'database', '.']
train
https://github.com/hotdoc/hotdoc/blob/1067cdc8482b585b364a38fb52ca5d904e486280/hotdoc/core/database.py#L77-L88
6,111
zhexiao/ezhost
ezhost/ServerBase.py
ServerBase.init_host
def init_host(self):
    """
    Initial host
    """
    env.host_string = self.host_string
    env.user = self.host_user
    env.password = self.host_passwd
    env.key_filename = self.host_keyfile
python
['def', 'init_host', '(', 'self', ')', ':', 'env', '.', 'host_string', '=', 'self', '.', 'host_string', 'env', '.', 'user', '=', 'self', '.', 'host_user', 'env', '.', 'password', '=', 'self', '.', 'host_passwd', 'env', '.', 'key_filename', '=', 'self', '.', 'host_keyfile']
Initial host
['Initial', 'host']
train
https://github.com/zhexiao/ezhost/blob/4146bc0be14bb1bfe98ec19283d19fab420871b3/ezhost/ServerBase.py#L75-L82
6,112
avinassh/haxor
hackernews/__init__.py
HackerNews.get_items_by_ids
def get_items_by_ids(self, item_ids, item_type=None):
    """Given a list of item ids, return all the Item objects

    Args:
        item_ids (obj): List of item IDs to query
        item_type (str): (optional) Item type to filter results with

    Returns:
        List of `Item` objects for given item IDs and given item type
    """
    urls = [urljoin(self.item_url, F"{i}.json") for i in item_ids]
    result = self._run_async(urls=urls)
    items = [Item(r) for r in result if r]
    if item_type:
        return [item for item in items if item.item_type == item_type]
    else:
        return items
python
['def', 'get_items_by_ids', '(', 'self', ',', 'item_ids', ',', 'item_type', '=', 'None', ')', ':', 'urls', '=', '[', 'urljoin', '(', 'self', '.', 'item_url', ',', 'F"{i}.json"', ')', 'for', 'i', 'in', 'item_ids', ']', 'result', '=', 'self', '.', '_run_async', '(', 'urls', '=', 'urls', ')', 'items', '=', '[', 'Item', '(', 'r', ')', 'for', 'r', 'in', 'result', 'if', 'r', ']', 'if', 'item_type', ':', 'return', '[', 'item', 'for', 'item', 'in', 'items', 'if', 'item', '.', 'item_type', '==', 'item_type', ']', 'else', ':', 'return', 'items']
Given a list of item ids, return all the Item objects Args: item_ids (obj): List of item IDs to query item_type (str): (optional) Item type to filter results with Returns: List of `Item` objects for given item IDs and given item type
['Given', 'a', 'list', 'of', 'item', 'ids', 'return', 'all', 'the', 'Item', 'objects']
train
https://github.com/avinassh/haxor/blob/71dbecf87531f7a24bb39c736d53127427aaca84/hackernews/__init__.py#L204-L221
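A usage sketch for get_items_by_ids above, assuming haxor is installed; the item ids are arbitrary examples, and attribute access like `story.title` mirrors the Hacker News item schema (an assumption):

# Hedged usage sketch of the haxor client.
from hackernews import HackerNews

hn = HackerNews()
stories = hn.get_items_by_ids([8863, 9224], item_type='story')
for story in stories:
    print(story.title)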
6,113
ContextLab/quail
quail/helpers.py
list2pd
def list2pd(all_data, subjindex=None, listindex=None):
    """
    Makes multi-indexed dataframe of subject data

    Parameters
    ----------
    all_data : list of lists of strings
        strings are either all presented or all recalled items, in the order
        of presentation or recall
        *should also work for presented / recalled ints and floats, if desired

    Returns
    ----------
    subs_list_of_dfs : multi-indexed dataframe
        dataframe of subject data (presented or recalled words/items), indexed
        by subject and list number
        cell populated by the term presented or recalled in the position
        indicated by the column number
    """
    # set default index if it is not defined
    # max_nlists = max(map(lambda x: len(x), all_data))
    listindex = [[idx for idx in range(len(sub))] for sub in all_data] if not listindex else listindex
    subjindex = [idx for idx, subj in enumerate(all_data)] if not subjindex else subjindex

    def make_multi_index(listindex, sub_num):
        return pd.MultiIndex.from_tuples([(sub_num, lst) for lst in listindex], names=['Subject', 'List'])

    listindex = list(listindex)
    subjindex = list(subjindex)

    subs_list_of_dfs = [pd.DataFrame(sub_data, index=make_multi_index(listindex[sub_num], subjindex[sub_num]))
                        for sub_num, sub_data in enumerate(all_data)]

    return pd.concat(subs_list_of_dfs)
python
['def', 'list2pd', '(', 'all_data', ',', 'subjindex', '=', 'None', ',', 'listindex', '=', 'None', ')', ':', '# set default index if it is not defined', '# max_nlists = max(map(lambda x: len(x), all_data))', 'listindex', '=', '[', '[', 'idx', 'for', 'idx', 'in', 'range', '(', 'len', '(', 'sub', ')', ')', ']', 'for', 'sub', 'in', 'all_data', ']', 'if', 'not', 'listindex', 'else', 'listindex', 'subjindex', '=', '[', 'idx', 'for', 'idx', ',', 'subj', 'in', 'enumerate', '(', 'all_data', ')', ']', 'if', 'not', 'subjindex', 'else', 'subjindex', 'def', 'make_multi_index', '(', 'listindex', ',', 'sub_num', ')', ':', 'return', 'pd', '.', 'MultiIndex', '.', 'from_tuples', '(', '[', '(', 'sub_num', ',', 'lst', ')', 'for', 'lst', 'in', 'listindex', ']', ',', 'names', '=', '[', "'Subject'", ',', "'List'", ']', ')', 'listindex', '=', 'list', '(', 'listindex', ')', 'subjindex', '=', 'list', '(', 'subjindex', ')', 'subs_list_of_dfs', '=', '[', 'pd', '.', 'DataFrame', '(', 'sub_data', ',', 'index', '=', 'make_multi_index', '(', 'listindex', '[', 'sub_num', ']', ',', 'subjindex', '[', 'sub_num', ']', ')', ')', 'for', 'sub_num', ',', 'sub_data', 'in', 'enumerate', '(', 'all_data', ')', ']', 'return', 'pd', '.', 'concat', '(', 'subs_list_of_dfs', ')']
Makes multi-indexed dataframe of subject data Parameters ---------- all_data : list of lists of strings strings are either all presented or all recalled items, in the order of presentation or recall *should also work for presented / recalled ints and floats, if desired Returns ---------- subs_list_of_dfs : multi-indexed dataframe dataframe of subject data (presented or recalled words/items), indexed by subject and list number cell populated by the term presented or recalled in the position indicated by the column number
['Makes', 'multi', '-', 'indexed', 'dataframe', 'of', 'subject', 'data']
train
https://github.com/ContextLab/quail/blob/71dd53c792dd915dc84879d8237e3582dd68b7a4/quail/helpers.py#L11-L43
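A toy example of list2pd above; the import path is assumed from the file location shown (quail/helpers.py):

# Hedged usage sketch; assumes quail is installed.
from quail.helpers import list2pd

presented = [[['cat', 'dog'], ['fish', 'bird']],   # subject 0, two lists
             [['sun', 'moon'], ['star', 'sky']]]   # subject 1, two lists

df = list2pd(presented)
print(df)  # rows indexed by (Subject, List); columns are serial positions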
6,114
memphis-iis/GLUDB
gludb/backends/dynamodb.py
Backend.find_all
def find_all(self, cls):
    """Required functionality."""
    final_results = []

    table = self.get_class_table(cls)
    for db_result in table.scan():
        obj = cls.from_data(db_result['value'])
        final_results.append(obj)

    return final_results
python
['def', 'find_all', '(', 'self', ',', 'cls', ')', ':', 'final_results', '=', '[', ']', 'table', '=', 'self', '.', 'get_class_table', '(', 'cls', ')', 'for', 'db_result', 'in', 'table', '.', 'scan', '(', ')', ':', 'obj', '=', 'cls', '.', 'from_data', '(', 'db_result', '[', "'value'", ']', ')', 'final_results', '.', 'append', '(', 'obj', ')', 'return', 'final_results']
Required functionality.
['Required', 'functionality', '.']
train
https://github.com/memphis-iis/GLUDB/blob/25692528ff6fe8184a3570f61f31f1a90088a388/gludb/backends/dynamodb.py#L153-L161
6,115
signaturit/python-sdk
signaturit_sdk/signaturit_client.py
SignaturitClient.count_SMS
def count_SMS(self, conditions=None):
    """
    Count all certified sms
    """
    # Use None as the default to avoid sharing one mutable dict across calls.
    if conditions is None:
        conditions = {}

    url = self.SMS_COUNT_URL + "?"

    for key, value in conditions.items():
        # Compare by value (== rather than is): string identity is not
        # guaranteed by the language.
        if key == 'ids':
            value = ",".join(value)

        url += '&%s=%s' % (key, value)

    connection = Connection(self.token)
    connection.set_url(self.production, url)

    return connection.get_request()
python
['def', 'count_SMS', '(', 'self', ',', 'conditions', '=', '{', '}', ')', ':', 'url', '=', 'self', '.', 'SMS_COUNT_URL', '+', '"?"', 'for', 'key', ',', 'value', 'in', 'conditions', '.', 'items', '(', ')', ':', 'if', 'key', 'is', "'ids'", ':', 'value', '=', '","', '.', 'join', '(', 'value', ')', 'url', '+=', "'&%s=%s'", '%', '(', 'key', ',', 'value', ')', 'connection', '=', 'Connection', '(', 'self', '.', 'token', ')', 'connection', '.', 'set_url', '(', 'self', '.', 'production', ',', 'url', ')', 'connection', '.', 'set_url', '(', 'self', '.', 'production', ',', 'url', ')', 'return', 'connection', '.', 'get_request', '(', ')']
Count all certified sms
['Count', 'all', 'certified', 'sms']
train
https://github.com/signaturit/python-sdk/blob/2419c6d9675d901244f807ae360dc58aa46109a9/signaturit_sdk/signaturit_client.py#L377-L393
6,116
estnltk/estnltk
estnltk/syntax/syntax_preprocessing.py
tag_subcat_info
def tag_subcat_info(mrf_lines, subcat_rules):
    ''' Adds subcategorization information (hashtags) to verbs and adpositions;

        Argument subcat_rules must be a dict containing subcategorization
        information, loaded via method load_subcat_info();

        Performs word lemma lookups in subcat_rules, and in case of a match,
        checks word part-of-speech conditions. If the POS conditions match,
        adds subcategorization information either to a single analysis line,
        or to multiple analysis lines (depending on the exact conditions in
        the rule);

        Returns the input list where verb/adposition analyses have been
        augmented with available subcategorization information;
    '''
    i = 0
    while i < len(mrf_lines):
        line = mrf_lines[i]
        if line.startswith(' '):
            lemma_match = analysisLemmaPat.match(line)
            if lemma_match:
                lemma = lemma_match.group(1)
                # Find whether there is subcategorization info associated
                # with the lemma
                if lemma in subcat_rules:
                    analysis_match = analysisPat.search(line)
                    if not analysis_match:
                        raise Exception(' Could not find analysis from the line:', line)
                    analysis = analysis_match.group(1)
                    for rule in subcat_rules[lemma]:
                        condition, addition = rule.split('>')
                        # Check the condition string; If there are multiple
                        # conditions, all must be satisfied for the rule to fire
                        condition = condition.strip()
                        conditions = condition.split()
                        satisfied1 = [_check_condition(c, analysis) for c in conditions]
                        if all(satisfied1):
                            #
                            # There can be multiple additions:
                            #   1) additions without '|' must be added to a single
                            #      analysis line;
                            #   2) additions separated by '|' must be placed on
                            #      separate analysis lines;
                            #
                            additions = addition.split('|')
                            j = i
                            # Add new line or lines
                            for a in additions:
                                line_copy = line if i == j else line[:]
                                items_to_add = a.split()
                                for item in items_to_add:
                                    if not _check_condition(item, analysis):
                                        line_copy = re.sub('(//.+\S)\s+//', '\\1 ' + item + ' //', line_copy)
                                if j == i:
                                    # 1) replace the existing line
                                    mrf_lines[i] = line_copy
                                else:
                                    # 2) add a new line
                                    mrf_lines.insert(i, line_copy)
                                j += 1
                            i = j - 1
                            # No need to search forward
                            break
        i += 1
    return mrf_lines
python
['def', 'tag_subcat_info', '(', 'mrf_lines', ',', 'subcat_rules', ')', ':', 'i', '=', '0', 'while', '(', 'i', '<', 'len', '(', 'mrf_lines', ')', ')', ':', 'line', '=', 'mrf_lines', '[', 'i', ']', 'if', 'line', '.', 'startswith', '(', "' '", ')', ':', 'lemma_match', '=', 'analysisLemmaPat', '.', 'match', '(', 'line', ')', 'if', 'lemma_match', ':', 'lemma', '=', 'lemma_match', '.', 'group', '(', '1', ')', '# Find whether there is subcategorization info associated ', '# with the lemma', 'if', 'lemma', 'in', 'subcat_rules', ':', 'analysis_match', '=', 'analysisPat', '.', 'search', '(', 'line', ')', 'if', 'not', 'analysis_match', ':', 'raise', 'Exception', '(', "' Could not find analysis from the line:'", ',', 'line', ')', 'analysis', '=', 'analysis_match', '.', 'group', '(', '1', ')', 'for', 'rule', 'in', 'subcat_rules', '[', 'lemma', ']', ':', 'condition', ',', 'addition', '=', 'rule', '.', 'split', '(', "'>'", ')', '# Check the condition string; If there are multiple conditions, ', '# all must be satisfied for the rule to fire', 'condition', '=', 'condition', '.', 'strip', '(', ')', 'conditions', '=', 'condition', '.', 'split', '(', ')', 'satisfied1', '=', '[', '_check_condition', '(', 'c', ',', 'analysis', ')', 'for', 'c', 'in', 'conditions', ']', 'if', 'all', '(', 'satisfied1', ')', ':', '#', '# There can be multiple additions:', "# 1) additions without '|' must be added to a single analysis line;", "# 2) additions separated by '|' must be placed on separate analysis ", '# lines;', '#', 'additions', '=', 'addition', '.', 'split', '(', "'|'", ')', 'j', '=', 'i', '# Add new line or lines', 'for', 'a', 'in', 'additions', ':', 'line_copy', '=', 'line', 'if', 'i', '==', 'j', 'else', 'line', '[', ':', ']', 'items_to_add', '=', 'a', '.', 'split', '(', ')', 'for', 'item', 'in', 'items_to_add', ':', 'if', 'not', '_check_condition', '(', 'item', ',', 'analysis', ')', ':', 'line_copy', '=', 're', '.', 'sub', '(', "'(//.+\\S)\\s+//'", ',', "'\\\\1 '", '+', 'item', '+', "' //'", ',', 'line_copy', ')', 'if', 'j', '==', 'i', ':', '# 1) replace the existing line', 'mrf_lines', '[', 'i', ']', '=', 'line_copy', 'else', ':', '# 2) add a new line ', 'mrf_lines', '.', 'insert', '(', 'i', ',', 'line_copy', ')', 'j', '+=', '1', 'i', '=', 'j', '-', '1', '# No need to search forward', 'break', 'i', '+=', '1', 'return', 'mrf_lines']
Adds subcategorization information (hashtags) to verbs and adpositions; Argument subcat_rules must be a dict containing subcategorization information, loaded via method load_subcat_info(); Performs word lemma lookups in subcat_rules, and in case of a match, checks word part-of-speech conditions. If the POS conditions match, adds subcategorization information either to a single analysis line, or to multiple analysis lines (depending on the exact conditions in the rule); Returns the input list where verb/adposition analyses have been augmented with available subcategorization information;
['Adds', 'subcategorization', 'information', '(', 'hashtags', ')', 'to', 'verbs', 'and', 'adpositions', ';', 'Argument', 'subcat_rules', 'must', 'be', 'a', 'dict', 'containing', 'subcategorization', 'information', 'loaded', 'via', 'method', 'load_subcat_info', '()', ';']
train
https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/syntax/syntax_preprocessing.py#L721-L784
6,117
DataBiosphere/toil
src/toil/wdl/wdl_analysis.py
AnalyzeWDL.parse_declaration_expressn_memberaccess
def parse_declaration_expressn_memberaccess(self, lhsAST, rhsAST, es):
    """
    Instead of "Class.variablename", use "Class.rv('variablename')".

    :param lhsAST:
    :param rhsAST:
    :param es:
    :return:
    """
    if isinstance(lhsAST, wdl_parser.Terminal):
        es = es + lhsAST.source_string
    elif isinstance(lhsAST, wdl_parser.Ast):
        raise NotImplementedError
    elif isinstance(lhsAST, wdl_parser.AstList):
        raise NotImplementedError

    es = es + '_'

    if isinstance(rhsAST, wdl_parser.Terminal):
        es = es + rhsAST.source_string
    elif isinstance(rhsAST, wdl_parser.Ast):
        raise NotImplementedError
    elif isinstance(rhsAST, wdl_parser.AstList):
        raise NotImplementedError

    return es
python
['def', 'parse_declaration_expressn_memberaccess', '(', 'self', ',', 'lhsAST', ',', 'rhsAST', ',', 'es', ')', ':', 'if', 'isinstance', '(', 'lhsAST', ',', 'wdl_parser', '.', 'Terminal', ')', ':', 'es', '=', 'es', '+', 'lhsAST', '.', 'source_string', 'elif', 'isinstance', '(', 'lhsAST', ',', 'wdl_parser', '.', 'Ast', ')', ':', 'raise', 'NotImplementedError', 'elif', 'isinstance', '(', 'lhsAST', ',', 'wdl_parser', '.', 'AstList', ')', ':', 'raise', 'NotImplementedError', 'es', '=', 'es', '+', "'_'", 'if', 'isinstance', '(', 'rhsAST', ',', 'wdl_parser', '.', 'Terminal', ')', ':', 'es', '=', 'es', '+', 'rhsAST', '.', 'source_string', 'elif', 'isinstance', '(', 'rhsAST', ',', 'wdl_parser', '.', 'Ast', ')', ':', 'raise', 'NotImplementedError', 'elif', 'isinstance', '(', 'rhsAST', ',', 'wdl_parser', '.', 'AstList', ')', ':', 'raise', 'NotImplementedError', 'return', 'es']
Instead of "Class.variablename", use "Class.rv('variablename')". :param lhsAST: :param rhsAST: :param es: :return:
['Instead', 'of', 'Class', '.', 'variablename', 'use', 'Class', '.', 'rv', '(', 'variablename', ')', '.']
train
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/wdl/wdl_analysis.py#L772-L797
6,118
daviddrysdale/python-phonenumbers
python/phonenumbers/phonenumberutil.py
supported_types_for_non_geo_entity
def supported_types_for_non_geo_entity(country_code):
    """Returns the types for a country-code belonging to a non-geographical
    entity which the library has metadata for. Will not include
    FIXED_LINE_OR_MOBILE (if numbers for this non-geographical entity could
    be classified as FIXED_LINE_OR_MOBILE, both FIXED_LINE and MOBILE would
    be present) and UNKNOWN.

    No types will be returned for country calling codes that do not map to a
    known non-geographical entity.
    """
    metadata = PhoneMetadata.metadata_for_nongeo_region(country_code, None)
    if metadata is None:
        return set()
    return _supported_types_for_metadata(metadata)
python
['def', 'supported_types_for_non_geo_entity', '(', 'country_code', ')', ':', 'metadata', '=', 'PhoneMetadata', '.', 'metadata_for_nongeo_region', '(', 'country_code', ',', 'None', ')', 'if', 'metadata', 'is', 'None', ':', 'return', 'set', '(', ')', 'return', '_supported_types_for_metadata', '(', 'metadata', ')']
Returns the types for a country-code belonging to a non-geographical entity which the library has metadata for. Will not include FIXED_LINE_OR_MOBILE (if numbers for this non-geographical entity could be classified as FIXED_LINE_OR_MOBILE, both FIXED_LINE and MOBILE would be present) and UNKNOWN. No types will be returned for country calling codes that do not map to a known non-geographical entity.
['Returns', 'the', 'types', 'for', 'a', 'country', '-', 'code', 'belonging', 'to', 'a', 'non', '-', 'geographical', 'entity', 'which', 'the', 'library', 'has', 'metadata', 'for', '.', 'Will', 'not', 'include', 'FIXED_LINE_OR_MOBILE', '(', 'if', 'numbers', 'for', 'this', 'non', '-', 'geographical', 'entity', 'could', 'be', 'classified', 'as', 'FIXED_LINE_OR_MOBILE', 'both', 'FIXED_LINE', 'and', 'MOBILE', 'would', 'be', 'present', ')', 'and', 'UNKNOWN', '.']
train
https://github.com/daviddrysdale/python-phonenumbers/blob/9cc5bb4ab5e661e70789b4c64bf7a9383c7bdc20/python/phonenumbers/phonenumberutil.py#L877-L890
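A usage sketch for the function above; the module import path follows the file shown, and +800 (Universal International Freephone) is a known non-geographical calling code:

# Hedged usage sketch with the phonenumbers package.
from phonenumbers.phonenumberutil import supported_types_for_non_geo_entity

print(supported_types_for_non_geo_entity(800))  # set of PhoneNumberType values
print(supported_types_for_non_geo_entity(44))   # empty set: +44 is geographical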
6,119
christian-oudard/htmltreediff
htmltreediff/changes.py
merge_adjacent
def merge_adjacent(dom, tag_name):
    """
    Merge all adjacent tags with the specified tag name.
    Return the number of merges performed.
    """
    for node in dom.getElementsByTagName(tag_name):
        prev_sib = node.previousSibling
        if prev_sib and prev_sib.nodeName == node.tagName:
            for child in list(node.childNodes):
                prev_sib.appendChild(child)
            remove_node(node)
python
['def', 'merge_adjacent', '(', 'dom', ',', 'tag_name', ')', ':', 'for', 'node', 'in', 'dom', '.', 'getElementsByTagName', '(', 'tag_name', ')', ':', 'prev_sib', '=', 'node', '.', 'previousSibling', 'if', 'prev_sib', 'and', 'prev_sib', '.', 'nodeName', '==', 'node', '.', 'tagName', ':', 'for', 'child', 'in', 'list', '(', 'node', '.', 'childNodes', ')', ':', 'prev_sib', '.', 'appendChild', '(', 'child', ')', 'remove_node', '(', 'node', ')']
Merge all adjacent tags with the specified tag name. Return the number of merges performed.
['Merge', 'all', 'adjacent', 'tags', 'with', 'the', 'specified', 'tag', 'name', '.', 'Return', 'the', 'number', 'of', 'merges', 'performed', '.']
train
https://github.com/christian-oudard/htmltreediff/blob/0e28f56492ae7e69bb0f74f9a79a8909a5ad588d/htmltreediff/changes.py#L104-L114
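A self-contained sketch of the same adjacent-merge pattern using xml.dom.minidom directly; the library's private remove_node helper is replaced by an inline removeChild call:

from xml.dom.minidom import parseString

dom = parseString('<p><b>Hello, </b><b>world</b>!</p>')
for node in dom.getElementsByTagName('b'):
    prev_sib = node.previousSibling
    if prev_sib and prev_sib.nodeName == node.tagName:
        for child in list(node.childNodes):
            prev_sib.appendChild(child)
        node.parentNode.removeChild(node)  # inline stand-in for remove_node()

print(dom.documentElement.toxml())  # <p><b>Hello, world</b>!</p>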
6,120
codelv/enaml-native
src/enamlnative/core/dev.py
DevServer.render_files
def render_files(self, root=None):
    """ Render the file path as accordions """
    if root is None:
        tmp = os.environ.get('TMP')
        root = sys.path[1 if tmp and tmp in sys.path else 0]
    items = []
    for filename in os.listdir(root):
        # for subdirname in dirnames:
        #     path = os.path.join(dirname, subdirname)
        #     items.append(FOLDER_TMPL.format(
        #         name=subdirname,
        #         id=path,
        #         items=self.render_files(path)
        #     ))
        # for filename in filenames:
        f, ext = os.path.splitext(filename)
        if ext in ['.py', '.enaml']:
            items.append(FILE_TMPL.format(
                name=filename,
                id=filename
            ))
    return "".join(items)
python
['def', 'render_files', '(', 'self', ',', 'root', '=', 'None', ')', ':', 'if', 'root', 'is', 'None', ':', 'tmp', '=', 'os', '.', 'environ', '.', 'get', '(', "'TMP'", ')', 'root', '=', 'sys', '.', 'path', '[', '1', 'if', 'tmp', 'and', 'tmp', 'in', 'sys', '.', 'path', 'else', '0', ']', 'items', '=', '[', ']', 'for', 'filename', 'in', 'os', '.', 'listdir', '(', 'root', ')', ':', '# for subdirname in dirnames:', '# path = os.path.join(dirname, subdirname)', '# items.append(FOLDER_TMPL.format(', '# name=subdirname,', '# id=path,', '# items=self.render_files(path)', '# ))', '#for filename in filenames:', 'f', ',', 'ext', '=', 'os', '.', 'path', '.', 'splitext', '(', 'filename', ')', 'if', 'ext', 'in', '[', "'.py'", ',', "'.enaml'", ']', ':', 'items', '.', 'append', '(', 'FILE_TMPL', '.', 'format', '(', 'name', '=', 'filename', ',', 'id', '=', 'filename', ')', ')', 'return', '""', '.', 'join', '(', 'items', ')']
Render the file path as accordions
['Render', 'the', 'file', 'path', 'as', 'accordions']
train
https://github.com/codelv/enaml-native/blob/c33986e9eda468c508806e0a3e73c771401e5718/src/enamlnative/core/dev.py#L491-L514
6,121
aliyun/aliyun-log-python-sdk
aliyun/log/logclient.py
LogClient.get_machine_group_applied_configs
def get_machine_group_applied_configs(self, project_name, group_name):
    """ get the logtail config names applied in a machine group
    Unsuccessful operation will cause a LogException.

    :type project_name: string
    :param project_name: the Project name

    :type group_name: string
    :param group_name: the group name list

    :return: GetMachineGroupAppliedConfigResponse

    :raise: LogException
    """
    headers = {}
    params = {}
    resource = "/machinegroups/" + group_name + "/configs"
    (resp, header) = self._send("GET", project_name, None, resource, params, headers)
    return GetMachineGroupAppliedConfigResponse(resp, header)
python
['def', 'get_machine_group_applied_configs', '(', 'self', ',', 'project_name', ',', 'group_name', ')', ':', 'headers', '=', '{', '}', 'params', '=', '{', '}', 'resource', '=', '"/machinegroups/"', '+', 'group_name', '+', '"/configs"', '(', 'resp', ',', 'header', ')', '=', 'self', '.', '_send', '(', '"GET"', ',', 'project_name', ',', 'None', ',', 'resource', ',', 'params', ',', 'headers', ')', 'return', 'GetMachineGroupAppliedConfigResponse', '(', 'resp', ',', 'header', ')']
get the logtail config names applied in a machine group Unsuccessful operation will cause a LogException. :type project_name: string :param project_name: the Project name :type group_name: string :param group_name: the group name list :return: GetMachineGroupAppliedConfigResponse :raise: LogException
['get', 'the', 'logtail', 'config', 'names', 'applied', 'in', 'a', 'machine', 'group', 'Unsuccessful', 'opertaion', 'will', 'cause', 'an', 'LogException', '.', ':', 'type', 'project_name', ':', 'string', ':', 'param', 'project_name', ':', 'the', 'Project', 'name', ':', 'type', 'group_name', ':', 'string', ':', 'param', 'group_name', ':', 'the', 'group', 'name', 'list', ':', 'return', ':', 'GetMachineGroupAppliedConfigResponse', ':', 'raise', ':', 'LogException']
train
https://github.com/aliyun/aliyun-log-python-sdk/blob/ac383db0a16abf1e5ef7df36074374184b43516e/aliyun/log/logclient.py#L1821-L1840
6,122
happyleavesaoc/python-firetv
firetv/__main__.py
device_connect
def device_connect(device_id):
    """ Force a connection attempt via HTTP GET. """
    success = False
    if device_id in devices:
        devices[device_id].connect()
        success = True
    return jsonify(success=success)
python
['def', 'device_connect', '(', 'device_id', ')', ':', 'success', '=', 'False', 'if', 'device_id', 'in', 'devices', ':', 'devices', '[', 'device_id', ']', '.', 'connect', '(', ')', 'success', '=', 'True', 'return', 'jsonify', '(', 'success', '=', 'success', ')']
Force a connection attempt via HTTP GET.
['Force', 'a', 'connection', 'attempt', 'via', 'HTTP', 'GET', '.']
train
https://github.com/happyleavesaoc/python-firetv/blob/3dd953376c0d5af502e775ae14ed0afe03224781/firetv/__main__.py#L220-L226
6,123
tapilab/brandelion
brandelion/cli/analyze.py
proportion_merge
def proportion_merge(brands, exemplars):
    """ Return the proportion of a brand's followers who also follow an
    exemplar. We merge all exemplar followers into one big pseudo-account."""
    scores = {}
    exemplar_followers = set()
    for followers in exemplars.values():
        exemplar_followers |= followers
    for brand, followers in brands:
        scores[brand] = _proportion(followers, exemplar_followers)
    return scores
python
['def', 'proportion_merge', '(', 'brands', ',', 'exemplars', ')', ':', 'scores', '=', '{', '}', 'exemplar_followers', '=', 'set', '(', ')', 'for', 'followers', 'in', 'exemplars', '.', 'values', '(', ')', ':', 'exemplar_followers', '|=', 'followers', 'for', 'brand', ',', 'followers', 'in', 'brands', ':', 'scores', '[', 'brand', ']', '=', '_proportion', '(', 'followers', ',', 'exemplar_followers', ')', 'return', 'scores']
Return the proportion of a brand's followers who also follow an exemplar. We merge all exemplar followers into one big pseudo-account.
['Return', 'the', 'proportion', 'of', 'a', 'brand', 's', 'followers', 'who', 'also', 'follower', 'an', 'exemplar', '.', 'We', 'merge', 'all', 'exemplar', 'followers', 'into', 'one', 'big', 'pseudo', '-', 'account', '.']
train
https://github.com/tapilab/brandelion/blob/40a5a5333cf704182c8666d1fbbbdadc7ff88546/brandelion/cli/analyze.py#L298-L308
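A toy illustration of the merge above; `_proportion` is private in the source, so a plausible set-overlap definition is inlined (an assumption, not the library's exact formula):

brands = [('brand_a', {1, 2, 3, 4}), ('brand_b', {5, 6})]
exemplars = {'exemplar_x': {2, 3}, 'exemplar_y': {6, 7}}

exemplar_followers = set()
for followers in exemplars.values():
    exemplar_followers |= followers   # one big pseudo-account

scores = {brand: len(followers & exemplar_followers) / len(followers)
          for brand, followers in brands}
print(scores)  # {'brand_a': 0.5, 'brand_b': 0.5}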
6,124
inasafe/inasafe
safe/impact_function/provenance_utilities.py
get_analysis_question
def get_analysis_question(hazard, exposure):
    """Construct analysis question based on hazard and exposure.

    :param hazard: A hazard definition.
    :type hazard: dict

    :param exposure: An exposure definition.
    :type exposure: dict

    :returns: Analysis question based on reporting standards.
    :rtype: str
    """
    # First we look for a translated hardcoded question.
    question = specific_analysis_question(hazard, exposure)
    if question:
        return question

    if hazard == hazard_generic:
        # Secondly, if the hazard is generic, we don't need the hazard.
        question = tr(
            'In each of the hazard zones {exposure_measure} {exposure_name} '
            'might be affected?').format(
                exposure_measure=exposure['measure_question'],
                exposure_name=exposure['name'])
        return question

    # Then, we fallback on a generated string on the fly.
    question = tr(
        'In the event of a {hazard_name}, {exposure_measure} {exposure_name} '
        'might be affected?').format(
            hazard_name=hazard['name'],
            exposure_measure=exposure['measure_question'],
            exposure_name=exposure['name'])
    return question
python
def get_analysis_question(hazard, exposure): """Construct analysis question based on hazard and exposure. :param hazard: A hazard definition. :type hazard: dict :param exposure: An exposure definition. :type exposure: dict :returns: Analysis question based on reporting standards. :rtype: str """ # First we look for a translated hardcoded question. question = specific_analysis_question(hazard, exposure) if question: return question if hazard == hazard_generic: # Secondly, if the hazard is generic, we don't need the hazard. question = tr( 'In each of the hazard zones {exposure_measure} {exposure_name} ' 'might be affected?').format( exposure_measure=exposure['measure_question'], exposure_name=exposure['name']) return question # Then, we fallback on a generated string on the fly. question = tr( 'In the event of a {hazard_name}, {exposure_measure} {exposure_name} ' 'might be affected?').format( hazard_name=hazard['name'], exposure_measure=exposure['measure_question'], exposure_name=exposure['name']) return question
['def', 'get_analysis_question', '(', 'hazard', ',', 'exposure', ')', ':', '# First we look for a translated hardcoded question.', 'question', '=', 'specific_analysis_question', '(', 'hazard', ',', 'exposure', ')', 'if', 'question', ':', 'return', 'question', 'if', 'hazard', '==', 'hazard_generic', ':', "# Secondly, if the hazard is generic, we don't need the hazard.", 'question', '=', 'tr', '(', "'In each of the hazard zones {exposure_measure} {exposure_name} '", "'might be affected?'", ')', '.', 'format', '(', 'exposure_measure', '=', 'exposure', '[', "'measure_question'", ']', ',', 'exposure_name', '=', 'exposure', '[', "'name'", ']', ')', 'return', 'question', '# Then, we fallback on a generated string on the fly.', 'question', '=', 'tr', '(', "'In the event of a {hazard_name}, {exposure_measure} {exposure_name} '", "'might be affected?'", ')', '.', 'format', '(', 'hazard_name', '=', 'hazard', '[', "'name'", ']', ',', 'exposure_measure', '=', 'exposure', '[', "'measure_question'", ']', ',', 'exposure_name', '=', 'exposure', '[', "'name'", ']', ')', 'return', 'question']
Construct analysis question based on hazard and exposure. :param hazard: A hazard definition. :type hazard: dict :param exposure: An exposure definition. :type exposure: dict :returns: Analysis question based on reporting standards. :rtype: str
['Construct', 'analysis', 'question', 'based', 'on', 'hazard', 'and', 'exposure', '.']
train
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/impact_function/provenance_utilities.py#L48-L81
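A hand-run of get_analysis_question's fallback branch, using hypothetical minimal hazard/exposure dicts (the real definitions in safe.definitions carry many more keys):

    hazard = {'name': 'flood'}
    exposure = {'name': 'buildings', 'measure_question': 'how many'}
    # Assuming no hardcoded translation matches and hazard is not hazard_generic:
    # get_analysis_question(hazard, exposure)
    # -> 'In the event of a flood, how many buildings might be affected?'
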
6,125
valohai/valohai-yaml
valohai_yaml/parsing.py
parse
def parse(yaml, validate=True): """ Parse the given YAML data into a `Config` object, optionally validating it first. :param yaml: YAML data (either a string, a stream, or pre-parsed Python dict/list) :type yaml: list|dict|str|file :param validate: Whether to validate the data before attempting to parse it. :type validate: bool :return: Config object :rtype: valohai_yaml.objs.Config """ data = read_yaml(yaml) if validate: # pragma: no branch from .validation import validate validate(data, raise_exc=True) return Config.parse(data)
python
def parse(yaml, validate=True): """ Parse the given YAML data into a `Config` object, optionally validating it first. :param yaml: YAML data (either a string, a stream, or pre-parsed Python dict/list) :type yaml: list|dict|str|file :param validate: Whether to validate the data before attempting to parse it. :type validate: bool :return: Config object :rtype: valohai_yaml.objs.Config """ data = read_yaml(yaml) if validate: # pragma: no branch from .validation import validate validate(data, raise_exc=True) return Config.parse(data)
['def', 'parse', '(', 'yaml', ',', 'validate', '=', 'True', ')', ':', 'data', '=', 'read_yaml', '(', 'yaml', ')', 'if', 'validate', ':', '# pragma: no branch', 'from', '.', 'validation', 'import', 'validate', 'validate', '(', 'data', ',', 'raise_exc', '=', 'True', ')', 'return', 'Config', '.', 'parse', '(', 'data', ')']
Parse the given YAML data into a `Config` object, optionally validating it first. :param yaml: YAML data (either a string, a stream, or pre-parsed Python dict/list) :type yaml: list|dict|str|file :param validate: Whether to validate the data before attempting to parse it. :type validate: bool :return: Config object :rtype: valohai_yaml.objs.Config
['Parse', 'the', 'given', 'YAML', 'data', 'into', 'a', 'Config', 'object', 'optionally', 'validating', 'it', 'first', '.']
train
https://github.com/valohai/valohai-yaml/blob/3d2e92381633d84cdba039f6905df34c9633a2e1/valohai_yaml/parsing.py#L6-L21
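Typical use of parse above, assuming it is re-exported at the package root as valohai_yaml's public API suggests:

    from valohai_yaml import parse

    with open('valohai.yaml') as f:  # file name illustrative
        config = parse(f)            # validates first, raising on schema errors
    print(type(config))              # valohai_yaml.objs.Config
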
6,126
serhatbolsu/robotframework-appiumlibrary
AppiumLibrary/keywords/_element.py
_ElementKeywords.input_value
def input_value(self, locator, text): """Sets the given value into text field identified by `locator`. This is an iOS only keyword; input value makes use of set_value. See `introduction` for details about locating elements. """ self._info("Setting text '%s' into text field '%s'" % (text, locator)) self._element_input_value_by_locator(locator, text)
python
def input_value(self, locator, text): """Sets the given value into text field identified by `locator`. This is an iOS only keyword; input value makes use of set_value. See `introduction` for details about locating elements. """ self._info("Setting text '%s' into text field '%s'" % (text, locator)) self._element_input_value_by_locator(locator, text)
['def', 'input_value', '(', 'self', ',', 'locator', ',', 'text', ')', ':', 'self', '.', '_info', '(', '"Setting text \'%s\' into text field \'%s\'"', '%', '(', 'text', ',', 'locator', ')', ')', 'self', '.', '_element_input_value_by_locator', '(', 'locator', ',', 'text', ')']
Sets the given value into text field identified by `locator`. This is an iOS only keyword; input value makes use of set_value. See `introduction` for details about locating elements.
['Sets', 'the', 'given', 'value', 'into', 'text', 'field', 'identified', 'by', 'locator', '.', 'This', 'is', 'an', 'iOS', 'only', 'keyword', 'input', 'value', 'makes', 'use', 'of', 'set_value', '.', 'See', 'introduction', 'for', 'details', 'about', 'locating', 'elements', '.']
train
https://github.com/serhatbolsu/robotframework-appiumlibrary/blob/91c808cf0602af6be8135ac529fa488fded04a85/AppiumLibrary/keywords/_element.py#L82-L88
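The keyword above can also be driven from plain Python; a sketch assuming an iOS session has already been opened and that the 'name=' locator strategy applies to your app (both assumptions):

    from AppiumLibrary import AppiumLibrary

    lib = AppiumLibrary()
    # ... Open Application / session setup elided ...
    lib.input_value('name=username', 'alice')  # set_value-based input, iOS only
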
6,127
honzamach/pynspect
pynspect/traversers.py
BaseFilteringTreeTraverser.evaluate_binop_math
def evaluate_binop_math(self, operation, left, right, **kwargs): """ Evaluate given mathematical binary operation with given operands. """ if not operation in self.binops_math: raise ValueError("Invalid math binary operation '{}'".format(operation)) if left is None or right is None: return None if not isinstance(left, (list, ListIP)): left = [left] if not isinstance(right, (list, ListIP)): right = [right] if not left or not right: return None try: vect = self._calculate_vector(operation, left, right) if len(vect) > 1: return vect return vect[0] except: return None
python
def evaluate_binop_math(self, operation, left, right, **kwargs): """ Evaluate given mathematical binary operation with given operands. """ if not operation in self.binops_math: raise ValueError("Invalid math binary operation '{}'".format(operation)) if left is None or right is None: return None if not isinstance(left, (list, ListIP)): left = [left] if not isinstance(right, (list, ListIP)): right = [right] if not left or not right: return None try: vect = self._calculate_vector(operation, left, right) if len(vect) > 1: return vect return vect[0] except: return None
['def', 'evaluate_binop_math', '(', 'self', ',', 'operation', ',', 'left', ',', 'right', ',', '*', '*', 'kwargs', ')', ':', 'if', 'not', 'operation', 'in', 'self', '.', 'binops_math', ':', 'raise', 'ValueError', '(', '"Invalid math binary operation \'{}\'"', '.', 'format', '(', 'operation', ')', ')', 'if', 'left', 'is', 'None', 'or', 'right', 'is', 'None', ':', 'return', 'None', 'if', 'not', 'isinstance', '(', 'left', ',', '(', 'list', ',', 'ListIP', ')', ')', ':', 'left', '=', '[', 'left', ']', 'if', 'not', 'isinstance', '(', 'right', ',', '(', 'list', ',', 'ListIP', ')', ')', ':', 'right', '=', '[', 'right', ']', 'if', 'not', 'left', 'or', 'not', 'right', ':', 'return', 'None', 'try', ':', 'vect', '=', 'self', '.', '_calculate_vector', '(', 'operation', ',', 'left', ',', 'right', ')', 'if', 'len', '(', 'vect', ')', '>', '1', ':', 'return', 'vect', 'return', 'vect', '[', '0', ']', 'except', ':', 'return', 'None']
Evaluate given mathematical binary operation with given operands.
['Evaluate', 'given', 'mathematical', 'binary', 'operation', 'with', 'given', 'operands', '.']
train
https://github.com/honzamach/pynspect/blob/0582dcc1f7aafe50e25a21c792ea1b3367ea5881/pynspect/traversers.py#L710-L730
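Expected behavior of evaluate_binop_math above for a hypothetical traverser t whose binops_math maps 'OP_PLUS' to addition, assuming scalar operands broadcast against lists:

    # t.evaluate_binop_math('OP_PLUS', [1, 2], 10)  -> [11, 12]  (scalar broadcast)
    # t.evaluate_binop_math('OP_PLUS', 5, 3)        -> 8          (1-element vector unwrapped)
    # t.evaluate_binop_math('OP_PLUS', None, 3)     -> None       (missing operand short-circuits)
    # t.evaluate_binop_math('OP_FOO', 1, 2)         -> ValueError (operation not registered)
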
6,128
Yubico/yubikey-manager
ykman/cli/oath.py
code
def code(ctx, show_hidden, query, single): """ Generate codes. Generate codes from credentials stored on your YubiKey. Provide a query string to match one or more specific credentials. Touch and HOTP credentials require a single match to be triggered. """ ensure_validated(ctx) controller = ctx.obj['controller'] creds = [(cr, c) for (cr, c) in controller.calculate_all() if show_hidden or not cr.is_hidden ] creds = _search(creds, query) if len(creds) == 1: cred, code = creds[0] if cred.touch: prompt_for_touch() try: if cred.oath_type == OATH_TYPE.HOTP: # HOTP might require touch, we don't know. # Assume yes after 500ms. hotp_touch_timer = Timer(0.500, prompt_for_touch) hotp_touch_timer.start() creds = [(cred, controller.calculate(cred))] hotp_touch_timer.cancel() elif code is None: creds = [(cred, controller.calculate(cred))] except APDUError as e: if e.sw == SW.SECURITY_CONDITION_NOT_SATISFIED: ctx.fail('Touch credential timed out!') elif single: _error_multiple_hits(ctx, [cr for cr, c in creds]) if single: click.echo(creds[0][1].value) else: creds.sort() outputs = [ ( cr.printable_key, c.value if c else '[Touch Credential]' if cr.touch else '[HOTP Credential]' if cr.oath_type == OATH_TYPE.HOTP else '' ) for (cr, c) in creds ] longest_name = max(len(n) for (n, c) in outputs) if outputs else 0 longest_code = max(len(c) for (n, c) in outputs) if outputs else 0 format_str = u'{:<%d} {:>%d}' % (longest_name, longest_code) for name, result in outputs: click.echo(format_str.format(name, result))
python
def code(ctx, show_hidden, query, single): """ Generate codes. Generate codes from credentials stored on your YubiKey. Provide a query string to match one or more specific credentials. Touch and HOTP credentials require a single match to be triggered. """ ensure_validated(ctx) controller = ctx.obj['controller'] creds = [(cr, c) for (cr, c) in controller.calculate_all() if show_hidden or not cr.is_hidden ] creds = _search(creds, query) if len(creds) == 1: cred, code = creds[0] if cred.touch: prompt_for_touch() try: if cred.oath_type == OATH_TYPE.HOTP: # HOTP might require touch, we don't know. # Assume yes after 500ms. hotp_touch_timer = Timer(0.500, prompt_for_touch) hotp_touch_timer.start() creds = [(cred, controller.calculate(cred))] hotp_touch_timer.cancel() elif code is None: creds = [(cred, controller.calculate(cred))] except APDUError as e: if e.sw == SW.SECURITY_CONDITION_NOT_SATISFIED: ctx.fail('Touch credential timed out!') elif single: _error_multiple_hits(ctx, [cr for cr, c in creds]) if single: click.echo(creds[0][1].value) else: creds.sort() outputs = [ ( cr.printable_key, c.value if c else '[Touch Credential]' if cr.touch else '[HOTP Credential]' if cr.oath_type == OATH_TYPE.HOTP else '' ) for (cr, c) in creds ] longest_name = max(len(n) for (n, c) in outputs) if outputs else 0 longest_code = max(len(c) for (n, c) in outputs) if outputs else 0 format_str = u'{:<%d} {:>%d}' % (longest_name, longest_code) for name, result in outputs: click.echo(format_str.format(name, result))
['def', 'code', '(', 'ctx', ',', 'show_hidden', ',', 'query', ',', 'single', ')', ':', 'ensure_validated', '(', 'ctx', ')', 'controller', '=', 'ctx', '.', 'obj', '[', "'controller'", ']', 'creds', '=', '[', '(', 'cr', ',', 'c', ')', 'for', '(', 'cr', ',', 'c', ')', 'in', 'controller', '.', 'calculate_all', '(', ')', 'if', 'show_hidden', 'or', 'not', 'cr', '.', 'is_hidden', ']', 'creds', '=', '_search', '(', 'creds', ',', 'query', ')', 'if', 'len', '(', 'creds', ')', '==', '1', ':', 'cred', ',', 'code', '=', 'creds', '[', '0', ']', 'if', 'cred', '.', 'touch', ':', 'prompt_for_touch', '(', ')', 'try', ':', 'if', 'cred', '.', 'oath_type', '==', 'OATH_TYPE', '.', 'HOTP', ':', "# HOTP might require touch, we don't know.", '# Assume yes after 500ms.', 'hotp_touch_timer', '=', 'Timer', '(', '0.500', ',', 'prompt_for_touch', ')', 'hotp_touch_timer', '.', 'start', '(', ')', 'creds', '=', '[', '(', 'cred', ',', 'controller', '.', 'calculate', '(', 'cred', ')', ')', ']', 'hotp_touch_timer', '.', 'cancel', '(', ')', 'elif', 'code', 'is', 'None', ':', 'creds', '=', '[', '(', 'cred', ',', 'controller', '.', 'calculate', '(', 'cred', ')', ')', ']', 'except', 'APDUError', 'as', 'e', ':', 'if', 'e', '.', 'sw', '==', 'SW', '.', 'SECURITY_CONDITION_NOT_SATISFIED', ':', 'ctx', '.', 'fail', '(', "'Touch credential timed out!'", ')', 'elif', 'single', ':', '_error_multiple_hits', '(', 'ctx', ',', '[', 'cr', 'for', 'cr', ',', 'c', 'in', 'creds', ']', ')', 'if', 'single', ':', 'click', '.', 'echo', '(', 'creds', '[', '0', ']', '[', '1', ']', '.', 'value', ')', 'else', ':', 'creds', '.', 'sort', '(', ')', 'outputs', '=', '[', '(', 'cr', '.', 'printable_key', ',', 'c', '.', 'value', 'if', 'c', 'else', "'[Touch Credential]'", 'if', 'cr', '.', 'touch', 'else', "'[HOTP Credential]'", 'if', 'cr', '.', 'oath_type', '==', 'OATH_TYPE', '.', 'HOTP', 'else', "''", ')', 'for', '(', 'cr', ',', 'c', ')', 'in', 'creds', ']', 'longest_name', '=', 'max', '(', 'len', '(', 'n', ')', 'for', '(', 'n', ',', 'c', ')', 'in', 'outputs', ')', 'if', 'outputs', 'else', '0', 'longest_code', '=', 'max', '(', 'len', '(', 'c', ')', 'for', '(', 'n', ',', 'c', ')', 'in', 'outputs', ')', 'if', 'outputs', 'else', '0', 'format_str', '=', "u'{:<%d} {:>%d}'", '%', '(', 'longest_name', ',', 'longest_code', ')', 'for', 'name', ',', 'result', 'in', 'outputs', ':', 'click', '.', 'echo', '(', 'format_str', '.', 'format', '(', 'name', ',', 'result', ')', ')']
Generate codes. Generate codes from credentials stored on your YubiKey. Provide a query string to match one or more specific credentials. Touch and HOTP credentials require a single match to be triggered.
['Generate', 'codes', '.']
train
https://github.com/Yubico/yubikey-manager/blob/3ac27bc59ae76a59db9d09a530494add2edbbabf/ykman/cli/oath.py#L335-L395
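The 500 ms HOTP touch-timer in code above is a reusable idiom on its own; a minimal standalone sketch (names are illustrative, not the ykman API):

    from threading import Timer

    def calculate_with_touch_hint(calculate):
        hint = Timer(0.5, lambda: print('Touch your YubiKey...'))
        hint.start()               # prompt only if the call blocks > 500 ms
        try:
            return calculate()     # blocks until the device answers
        finally:
            hint.cancel()          # fast answers never see the prompt
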
6,129
PGower/PyCanvas
pycanvas/apis/courses.py
CoursesAPI.update_course_settings
def update_course_settings(self, course_id, allow_student_discussion_editing=None, allow_student_discussion_topics=None, allow_student_forum_attachments=None, allow_student_organized_groups=None, hide_distribution_graphs=None, hide_final_grades=None, home_page_announcement_limit=None, lock_all_announcements=None, restrict_student_future_view=None, restrict_student_past_view=None, show_announcements_on_home_page=None): """ Update course settings. Can update the following course settings: """ path = {} data = {} params = {} # REQUIRED - PATH - course_id """ID""" path["course_id"] = course_id # OPTIONAL - allow_student_discussion_topics """Let students create discussion topics""" if allow_student_discussion_topics is not None: data["allow_student_discussion_topics"] = allow_student_discussion_topics # OPTIONAL - allow_student_forum_attachments """Let students attach files to discussions""" if allow_student_forum_attachments is not None: data["allow_student_forum_attachments"] = allow_student_forum_attachments # OPTIONAL - allow_student_discussion_editing """Let students edit or delete their own discussion posts""" if allow_student_discussion_editing is not None: data["allow_student_discussion_editing"] = allow_student_discussion_editing # OPTIONAL - allow_student_organized_groups """Let students organize their own groups""" if allow_student_organized_groups is not None: data["allow_student_organized_groups"] = allow_student_organized_groups # OPTIONAL - hide_final_grades """Hide totals in student grades summary""" if hide_final_grades is not None: data["hide_final_grades"] = hide_final_grades # OPTIONAL - hide_distribution_graphs """Hide grade distribution graphs from students""" if hide_distribution_graphs is not None: data["hide_distribution_graphs"] = hide_distribution_graphs # OPTIONAL - lock_all_announcements """Disable comments on announcements""" if lock_all_announcements is not None: data["lock_all_announcements"] = lock_all_announcements # OPTIONAL - restrict_student_past_view """Restrict students from viewing courses after end date""" if restrict_student_past_view is not None: data["restrict_student_past_view"] = restrict_student_past_view # OPTIONAL - restrict_student_future_view """Restrict students from viewing courses before start date""" if restrict_student_future_view is not None: data["restrict_student_future_view"] = restrict_student_future_view # OPTIONAL - show_announcements_on_home_page """Show the most recent announcements on the Course home page (if a Wiki, defaults to five announcements, configurable via home_page_announcement_limit)""" if show_announcements_on_home_page is not None: data["show_announcements_on_home_page"] = show_announcements_on_home_page # OPTIONAL - home_page_announcement_limit """Limit the number of announcements on the home page if enabled via show_announcements_on_home_page""" if home_page_announcement_limit is not None: data["home_page_announcement_limit"] = home_page_announcement_limit self.logger.debug("PUT /api/v1/courses/{course_id}/settings with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("PUT", "/api/v1/courses/{course_id}/settings".format(**path), data=data, params=params, no_data=True)
python
def update_course_settings(self, course_id, allow_student_discussion_editing=None, allow_student_discussion_topics=None, allow_student_forum_attachments=None, allow_student_organized_groups=None, hide_distribution_graphs=None, hide_final_grades=None, home_page_announcement_limit=None, lock_all_announcements=None, restrict_student_future_view=None, restrict_student_past_view=None, show_announcements_on_home_page=None): """ Update course settings. Can update the following course settings: """ path = {} data = {} params = {} # REQUIRED - PATH - course_id """ID""" path["course_id"] = course_id # OPTIONAL - allow_student_discussion_topics """Let students create discussion topics""" if allow_student_discussion_topics is not None: data["allow_student_discussion_topics"] = allow_student_discussion_topics # OPTIONAL - allow_student_forum_attachments """Let students attach files to discussions""" if allow_student_forum_attachments is not None: data["allow_student_forum_attachments"] = allow_student_forum_attachments # OPTIONAL - allow_student_discussion_editing """Let students edit or delete their own discussion posts""" if allow_student_discussion_editing is not None: data["allow_student_discussion_editing"] = allow_student_discussion_editing # OPTIONAL - allow_student_organized_groups """Let students organize their own groups""" if allow_student_organized_groups is not None: data["allow_student_organized_groups"] = allow_student_organized_groups # OPTIONAL - hide_final_grades """Hide totals in student grades summary""" if hide_final_grades is not None: data["hide_final_grades"] = hide_final_grades # OPTIONAL - hide_distribution_graphs """Hide grade distribution graphs from students""" if hide_distribution_graphs is not None: data["hide_distribution_graphs"] = hide_distribution_graphs # OPTIONAL - lock_all_announcements """Disable comments on announcements""" if lock_all_announcements is not None: data["lock_all_announcements"] = lock_all_announcements # OPTIONAL - restrict_student_past_view """Restrict students from viewing courses after end date""" if restrict_student_past_view is not None: data["restrict_student_past_view"] = restrict_student_past_view # OPTIONAL - restrict_student_future_view """Restrict students from viewing courses before start date""" if restrict_student_future_view is not None: data["restrict_student_future_view"] = restrict_student_future_view # OPTIONAL - show_announcements_on_home_page """Show the most recent announcements on the Course home page (if a Wiki, defaults to five announcements, configurable via home_page_announcement_limit)""" if show_announcements_on_home_page is not None: data["show_announcements_on_home_page"] = show_announcements_on_home_page # OPTIONAL - home_page_announcement_limit """Limit the number of announcements on the home page if enabled via show_announcements_on_home_page""" if home_page_announcement_limit is not None: data["home_page_announcement_limit"] = home_page_announcement_limit self.logger.debug("PUT /api/v1/courses/{course_id}/settings with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("PUT", "/api/v1/courses/{course_id}/settings".format(**path), data=data, params=params, no_data=True)
['def', 'update_course_settings', '(', 'self', ',', 'course_id', ',', 'allow_student_discussion_editing', '=', 'None', ',', 'allow_student_discussion_topics', '=', 'None', ',', 'allow_student_forum_attachments', '=', 'None', ',', 'allow_student_organized_groups', '=', 'None', ',', 'hide_distribution_graphs', '=', 'None', ',', 'hide_final_grades', '=', 'None', ',', 'home_page_announcement_limit', '=', 'None', ',', 'lock_all_announcements', '=', 'None', ',', 'restrict_student_future_view', '=', 'None', ',', 'restrict_student_past_view', '=', 'None', ',', 'show_announcements_on_home_page', '=', 'None', ')', ':', 'path', '=', '{', '}', 'data', '=', '{', '}', 'params', '=', '{', '}', '# REQUIRED - PATH - course_id\r', '"""ID"""', 'path', '[', '"course_id"', ']', '=', 'course_id', '# OPTIONAL - allow_student_discussion_topics\r', '"""Let students create discussion topics"""', 'if', 'allow_student_discussion_topics', 'is', 'not', 'None', ':', 'data', '[', '"allow_student_discussion_topics"', ']', '=', 'allow_student_discussion_topics', '# OPTIONAL - allow_student_forum_attachments\r', '"""Let students attach files to discussions"""', 'if', 'allow_student_forum_attachments', 'is', 'not', 'None', ':', 'data', '[', '"allow_student_forum_attachments"', ']', '=', 'allow_student_forum_attachments', '# OPTIONAL - allow_student_discussion_editing\r', '"""Let students edit or delete their own discussion posts"""', 'if', 'allow_student_discussion_editing', 'is', 'not', 'None', ':', 'data', '[', '"allow_student_discussion_editing"', ']', '=', 'allow_student_discussion_editing', '# OPTIONAL - allow_student_organized_groups\r', '"""Let students organize their own groups"""', 'if', 'allow_student_organized_groups', 'is', 'not', 'None', ':', 'data', '[', '"allow_student_organized_groups"', ']', '=', 'allow_student_organized_groups', '# OPTIONAL - hide_final_grades\r', '"""Hide totals in student grades summary"""', 'if', 'hide_final_grades', 'is', 'not', 'None', ':', 'data', '[', '"hide_final_grades"', ']', '=', 'hide_final_grades', '# OPTIONAL - hide_distribution_graphs\r', '"""Hide grade distribution graphs from students"""', 'if', 'hide_distribution_graphs', 'is', 'not', 'None', ':', 'data', '[', '"hide_distribution_graphs"', ']', '=', 'hide_distribution_graphs', '# OPTIONAL - lock_all_announcements\r', '"""Disable comments on announcements"""', 'if', 'lock_all_announcements', 'is', 'not', 'None', ':', 'data', '[', '"lock_all_announcements"', ']', '=', 'lock_all_announcements', '# OPTIONAL - restrict_student_past_view\r', '"""Restrict students from viewing courses after end date"""', 'if', 'restrict_student_past_view', 'is', 'not', 'None', ':', 'data', '[', '"restrict_student_past_view"', ']', '=', 'restrict_student_past_view', '# OPTIONAL - restrict_student_future_view\r', '"""Restrict students from viewing courses before start date"""', 'if', 'restrict_student_future_view', 'is', 'not', 'None', ':', 'data', '[', '"restrict_student_future_view"', ']', '=', 'restrict_student_future_view', '# OPTIONAL - show_announcements_on_home_page\r', '"""Show the most recent announcements on the Course home page (if a Wiki, defaults to five announcements, configurable via home_page_announcement_limit)"""', 'if', 'show_announcements_on_home_page', 'is', 'not', 'None', ':', 'data', '[', '"show_announcements_on_home_page"', ']', '=', 'show_announcements_on_home_page', '# OPTIONAL - home_page_announcement_limit\r', '"""Limit the number of announcements on the home page if enabled via show_announcements_on_home_page"""', 'if', 'home_page_announcement_limit', 'is', 'not', 'None', ':', 'data', '[', '"home_page_announcement_limit"', ']', '=', 'home_page_announcement_limit', 'self', '.', 'logger', '.', 'debug', '(', '"PUT /api/v1/courses/{course_id}/settings with query params: {params} and form data: {data}"', '.', 'format', '(', 'params', '=', 'params', ',', 'data', '=', 'data', ',', '*', '*', 'path', ')', ')', 'return', 'self', '.', 'generic_request', '(', '"PUT"', ',', '"/api/v1/courses/{course_id}/settings"', '.', 'format', '(', '*', '*', 'path', ')', ',', 'data', '=', 'data', ',', 'params', '=', 'params', ',', 'no_data', '=', 'True', ')']
Update course settings. Can update the following course settings:
['Update', 'course', 'settings', '.', 'Can', 'update', 'the', 'following', 'course', 'settings', ':']
train
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/courses.py#L798-L868
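A usage sketch for update_course_settings above; the client construction is an assumption about PyCanvas wiring, but the call shows how only non-None kwargs become form fields:

    api = CoursesAPI('https://canvas.example.edu', 'ACCESS_TOKEN')  # hypothetical ctor
    api.update_course_settings(
        1234,
        hide_final_grades=True,
        lock_all_announcements=True,
    )  # PUTs exactly two form fields; omitted settings stay untouched
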
6,130
danijar/sets
sets/core/step.py
Step.download
def download(cls, url, filename=None): """ Download a file into the correct cache directory. """ return utility.download(url, cls.directory(), filename)
python
def download(cls, url, filename=None): """ Download a file into the correct cache directory. """ return utility.download(url, cls.directory(), filename)
['def', 'download', '(', 'cls', ',', 'url', ',', 'filename', '=', 'None', ')', ':', 'return', 'utility', '.', 'download', '(', 'url', ',', 'cls', '.', 'directory', '(', ')', ',', 'filename', ')']
Download a file into the correct cache directory.
['Download', 'a', 'file', 'into', 'the', 'correct', 'cache', 'directory', '.']
train
https://github.com/danijar/sets/blob/2542c28f43d0af18932cb5b82f54ffb6ae557d12/sets/core/step.py#L24-L28
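Since download above is a classmethod, it is normally reached through a concrete Step subclass; a sketch with a hypothetical subclass and URL:

    class MnistStep(Step):  # hypothetical; real steps add parsing on top
        pass

    path = MnistStep.download('https://example.com/mnist.pkl.gz')
    # Returns the local path inside the class's cache directory; presumably a
    # repeat call finds the cached file instead of downloading again.
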
6,131
saltstack/salt
salt/netapi/rest_cherrypy/event_processor.py
SaltInfo.process
def process(self, salt_data, token, opts): ''' Process events and publish data ''' parts = salt_data['tag'].split('/') if len(parts) < 2: return # TBD: Simplify these conditional expressions if parts[1] == 'job': if parts[3] == 'new': self.process_new_job_event(salt_data) if salt_data['data']['fun'] == 'grains.items': self.minions = {} elif parts[3] == 'ret': self.process_ret_job_event(salt_data) if salt_data['data']['fun'] == 'grains.items': self.process_minion_update(salt_data) if parts[1] == 'key': self.process_key_event(salt_data) if parts[1] == 'presence': self.process_presence_events(salt_data, token, opts)
python
def process(self, salt_data, token, opts): ''' Process events and publish data ''' parts = salt_data['tag'].split('/') if len(parts) < 2: return # TBD: Simplify these conditional expressions if parts[1] == 'job': if parts[3] == 'new': self.process_new_job_event(salt_data) if salt_data['data']['fun'] == 'grains.items': self.minions = {} elif parts[3] == 'ret': self.process_ret_job_event(salt_data) if salt_data['data']['fun'] == 'grains.items': self.process_minion_update(salt_data) if parts[1] == 'key': self.process_key_event(salt_data) if parts[1] == 'presence': self.process_presence_events(salt_data, token, opts)
['def', 'process', '(', 'self', ',', 'salt_data', ',', 'token', ',', 'opts', ')', ':', 'parts', '=', 'salt_data', '[', "'tag'", ']', '.', 'split', '(', "'/'", ')', 'if', 'len', '(', 'parts', ')', '<', '2', ':', 'return', '# TBD: Simplify these conditional expressions', 'if', 'parts', '[', '1', ']', '==', "'job'", ':', 'if', 'parts', '[', '3', ']', '==', "'new'", ':', 'self', '.', 'process_new_job_event', '(', 'salt_data', ')', 'if', 'salt_data', '[', "'data'", ']', '[', "'fun'", ']', '==', "'grains.items'", ':', 'self', '.', 'minions', '=', '{', '}', 'elif', 'parts', '[', '3', ']', '==', "'ret'", ':', 'self', '.', 'process_ret_job_event', '(', 'salt_data', ')', 'if', 'salt_data', '[', "'data'", ']', '[', "'fun'", ']', '==', "'grains.items'", ':', 'self', '.', 'process_minion_update', '(', 'salt_data', ')', 'if', 'parts', '[', '1', ']', '==', "'key'", ':', 'self', '.', 'process_key_event', '(', 'salt_data', ')', 'if', 'parts', '[', '1', ']', '==', "'presence'", ':', 'self', '.', 'process_presence_events', '(', 'salt_data', ',', 'token', ',', 'opts', ')']
Process events and publish data
['Process', 'events', 'and', 'publish', 'data']
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/netapi/rest_cherrypy/event_processor.py#L190-L211
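Illustrative event tags and the branch of process above they reach (payloads elided; tag shapes follow Salt's salt/job/<jid>/... convention):

    # 'salt/job/20190101000000000000/new'           -> process_new_job_event
    # 'salt/job/20190101000000000000/ret/minion-1'  -> process_ret_job_event
    # 'salt/key'                                    -> process_key_event
    # 'salt/presence/present'                       -> process_presence_events
    # A grains.items return additionally triggers process_minion_update.
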
6,132
quiltdata/quilt
compiler/quilt/tools/command.py
_check_team_exists
def _check_team_exists(team): """ Check that the team registry actually exists. """ if team is None: return hostname = urlparse(get_registry_url(team)).hostname try: socket.gethostbyname(hostname) except IOError: try: # Do we have internet? socket.gethostbyname('quiltdata.com') except IOError: message = "Can't find quiltdata.com. Check your internet connection." else: message = "Unable to connect to registry. Is the team name %r correct?" % team raise CommandException(message)
python
def _check_team_exists(team): """ Check that the team registry actually exists. """ if team is None: return hostname = urlparse(get_registry_url(team)).hostname try: socket.gethostbyname(hostname) except IOError: try: # Do we have internet? socket.gethostbyname('quiltdata.com') except IOError: message = "Can't find quiltdata.com. Check your internet connection." else: message = "Unable to connect to registry. Is the team name %r correct?" % team raise CommandException(message)
['def', '_check_team_exists', '(', 'team', ')', ':', 'if', 'team', 'is', 'None', ':', 'return', 'hostname', '=', 'urlparse', '(', 'get_registry_url', '(', 'team', ')', ')', '.', 'hostname', 'try', ':', 'socket', '.', 'gethostbyname', '(', 'hostname', ')', 'except', 'IOError', ':', 'try', ':', '# Do we have internet?', 'socket', '.', 'gethostbyname', '(', "'quiltdata.com'", ')', 'except', 'IOError', ':', 'message', '=', '"Can\'t find quiltdata.com. Check your internet connection."', 'else', ':', 'message', '=', '"Unable to connect to registry. Is the team name %r correct?"', '%', 'team', 'raise', 'CommandException', '(', 'message', ')']
Check that the team registry actually exists.
['Check', 'that', 'the', 'team', 'registry', 'actually', 'exists', '.']
train
https://github.com/quiltdata/quilt/blob/651853e7e89a8af86e0ff26167e752efa5878c12/compiler/quilt/tools/command.py#L375-L393
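A failure-mode sketch of _check_team_exists above (team name illustrative):

    _check_team_exists(None)    # no-op: nothing to verify
    _check_team_exists('acme')  # resolves the team registry hostname; on failure,
                                # probes quiltdata.com to tell 'no internet' apart
                                # from 'bad team name', then raises CommandException
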
6,133
Erotemic/utool
_broken/_grave.py
roundrobin
def roundrobin(*iterables): """roundrobin('ABC', 'D', 'EF') --> A D E B F C""" raise NotImplementedError('not sure if this implementation is correct') # http://stackoverflow.com/questions/11125212/interleaving-lists-in-python #sentinel = object() #return (x for x in chain(*zip_longest(fillvalue=sentinel, *iterables)) if x is not sentinel) pending = len(iterables) if six.PY2: nexts = cycle(iter(it).next for it in iterables) else: nexts = cycle(iter(it).__next__ for it in iterables) while pending: try: for next in nexts: yield next() except StopIteration: pending -= 1 nexts = cycle(islice(nexts, pending))
python
def roundrobin(*iterables): """roundrobin('ABC', 'D', 'EF') --> A D E B F C""" raise NotImplementedError('not sure if this implementation is correct') # http://stackoverflow.com/questions/11125212/interleaving-lists-in-python #sentinel = object() #return (x for x in chain(*zip_longest(fillvalue=sentinel, *iterables)) if x is not sentinel) pending = len(iterables) if six.PY2: nexts = cycle(iter(it).next for it in iterables) else: nexts = cycle(iter(it).__next__ for it in iterables) while pending: try: for next in nexts: yield next() except StopIteration: pending -= 1 nexts = cycle(islice(nexts, pending))
['def', 'roundrobin', '(', '*', 'iterables', ')', ':', 'raise', 'NotImplementedError', '(', "'not sure if this implementation is correct'", ')', '# http://stackoverflow.com/questions/11125212/interleaving-lists-in-python', '#sentinel = object()', '#return (x for x in chain(*zip_longest(fillvalue=sentinel, *iterables)) if x is not sentinel)', 'pending', '=', 'len', '(', 'iterables', ')', 'if', 'six', '.', 'PY2', ':', 'nexts', '=', 'cycle', '(', 'iter', '(', 'it', ')', '.', 'next', 'for', 'it', 'in', 'iterables', ')', 'else', ':', 'nexts', '=', 'cycle', '(', 'iter', '(', 'it', ')', '.', '__next__', 'for', 'it', 'in', 'iterables', ')', 'while', 'pending', ':', 'try', ':', 'for', 'next', 'in', 'nexts', ':', 'yield', 'next', '(', ')', 'except', 'StopIteration', ':', 'pending', '-=', '1', 'nexts', '=', 'cycle', '(', 'islice', '(', 'nexts', ',', 'pending', ')', ')']
roundrobin('ABC', 'D', 'EF') --> A D E B F C
['roundrobin', '(', 'ABC', 'D', 'EF', ')', '--', '>', 'A', 'D', 'E', 'B', 'F', 'C']
train
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/_broken/_grave.py#L47-L64
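The body below the raise in roundrobin above is essentially the classic itertools round-robin recipe; a working Python 3 sketch, renamed so it is not mistaken for the library's own code:

    from itertools import cycle, islice

    def roundrobin_sketch(*iterables):
        pending = len(iterables)
        nexts = cycle(iter(it).__next__ for it in iterables)
        while pending:
            try:
                for nxt in nexts:
                    yield nxt()
            except StopIteration:
                # One iterable ran dry: drop its __next__ from the rotation.
                pending -= 1
                nexts = cycle(islice(nexts, pending))

    print(list(roundrobin_sketch('ABC', 'D', 'EF')))  # ['A', 'D', 'E', 'B', 'F', 'C']
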
6,134
ga4gh/ga4gh-server
ga4gh/server/sqlite_backend.py
limitsSql
def limitsSql(startIndex=0, maxResults=0): """ Construct a SQL LIMIT clause """ if startIndex and maxResults: return " LIMIT {}, {}".format(startIndex, maxResults) elif startIndex: raise Exception("startIndex was provided, but maxResults was not") elif maxResults: return " LIMIT {}".format(maxResults) else: return ""
python
def limitsSql(startIndex=0, maxResults=0): """ Construct a SQL LIMIT clause """ if startIndex and maxResults: return " LIMIT {}, {}".format(startIndex, maxResults) elif startIndex: raise Exception("startIndex was provided, but maxResults was not") elif maxResults: return " LIMIT {}".format(maxResults) else: return ""
['def', 'limitsSql', '(', 'startIndex', '=', '0', ',', 'maxResults', '=', '0', ')', ':', 'if', 'startIndex', 'and', 'maxResults', ':', 'return', '" LIMIT {}, {}"', '.', 'format', '(', 'startIndex', ',', 'maxResults', ')', 'elif', 'startIndex', ':', 'raise', 'Exception', '(', '"startIndex was provided, but maxResults was not"', ')', 'elif', 'maxResults', ':', 'return', '" LIMIT {}"', '.', 'format', '(', 'maxResults', ')', 'else', ':', 'return', '""']
Construct a SQL LIMIT clause
['Construct', 'a', 'SQL', 'LIMIT', 'clause']
train
https://github.com/ga4gh/ga4gh-server/blob/1aa18922ef136db8604f6f098cb1732cba6f2a76/ga4gh/server/sqlite_backend.py#L35-L46
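Worked calls for limitsSql above:

    limitsSql()                              # ''
    limitsSql(maxResults=10)                 # ' LIMIT 10'
    limitsSql(startIndex=20, maxResults=10)  # ' LIMIT 20, 10'
    limitsSql(startIndex=20)                 # raises: an offset needs a limit
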
6,135
maceoutliner/django-fiction-outlines
fiction_outlines/models.py
StoryElementNode.all_characters
def all_characters(self): ''' Returns a queryset of all characters associated with this node and its descendants, excluding any duplicates. ''' qs = self.assoc_characters.all() for node in self.get_descendants(): qs2 = node.assoc_characters.all() qs = qs.union(qs2).distinct('pk') return qs
python
def all_characters(self): ''' Returns a queryset of all characters associated with this node and its descendants, excluding any duplicates. ''' qs = self.assoc_characters.all() for node in self.get_descendants(): qs2 = node.assoc_characters.all() qs = qs.union(qs2).distinct('pk') return qs
['def', 'all_characters', '(', 'self', ')', ':', 'qs', '=', 'self', '.', 'assoc_characters', '.', 'all', '(', ')', 'for', 'node', 'in', 'self', '.', 'get_descendants', '(', ')', ':', 'qs2', '=', 'node', '.', 'assoc_characters', '.', 'all', '(', ')', 'qs', '=', 'qs', '.', 'union', '(', 'qs2', ')', '.', 'distinct', '(', "'pk'", ')', 'return', 'qs']
Returns a queryset of all characters associated with this node and its descendants, excluding any duplicates.
['Returns', 'a', 'queryset', 'of', 'all', 'characters', 'associated', 'with', 'this', 'node', 'and', 'its', 'descendants', 'excluding', 'any', 'duplicates', '.']
train
https://github.com/maceoutliner/django-fiction-outlines/blob/6c58e356af3fbe7b23557643ba27e46eaef9d4e3/fiction_outlines/models.py#L817-L826
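A usage sketch for all_characters above, with a hypothetical lookup (the name field is an assumption, and the method is called here exactly as defined in the record, though the model may wrap it as a property):

    act_one = StoryElementNode.objects.get(name='Act I')  # illustrative lookup
    cast = act_one.all_characters()
    # Characters tagged on Act I or any scene beneath it, de-duplicated by pk.
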
6,136
mbedmicro/pyOCD
pyocd/target/pack/cmsis_pack.py
CmsisPackDevice._build_memory_regions
def _build_memory_regions(self): """! @brief Creates memory region instances for the device. For each `<memory>` element in the device info, a memory region object is created and added to the `_regions` attribute. IROM or non-writable memories are created as RomRegions by this method. They will be converted to FlashRegions by _build_flash_regions(). """ for elem in self._info.memories: try: # Get the region name, type, and access permissions. if 'name' in elem.attrib: name = elem.attrib['name'] access = elem.attrib['access'] if ('p' in access): type = MemoryType.DEVICE elif ('w' in access): type = MemoryType.RAM else: type = MemoryType.ROM elif 'id' in elem.attrib: name = elem.attrib['id'] if 'RAM' in name: access = 'rwx' type = MemoryType.RAM else: access = 'rx' type = MemoryType.ROM else: continue # Both start and size are required attributes. start = int(elem.attrib['start'], base=0) size = int(elem.attrib['size'], base=0) isDefault = _get_bool_attribute(elem, 'default') isStartup = _get_bool_attribute(elem, 'startup') if isStartup: self._saw_startup = True attrs = { 'name': name, 'start': start, 'length': size, 'access': access, 'is_default': isDefault, 'is_boot_memory': isStartup, 'is_testable': isDefault, 'alias': elem.attrib.get('alias', None), } # Create the memory region and add to map. region = MEMORY_TYPE_CLASS_MAP[type](**attrs) self._regions.append(region) # Record the first default ram for use in flash algos. if self._default_ram is None and type == MemoryType.RAM and isDefault: self._default_ram = region except (KeyError, ValueError) as err: # Ignore errors. LOG.debug("ignoring error parsing memories for CMSIS-Pack devices %s: %s", self.part_number, str(err))
python
def _build_memory_regions(self): """! @brief Creates memory region instances for the device. For each `<memory>` element in the device info, a memory region object is created and added to the `_regions` attribute. IROM or non-writable memories are created as RomRegions by this method. They will be converted to FlashRegions by _build_flash_regions(). """ for elem in self._info.memories: try: # Get the region name, type, and access permissions. if 'name' in elem.attrib: name = elem.attrib['name'] access = elem.attrib['access'] if ('p' in access): type = MemoryType.DEVICE elif ('w' in access): type = MemoryType.RAM else: type = MemoryType.ROM elif 'id' in elem.attrib: name = elem.attrib['id'] if 'RAM' in name: access = 'rwx' type = MemoryType.RAM else: access = 'rx' type = MemoryType.ROM else: continue # Both start and size are required attributes. start = int(elem.attrib['start'], base=0) size = int(elem.attrib['size'], base=0) isDefault = _get_bool_attribute(elem, 'default') isStartup = _get_bool_attribute(elem, 'startup') if isStartup: self._saw_startup = True attrs = { 'name': name, 'start': start, 'length': size, 'access': access, 'is_default': isDefault, 'is_boot_memory': isStartup, 'is_testable': isDefault, 'alias': elem.attrib.get('alias', None), } # Create the memory region and add to map. region = MEMORY_TYPE_CLASS_MAP[type](**attrs) self._regions.append(region) # Record the first default ram for use in flash algos. if self._default_ram is None and type == MemoryType.RAM and isDefault: self._default_ram = region except (KeyError, ValueError) as err: # Ignore errors. LOG.debug("ignoring error parsing memories for CMSIS-Pack devices %s: %s", self.part_number, str(err))
['def', '_build_memory_regions', '(', 'self', ')', ':', 'for', 'elem', 'in', 'self', '.', '_info', '.', 'memories', ':', 'try', ':', '# Get the region name, type, and access permissions.', 'if', "'name'", 'in', 'elem', '.', 'attrib', ':', 'name', '=', 'elem', '.', 'attrib', '[', "'name'", ']', 'access', '=', 'elem', '.', 'attrib', '[', "'access'", ']', 'if', '(', "'p'", 'in', 'access', ')', ':', 'type', '=', 'MemoryType', '.', 'DEVICE', 'elif', '(', "'w'", 'in', 'access', ')', ':', 'type', '=', 'MemoryType', '.', 'RAM', 'else', ':', 'type', '=', 'MemoryType', '.', 'ROM', 'elif', "'id'", 'in', 'elem', '.', 'attrib', ':', 'name', '=', 'elem', '.', 'attrib', '[', "'id'", ']', 'if', "'RAM'", 'in', 'name', ':', 'access', '=', "'rwx'", 'type', '=', 'MemoryType', '.', 'RAM', 'else', ':', 'access', '=', "'rx'", 'type', '=', 'MemoryType', '.', 'ROM', 'else', ':', 'continue', '# Both start and size are required attributes.', 'start', '=', 'int', '(', 'elem', '.', 'attrib', '[', "'start'", ']', ',', 'base', '=', '0', ')', 'size', '=', 'int', '(', 'elem', '.', 'attrib', '[', "'size'", ']', ',', 'base', '=', '0', ')', 'isDefault', '=', '_get_bool_attribute', '(', 'elem', ',', "'default'", ')', 'isStartup', '=', '_get_bool_attribute', '(', 'elem', ',', "'startup'", ')', 'if', 'isStartup', ':', 'self', '.', '_saw_startup', '=', 'True', 'attrs', '=', '{', "'name'", ':', 'name', ',', "'start'", ':', 'start', ',', "'length'", ':', 'size', ',', "'access'", ':', 'access', ',', "'is_default'", ':', 'isDefault', ',', "'is_boot_memory'", ':', 'isStartup', ',', "'is_testable'", ':', 'isDefault', ',', "'alias'", ':', 'elem', '.', 'attrib', '.', 'get', '(', "'alias'", ',', 'None', ')', ',', '}', '# Create the memory region and add to map.', 'region', '=', 'MEMORY_TYPE_CLASS_MAP', '[', 'type', ']', '(', '*', '*', 'attrs', ')', 'self', '.', '_regions', '.', 'append', '(', 'region', ')', '# Record the first default ram for use in flash algos.', 'if', 'self', '.', '_default_ram', 'is', 'None', 'and', 'type', '==', 'MemoryType', '.', 'RAM', 'and', 'isDefault', ':', 'self', '.', '_default_ram', '=', 'region', 'except', '(', 'KeyError', ',', 'ValueError', ')', 'as', 'err', ':', '# Ignore errors.', 'LOG', '.', 'debug', '(', '"ignoring error parsing memories for CMSIS-Pack devices %s: %s"', ',', 'self', '.', 'part_number', ',', 'str', '(', 'err', ')', ')']
! @brief Creates memory region instances for the device. For each `<memory>` element in the device info, a memory region object is created and added to the `_regions` attribute. IROM or non-writable memories are created as RomRegions by this method. They will be converted to FlashRegions by _build_flash_regions().
['!']
train
https://github.com/mbedmicro/pyOCD/blob/41a174718a9739f3cbe785c2ba21cb7fd1310c6f/pyocd/target/pack/cmsis_pack.py#L279-L341
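The kind of CMSIS-Pack <memory> element _build_memory_regions above parses, with illustrative attribute values:

    # <memory name="IROM1" access="rx"  start="0x00000000" size="0x80000" default="1" startup="1"/>
    # <memory name="IRAM1" access="rwx" start="0x20000000" size="0x20000" default="1"/>
    # -> a boot RomRegion (converted to flash later) plus a default RamRegion,
    #    which is also recorded as self._default_ram for flash algorithms.
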
6,137
hotzenklotz/pybeerxml
pybeerxml/parser.py
Parser.parse
def parse(self, xml_file): "Get a list of parsed recipes from BeerXML input" recipes = [] with open(xml_file, "rt") as f: tree = ElementTree.parse(f) for recipeNode in tree.iter(): if self.to_lower(recipeNode.tag) != "recipe": continue recipe = Recipe() recipes.append(recipe) for recipeProperty in list(recipeNode): tag_name = self.to_lower(recipeProperty.tag) if tag_name == "fermentables": for fermentable_node in list(recipeProperty): fermentable = Fermentable() self.nodes_to_object(fermentable_node, fermentable) recipe.fermentables.append(fermentable) elif tag_name == "yeasts": for yeast_node in list(recipeProperty): yeast = Yeast() self.nodes_to_object(yeast_node, yeast) recipe.yeasts.append(yeast) elif tag_name == "hops": for hop_node in list(recipeProperty): hop = Hop() self.nodes_to_object(hop_node, hop) recipe.hops.append(hop) elif tag_name == "miscs": for misc_node in list(recipeProperty): misc = Misc() self.nodes_to_object(misc_node, misc) recipe.miscs.append(misc) elif tag_name == "style": style = Style() recipe.style = style self.nodes_to_object(recipeProperty, style) elif tag_name == "mash": for mash_node in list(recipeProperty): mash = Mash() recipe.mash = mash if self.to_lower(mash_node.tag) == "mash_steps": for mash_step_node in list(mash_node): mash_step = MashStep() self.nodes_to_object(mash_step_node, mash_step) mash.steps.append(mash_step) else: self.nodes_to_object(mash_node, mash) else: self.node_to_object(recipeProperty, recipe) return recipes
python
def parse(self, xml_file): "Get a list of parsed recipes from BeerXML input" recipes = [] with open(xml_file, "rt") as f: tree = ElementTree.parse(f) for recipeNode in tree.iter(): if self.to_lower(recipeNode.tag) != "recipe": continue recipe = Recipe() recipes.append(recipe) for recipeProperty in list(recipeNode): tag_name = self.to_lower(recipeProperty.tag) if tag_name == "fermentables": for fermentable_node in list(recipeProperty): fermentable = Fermentable() self.nodes_to_object(fermentable_node, fermentable) recipe.fermentables.append(fermentable) elif tag_name == "yeasts": for yeast_node in list(recipeProperty): yeast = Yeast() self.nodes_to_object(yeast_node, yeast) recipe.yeasts.append(yeast) elif tag_name == "hops": for hop_node in list(recipeProperty): hop = Hop() self.nodes_to_object(hop_node, hop) recipe.hops.append(hop) elif tag_name == "miscs": for misc_node in list(recipeProperty): misc = Misc() self.nodes_to_object(misc_node, misc) recipe.miscs.append(misc) elif tag_name == "style": style = Style() recipe.style = style self.nodes_to_object(recipeProperty, style) elif tag_name == "mash": for mash_node in list(recipeProperty): mash = Mash() recipe.mash = mash if self.to_lower(mash_node.tag) == "mash_steps": for mash_step_node in list(mash_node): mash_step = MashStep() self.nodes_to_object(mash_step_node, mash_step) mash.steps.append(mash_step) else: self.nodes_to_object(mash_node, mash) else: self.node_to_object(recipeProperty, recipe) return recipes
['def', 'parse', '(', 'self', ',', 'xml_file', ')', ':', 'recipes', '=', '[', ']', 'with', 'open', '(', 'xml_file', ',', '"rt"', ')', 'as', 'f', ':', 'tree', '=', 'ElementTree', '.', 'parse', '(', 'f', ')', 'for', 'recipeNode', 'in', 'tree', '.', 'iter', '(', ')', ':', 'if', 'self', '.', 'to_lower', '(', 'recipeNode', '.', 'tag', ')', '!=', '"recipe"', ':', 'continue', 'recipe', '=', 'Recipe', '(', ')', 'recipes', '.', 'append', '(', 'recipe', ')', 'for', 'recipeProperty', 'in', 'list', '(', 'recipeNode', ')', ':', 'tag_name', '=', 'self', '.', 'to_lower', '(', 'recipeProperty', '.', 'tag', ')', 'if', 'tag_name', '==', '"fermentables"', ':', 'for', 'fermentable_node', 'in', 'list', '(', 'recipeProperty', ')', ':', 'fermentable', '=', 'Fermentable', '(', ')', 'self', '.', 'nodes_to_object', '(', 'fermentable_node', ',', 'fermentable', ')', 'recipe', '.', 'fermentables', '.', 'append', '(', 'fermentable', ')', 'elif', 'tag_name', '==', '"yeasts"', ':', 'for', 'yeast_node', 'in', 'list', '(', 'recipeProperty', ')', ':', 'yeast', '=', 'Yeast', '(', ')', 'self', '.', 'nodes_to_object', '(', 'yeast_node', ',', 'yeast', ')', 'recipe', '.', 'yeasts', '.', 'append', '(', 'yeast', ')', 'elif', 'tag_name', '==', '"hops"', ':', 'for', 'hop_node', 'in', 'list', '(', 'recipeProperty', ')', ':', 'hop', '=', 'Hop', '(', ')', 'self', '.', 'nodes_to_object', '(', 'hop_node', ',', 'hop', ')', 'recipe', '.', 'hops', '.', 'append', '(', 'hop', ')', 'elif', 'tag_name', '==', '"miscs"', ':', 'for', 'misc_node', 'in', 'list', '(', 'recipeProperty', ')', ':', 'misc', '=', 'Misc', '(', ')', 'self', '.', 'nodes_to_object', '(', 'misc_node', ',', 'misc', ')', 'recipe', '.', 'miscs', '.', 'append', '(', 'misc', ')', 'elif', 'tag_name', '==', '"style"', ':', 'style', '=', 'Style', '(', ')', 'recipe', '.', 'style', '=', 'style', 'self', '.', 'nodes_to_object', '(', 'recipeProperty', ',', 'style', ')', 'elif', 'tag_name', '==', '"mash"', ':', 'for', 'mash_node', 'in', 'list', '(', 'recipeProperty', ')', ':', 'mash', '=', 'Mash', '(', ')', 'recipe', '.', 'mash', '=', 'mash', 'if', 'self', '.', 'to_lower', '(', 'mash_node', '.', 'tag', ')', '==', '"mash_steps"', ':', 'for', 'mash_step_node', 'in', 'list', '(', 'mash_node', ')', ':', 'mash_step', '=', 'MashStep', '(', ')', 'self', '.', 'nodes_to_object', '(', 'mash_step_node', ',', 'mash_step', ')', 'mash', '.', 'steps', '.', 'append', '(', 'mash_step', ')', 'else', ':', 'self', '.', 'nodes_to_object', '(', 'mash_node', ',', 'mash', ')', 'else', ':', 'self', '.', 'node_to_object', '(', 'recipeProperty', ',', 'recipe', ')', 'return', 'recipes']
Get a list of parsed recipes from BeerXML input
['Get', 'a', 'list', 'of', 'parsed', 'recipes', 'from', 'BeerXML', 'input']
train
https://github.com/hotzenklotz/pybeerxml/blob/e9cf8d6090b1e01e5bbb101e255792b134affbe0/pybeerxml/parser.py#L40-L104
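Typical use of parse above, assuming Parser is exported at the package root and that Recipe/Hop expose the attributes set via node_to_object:

    from pybeerxml import Parser

    recipes = Parser().parse('./oatmeal_stout.xml')  # path illustrative
    for recipe in recipes:
        print(recipe.name, len(recipe.hops), 'hop additions')
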
6,138
jslang/responsys
responsys/client.py
InteractClient.connect
def connect(self): """ Connects to the Responsys soap service Uses the credentials passed to the client init to log in and set up the session id returned. Returns True on successful connection, otherwise False. """ if self.session and self.session.is_expired: # Close the session to avoid max concurrent session errors self.disconnect(abandon_session=True) if not self.session: try: login_result = self.login(self.username, self.password) except AccountFault: log.error('Login failed, invalid username or password') raise else: self.session = login_result.session_id self.connected = time() return self.connected
python
def connect(self): """ Connects to the Responsys soap service Uses the credentials passed to the client init to log in and set up the session id returned. Returns True on successful connection, otherwise False. """ if self.session and self.session.is_expired: # Close the session to avoid max concurrent session errors self.disconnect(abandon_session=True) if not self.session: try: login_result = self.login(self.username, self.password) except AccountFault: log.error('Login failed, invalid username or password') raise else: self.session = login_result.session_id self.connected = time() return self.connected
['def', 'connect', '(', 'self', ')', ':', 'if', 'self', '.', 'session', 'and', 'self', '.', 'session', '.', 'is_expired', ':', '# Close the session to avoid max concurrent session errors', 'self', '.', 'disconnect', '(', 'abandon_session', '=', 'True', ')', 'if', 'not', 'self', '.', 'session', ':', 'try', ':', 'login_result', '=', 'self', '.', 'login', '(', 'self', '.', 'username', ',', 'self', '.', 'password', ')', 'except', 'AccountFault', ':', 'log', '.', 'error', '(', "'Login failed, invalid username or password'", ')', 'raise', 'else', ':', 'self', '.', 'session', '=', 'login_result', '.', 'session_id', 'self', '.', 'connected', '=', 'time', '(', ')', 'return', 'self', '.', 'connected']
Connects to the Responsys soap service Uses the credentials passed to the client init to log in and set up the session id returned. Returns True on successful connection, otherwise False.
['Connects', 'to', 'the', 'Responsys', 'soap', 'service']
train
https://github.com/jslang/responsys/blob/9b355a444c0c75dff41064502c1e2b76dfd5cb93/responsys/client.py#L133-L155
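A session-lifecycle sketch around connect above; the constructor arguments are assumptions about this client:

    client = InteractClient(username='api_user', password='...', pod='5')  # hypothetical
    if client.connect():         # truthy (a timestamp) once logged in
        try:
            pass                 # issue Interact API calls here
        finally:
            client.disconnect()  # frees the concurrent-session slot
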
6,139
QUANTAXIS/QUANTAXIS
QUANTAXIS/QASU/save_tdx_file.py
QA_save_tdx_to_mongo
def QA_save_tdx_to_mongo(file_dir, client=DATABASE): """save file Arguments: file_dir {str:direction} -- 文件的地址 Keyword Arguments: client {Mongodb:Connection} -- Mongo Connection (default: {DATABASE}) """ reader = TdxMinBarReader() __coll = client.stock_min_five for a, v, files in os.walk(file_dir): for file in files: if (str(file)[0:2] == 'sh' and int(str(file)[2]) == 6) or \ (str(file)[0:2] == 'sz' and int(str(file)[2]) == 0) or \ (str(file)[0:2] == 'sz' and int(str(file)[2]) == 3): QA_util_log_info('Now_saving ' + str(file) [2:8] + '\'s 5 min tick') fname = file_dir + os.sep + file df = reader.get_df(fname) df['code'] = str(file)[2:8] df['market'] = str(file)[0:2] df['datetime'] = [str(x) for x in list(df.index)] df['date'] = [str(x)[0:10] for x in list(df.index)] df['time_stamp'] = df['datetime'].apply( lambda x: QA_util_time_stamp(x)) df['date_stamp'] = df['date'].apply( lambda x: QA_util_date_stamp(x)) data_json = json.loads(df.to_json(orient='records')) __coll.insert_many(data_json)
python
def QA_save_tdx_to_mongo(file_dir, client=DATABASE): """save file Arguments: file_dir {str:direction} -- 文件的地址 Keyword Arguments: client {Mongodb:Connection} -- Mongo Connection (default: {DATABASE}) """ reader = TdxMinBarReader() __coll = client.stock_min_five for a, v, files in os.walk(file_dir): for file in files: if (str(file)[0:2] == 'sh' and int(str(file)[2]) == 6) or \ (str(file)[0:2] == 'sz' and int(str(file)[2]) == 0) or \ (str(file)[0:2] == 'sz' and int(str(file)[2]) == 3): QA_util_log_info('Now_saving ' + str(file) [2:8] + '\'s 5 min tick') fname = file_dir + os.sep + file df = reader.get_df(fname) df['code'] = str(file)[2:8] df['market'] = str(file)[0:2] df['datetime'] = [str(x) for x in list(df.index)] df['date'] = [str(x)[0:10] for x in list(df.index)] df['time_stamp'] = df['datetime'].apply( lambda x: QA_util_time_stamp(x)) df['date_stamp'] = df['date'].apply( lambda x: QA_util_date_stamp(x)) data_json = json.loads(df.to_json(orient='records')) __coll.insert_many(data_json)
['def', 'QA_save_tdx_to_mongo', '(', 'file_dir', ',', 'client', '=', 'DATABASE', ')', ':', 'reader', '=', 'TdxMinBarReader', '(', ')', '__coll', '=', 'client', '.', 'stock_min_five', 'for', 'a', ',', 'v', ',', 'files', 'in', 'os', '.', 'walk', '(', 'file_dir', ')', ':', 'for', 'file', 'in', 'files', ':', 'if', '(', 'str', '(', 'file', ')', '[', '0', ':', '2', ']', '==', "'sh'", 'and', 'int', '(', 'str', '(', 'file', ')', '[', '2', ']', ')', '==', '6', ')', 'or', '(', 'str', '(', 'file', ')', '[', '0', ':', '2', ']', '==', "'sz'", 'and', 'int', '(', 'str', '(', 'file', ')', '[', '2', ']', ')', '==', '0', ')', 'or', '(', 'str', '(', 'file', ')', '[', '0', ':', '2', ']', '==', "'sz'", 'and', 'int', '(', 'str', '(', 'file', ')', '[', '2', ']', ')', '==', '3', ')', ':', 'QA_util_log_info', '(', "'Now_saving '", '+', 'str', '(', 'file', ')', '[', '2', ':', '8', ']', '+', "'\\'s 5 min tick'", ')', 'fname', '=', 'file_dir', '+', 'os', '.', 'sep', '+', 'file', 'df', '=', 'reader', '.', 'get_df', '(', 'fname', ')', 'df', '[', "'code'", ']', '=', 'str', '(', 'file', ')', '[', '2', ':', '8', ']', 'df', '[', "'market'", ']', '=', 'str', '(', 'file', ')', '[', '0', ':', '2', ']', 'df', '[', "'datetime'", ']', '=', '[', 'str', '(', 'x', ')', 'for', 'x', 'in', 'list', '(', 'df', '.', 'index', ')', ']', 'df', '[', "'date'", ']', '=', '[', 'str', '(', 'x', ')', '[', '0', ':', '10', ']', 'for', 'x', 'in', 'list', '(', 'df', '.', 'index', ')', ']', 'df', '[', "'time_stamp'", ']', '=', 'df', '[', "'datetime'", ']', '.', 'apply', '(', 'lambda', 'x', ':', 'QA_util_time_stamp', '(', 'x', ')', ')', 'df', '[', "'date_stamp'", ']', '=', 'df', '[', "'date'", ']', '.', 'apply', '(', 'lambda', 'x', ':', 'QA_util_date_stamp', '(', 'x', ')', ')', 'data_json', '=', 'json', '.', 'loads', '(', 'df', '.', 'to_json', '(', 'orient', '=', "'records'", ')', ')', '__coll', '.', 'insert_many', '(', 'data_json', ')']
save file Arguments: file_dir {str:direction} -- 文件的地址 Keyword Arguments: client {Mongodb:Connection} -- Mongo Connection (default: {DATABASE})
['save', 'file', 'Arguments', ':', 'file_dir', '{', 'str', ':', 'direction', '}', '--', '文件的地址', 'Keyword', 'Arguments', ':', 'client', '{', 'Mongodb', ':', 'Connection', '}', '--', 'Mongo', 'Connection', '(', 'default', ':', '{', 'DATABASE', '}', ')']
train
https://github.com/QUANTAXIS/QUANTAXIS/blob/bb1fe424e4108b62a1f712b81a05cf829297a5c0/QUANTAXIS/QASU/save_tdx_file.py#L35-L68
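An illustrative invocation of QA_save_tdx_to_mongo above (the TDX export path is hypothetical; DATABASE already defaults to the project's Mongo connection):

    from pymongo import MongoClient

    QA_save_tdx_to_mongo('/opt/tdx/vipdoc/sh/minline',
                         client=MongoClient().quantaxis)
    # Walks the tree, keeps sh6*/sz0*/sz3* files, and bulk-inserts 5-minute
    # bars with date/time stamps into the stock_min_five collection.
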
6,140
crossbario/txaio
txaio/tx.py
_TxApi.make_batched_timer
def make_batched_timer(self, bucket_seconds, chunk_size=100): """ Creates and returns an object implementing :class:`txaio.IBatchedTimer`. :param bucket_seconds: the number of seconds in each bucket. That is, a value of 5 means that any timeout within a 5 second window will be in the same bucket, and get notified at the same time. This is only accurate to "milliseconds". :param chunk_size: when "doing" the callbacks in a particular bucket, this controls how many we do at once before yielding to the reactor. """ def get_seconds(): return self._get_loop().seconds() def create_delayed_call(delay, fun, *args, **kwargs): return self._get_loop().callLater(delay, fun, *args, **kwargs) return _BatchedTimer( bucket_seconds * 1000.0, chunk_size, seconds_provider=get_seconds, delayed_call_creator=create_delayed_call, )
python
def make_batched_timer(self, bucket_seconds, chunk_size=100): """ Creates and returns an object implementing :class:`txaio.IBatchedTimer`. :param bucket_seconds: the number of seconds in each bucket. That is, a value of 5 means that any timeout within a 5 second window will be in the same bucket, and get notified at the same time. This is only accurate to "milliseconds". :param chunk_size: when "doing" the callbacks in a particular bucket, this controls how many we do at once before yielding to the reactor. """ def get_seconds(): return self._get_loop().seconds() def create_delayed_call(delay, fun, *args, **kwargs): return self._get_loop().callLater(delay, fun, *args, **kwargs) return _BatchedTimer( bucket_seconds * 1000.0, chunk_size, seconds_provider=get_seconds, delayed_call_creator=create_delayed_call, )
['def', 'make_batched_timer', '(', 'self', ',', 'bucket_seconds', ',', 'chunk_size', '=', '100', ')', ':', 'def', 'get_seconds', '(', ')', ':', 'return', 'self', '.', '_get_loop', '(', ')', '.', 'seconds', '(', ')', 'def', 'create_delayed_call', '(', 'delay', ',', 'fun', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'return', 'self', '.', '_get_loop', '(', ')', '.', 'callLater', '(', 'delay', ',', 'fun', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', 'return', '_BatchedTimer', '(', 'bucket_seconds', '*', '1000.0', ',', 'chunk_size', ',', 'seconds_provider', '=', 'get_seconds', ',', 'delayed_call_creator', '=', 'create_delayed_call', ',', ')']
Creates and returns an object implementing :class:`txaio.IBatchedTimer`. :param bucket_seconds: the number of seconds in each bucket. That is, a value of 5 means that any timeout within a 5 second window will be in the same bucket, and get notified at the same time. This is only accurate to "milliseconds". :param chunk_size: when "doing" the callbacks in a particular bucket, this controls how many we do at once before yielding to the reactor.
['Creates', 'and', 'returns', 'an', 'object', 'implementing', ':', 'class', ':', 'txaio', '.', 'IBatchedTimer', '.']
train
https://github.com/crossbario/txaio/blob/29c77ff1210cabd4cc03f16f34672612e7eef704/txaio/tx.py#L437-L462
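A rough, Twisted-free sketch of the bucketing idea behind make_batched_timer: every timeout whose deadline falls inside the same bucket_seconds window is rounded to one shared deadline, so a single underlying timer can notify the whole batch. This works in seconds rather than the milliseconds _BatchedTimer uses internally, and bucket_deadline is an illustrative name.

import math

def bucket_deadline(now, delay, bucket_seconds):
    # Round the absolute deadline up to the next bucket boundary so all
    # timeouts in the same window fire together.
    return math.ceil((now + delay) / bucket_seconds) * bucket_seconds

now = 100.0
for delay in (0.1, 2.0, 4.9, 5.1):
    print(delay, bucket_deadline(now, delay, bucket_seconds=5))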
6,141
TrafficSenseMSD/SumoTools
traci/_gui.py
GuiDomain.screenshot
def screenshot(self, viewID, filename): """screenshot(string, string) -> None Save a screenshot for the given view to the given filename. The fileformat is guessed from the extension, the available formats differ from platform to platform but should at least include ps, svg and pdf, on linux probably gif, png and jpg as well. """ self._connection._sendStringCmd( tc.CMD_SET_GUI_VARIABLE, tc.VAR_SCREENSHOT, viewID, filename)
python
def screenshot(self, viewID, filename): """screenshot(string, string) -> None Save a screenshot for the given view to the given filename. The fileformat is guessed from the extension, the available formats differ from platform to platform but should at least include ps, svg and pdf, on linux probably gif, png and jpg as well. """ self._connection._sendStringCmd( tc.CMD_SET_GUI_VARIABLE, tc.VAR_SCREENSHOT, viewID, filename)
['def', 'screenshot', '(', 'self', ',', 'viewID', ',', 'filename', ')', ':', 'self', '.', '_connection', '.', '_sendStringCmd', '(', 'tc', '.', 'CMD_SET_GUI_VARIABLE', ',', 'tc', '.', 'VAR_SCREENSHOT', ',', 'viewID', ',', 'filename', ')']
screenshot(string, string) -> None Save a screenshot for the given view to the given filename. The fileformat is guessed from the extension, the available formats differ from platform to platform but should at least include ps, svg and pdf, on linux probably gif, png and jpg as well.
['screenshot', '(', 'string', 'string', ')', '-', '>', 'None']
train
https://github.com/TrafficSenseMSD/SumoTools/blob/8607b4f885f1d1798e43240be643efe6dccccdaa/traci/_gui.py#L101-L110
6,142
tswicegood/Dolt
dolt/__init__.py
Dolt.with_headers
def with_headers(self, headers=None, **params): """ Add headers to the request. :param headers: A dict, or a list of key, value pairs :param params: A dict of key value pairs """ if isinstance(headers, (tuple, list)): headers = dict(headers) if params: if isinstance(headers, dict): headers.update(params) elif headers is None: headers = params self._headers.update(headers) return self
python
def with_headers(self, headers=None, **params): """ Add headers to the request. :param headers: A dict, or a list of key, value pairs :param params: A dict of key value pairs """ if isinstance(headers, (tuple, list)): headers = dict(headers) if params: if isinstance(headers, dict): headers.update(params) elif headers is None: headers = params self._headers.update(headers) return self
['def', 'with_headers', '(', 'self', ',', 'headers', '=', 'None', ',', '*', '*', 'params', ')', ':', 'if', 'isinstance', '(', 'headers', ',', '(', 'tuple', ',', 'list', ')', ')', ':', 'headers', '=', 'dict', '(', 'headers', ')', 'if', 'params', ':', 'if', 'isinstance', '(', 'headers', ',', 'dict', ')', ':', 'headers', '.', 'update', '(', 'params', ')', 'elif', 'headers', 'is', 'None', ':', 'headers', '=', 'params', 'self', '.', '_headers', '.', 'update', '(', 'headers', ')', 'return', 'self']
Add headers to the request. :param headers: A dict, or a list of key, value pairs :param params: A dict of key value pairs
['Add', 'headers', 'to', 'the', 'request', '.', ':', 'param', 'headers', ':', 'A', 'dict', 'or', 'a', 'list', 'of', 'key', 'value', 'pairs', ':', 'param', 'params', ':', 'A', 'dict', 'of', 'key', 'value', 'pairs']
train
https://github.com/tswicegood/Dolt/blob/e0da1918b7db18f885734a89f824b9e173cc30a5/dolt/__init__.py#L203-L220
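A standalone sketch of the merging behaviour documented above: headers may arrive as a dict or as a sequence of (key, value) pairs, and keyword arguments are layered on top. merge_headers is an illustrative name, not Dolt's API.

def merge_headers(headers=None, **params):
    # Normalise a sequence of pairs into a dict, then let keyword
    # arguments extend or override it.
    if isinstance(headers, (tuple, list)):
        headers = dict(headers)
    merged = dict(headers or {})
    merged.update(params)
    return merged

print(merge_headers([('Accept', 'text/html')], accept_language='en'))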
6,143
mitsei/dlkit
dlkit/records/assessment/orthographic_visualization/orthographic_records.py
FirstAngleProjectionFormRecord._init_metadata
def _init_metadata(self): """stub""" self._first_angle_metadata = { 'element_id': Id(self.my_osid_object_form._authority, self.my_osid_object_form._namespace, 'first_angle'), 'element_label': 'First Angle', 'instructions': 'set boolean, is this a first angle projection', 'required': False, 'read_only': False, 'linked': False, 'array': False, 'default_boolean_values': [False], 'syntax': 'BOOLEAN', }
python
def _init_metadata(self): """stub""" self._first_angle_metadata = { 'element_id': Id(self.my_osid_object_form._authority, self.my_osid_object_form._namespace, 'first_angle'), 'element_label': 'First Angle', 'instructions': 'set boolean, is this a first angle projection', 'required': False, 'read_only': False, 'linked': False, 'array': False, 'default_boolean_values': [False], 'syntax': 'BOOLEAN', }
['def', '_init_metadata', '(', 'self', ')', ':', 'self', '.', '_first_angle_metadata', '=', '{', "'element_id'", ':', 'Id', '(', 'self', '.', 'my_osid_object_form', '.', '_authority', ',', 'self', '.', 'my_osid_object_form', '.', '_namespace', ',', "'first_angle'", ')', ',', "'element_label'", ':', "'First Angle'", ',', "'instructions'", ':', "'set boolean, is this a first angle projection'", ',', "'required'", ':', 'False', ',', "'read_only'", ':', 'False', ',', "'linked'", ':', 'False', ',', "'array'", ':', 'False', ',', "'default_boolean_values'", ':', '[', 'False', ']', ',', "'syntax'", ':', "'BOOLEAN'", ',', '}']
stub
['stub']
train
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/records/assessment/orthographic_visualization/orthographic_records.py#L60-L74
6,144
bcbio/bcbio-nextgen
bcbio/variation/genotype.py
_run_variantcall_batch_multicore
def _run_variantcall_batch_multicore(items, regions, final_file): """Run variant calling on a batch of items using multiple cores. """ batch_name = _get_batch_name(items) variantcaller = _get_batch_variantcaller(items) work_bams = [dd.get_work_bam(d) or dd.get_align_bam(d) for d in items] def split_fn(data): out = [] for region in regions: region = _region_to_coords(region) chrom, start, end = region region_str = "_".join(str(x) for x in region) out_file = os.path.join(dd.get_work_dir(items[0]), variantcaller, chrom, "%s-%s.vcf.gz" % (batch_name, region_str)) out.append((region, work_bams, out_file)) return final_file, out parallel = {"type": "local", "num_jobs": dd.get_num_cores(items[0]), "cores_per_job": 1} run_parallel = dmulti.runner(parallel, items[0]["config"]) to_run = copy.deepcopy(items[0]) to_run["sam_ref"] = dd.get_ref_file(to_run) to_run["group_orig"] = items parallel_split_combine([[to_run]], split_fn, run_parallel, "variantcall_sample", "concat_variant_files", "vrn_file", ["region", "sam_ref", "config"]) return final_file
python
def _run_variantcall_batch_multicore(items, regions, final_file): """Run variant calling on a batch of items using multiple cores. """ batch_name = _get_batch_name(items) variantcaller = _get_batch_variantcaller(items) work_bams = [dd.get_work_bam(d) or dd.get_align_bam(d) for d in items] def split_fn(data): out = [] for region in regions: region = _region_to_coords(region) chrom, start, end = region region_str = "_".join(str(x) for x in region) out_file = os.path.join(dd.get_work_dir(items[0]), variantcaller, chrom, "%s-%s.vcf.gz" % (batch_name, region_str)) out.append((region, work_bams, out_file)) return final_file, out parallel = {"type": "local", "num_jobs": dd.get_num_cores(items[0]), "cores_per_job": 1} run_parallel = dmulti.runner(parallel, items[0]["config"]) to_run = copy.deepcopy(items[0]) to_run["sam_ref"] = dd.get_ref_file(to_run) to_run["group_orig"] = items parallel_split_combine([[to_run]], split_fn, run_parallel, "variantcall_sample", "concat_variant_files", "vrn_file", ["region", "sam_ref", "config"]) return final_file
['def', '_run_variantcall_batch_multicore', '(', 'items', ',', 'regions', ',', 'final_file', ')', ':', 'batch_name', '=', '_get_batch_name', '(', 'items', ')', 'variantcaller', '=', '_get_batch_variantcaller', '(', 'items', ')', 'work_bams', '=', '[', 'dd', '.', 'get_work_bam', '(', 'd', ')', 'or', 'dd', '.', 'get_align_bam', '(', 'd', ')', 'for', 'd', 'in', 'items', ']', 'def', 'split_fn', '(', 'data', ')', ':', 'out', '=', '[', ']', 'for', 'region', 'in', 'regions', ':', 'region', '=', '_region_to_coords', '(', 'region', ')', 'chrom', ',', 'start', ',', 'end', '=', 'region', 'region_str', '=', '"_"', '.', 'join', '(', 'str', '(', 'x', ')', 'for', 'x', 'in', 'region', ')', 'out_file', '=', 'os', '.', 'path', '.', 'join', '(', 'dd', '.', 'get_work_dir', '(', 'items', '[', '0', ']', ')', ',', 'variantcaller', ',', 'chrom', ',', '"%s-%s.vcf.gz"', '%', '(', 'batch_name', ',', 'region_str', ')', ')', 'out', '.', 'append', '(', '(', 'region', ',', 'work_bams', ',', 'out_file', ')', ')', 'return', 'final_file', ',', 'out', 'parallel', '=', '{', '"type"', ':', '"local"', ',', '"num_jobs"', ':', 'dd', '.', 'get_num_cores', '(', 'items', '[', '0', ']', ')', ',', '"cores_per_job"', ':', '1', '}', 'run_parallel', '=', 'dmulti', '.', 'runner', '(', 'parallel', ',', 'items', '[', '0', ']', '[', '"config"', ']', ')', 'to_run', '=', 'copy', '.', 'deepcopy', '(', 'items', '[', '0', ']', ')', 'to_run', '[', '"sam_ref"', ']', '=', 'dd', '.', 'get_ref_file', '(', 'to_run', ')', 'to_run', '[', '"group_orig"', ']', '=', 'items', 'parallel_split_combine', '(', '[', '[', 'to_run', ']', ']', ',', 'split_fn', ',', 'run_parallel', ',', '"variantcall_sample"', ',', '"concat_variant_files"', ',', '"vrn_file"', ',', '[', '"region"', ',', '"sam_ref"', ',', '"config"', ']', ')', 'return', 'final_file']
Run variant calling on a batch of items using multiple cores.
['Run', 'variant', 'calling', 'on', 'a', 'batch', 'of', 'items', 'using', 'multiple', 'cores', '.']
train
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/genotype.py#L462-L486
6,145
offu/WeRoBot
werobot/client.py
Client.update_custom_service_account
def update_custom_service_account(self, account, nickname, password): """ 修改客服帐号。 :param account: 客服账号的用户名 :param nickname: 客服账号的昵称 :param password: 客服账号的密码 :return: 返回的 JSON 数据包 """ return self.post( url="https://api.weixin.qq.com/customservice/kfaccount/update", data={ "kf_account": account, "nickname": nickname, "password": password } )
python
def update_custom_service_account(self, account, nickname, password): """ 修改客服帐号。 :param account: 客服账号的用户名 :param nickname: 客服账号的昵称 :param password: 客服账号的密码 :return: 返回的 JSON 数据包 """ return self.post( url="https://api.weixin.qq.com/customservice/kfaccount/update", data={ "kf_account": account, "nickname": nickname, "password": password } )
['def', 'update_custom_service_account', '(', 'self', ',', 'account', ',', 'nickname', ',', 'password', ')', ':', 'return', 'self', '.', 'post', '(', 'url', '=', '"https://api.weixin.qq.com/customservice/kfaccount/update"', ',', 'data', '=', '{', '"kf_account"', ':', 'account', ',', '"nickname"', ':', 'nickname', ',', '"password"', ':', 'password', '}', ')']
Modify a customer service account. :param account: username of the customer service account :param nickname: nickname of the customer service account :param password: password of the customer service account :return: the returned JSON data packet
['Modify', 'a', 'customer', 'service', 'account', '.']
train
https://github.com/offu/WeRoBot/blob/fd42109105b03f9acf45ebd9dcabb9d5cff98f3c/werobot/client.py#L292-L308
6,146
mgoral/subconvert
src/subconvert/utils/SubFile.py
File._writeFile
def _writeFile(cls, filePath, content, encoding = None): """Safe file writing. Most common mistakes are checked against and reported before write operation. After that, if anything unexpected happens, user won't be left without data or with corrupted one as this method writes to a temporary file and then simply renames it (which should be atomic operation according to POSIX but who knows how Ext4 really works. @see: http://lwn.net/Articles/322823/).""" filePath = os.path.realpath(filePath) log.debug(_("Real file path to write: %s" % filePath)) if encoding is None: encoding = File.DEFAULT_ENCODING try: encodedContent = ''.join(content).encode(encoding) except LookupError as msg: raise SubFileError(_("Unknown encoding name: '%s'.") % encoding) except UnicodeEncodeError: raise SubFileError( _("There are some characters in '%(file)s' that cannot be encoded to '%(enc)s'.") % {"file": filePath, "enc": encoding}) tmpFilePath = "%s.tmp" % filePath bakFilePath = "%s.bak" % filePath with open(tmpFilePath, 'wb') as f: f.write(encodedContent) # ensure that all data is on disk. # for performance reasons, we skip os.fsync(f.fileno()) f.flush() try: os.rename(filePath, bakFilePath) except FileNotFoundError: # there's nothing to move when filePath doesn't exist # note the Python bug: http://bugs.python.org/issue16074 pass os.rename(tmpFilePath, filePath) try: os.unlink(bakFilePath) except FileNotFoundError: pass
python
def _writeFile(cls, filePath, content, encoding = None): """Safe file writing. Most common mistakes are checked against and reported before write operation. After that, if anything unexpected happens, user won't be left without data or with corrupted one as this method writes to a temporary file and then simply renames it (which should be atomic operation according to POSIX but who knows how Ext4 really works. @see: http://lwn.net/Articles/322823/).""" filePath = os.path.realpath(filePath) log.debug(_("Real file path to write: %s" % filePath)) if encoding is None: encoding = File.DEFAULT_ENCODING try: encodedContent = ''.join(content).encode(encoding) except LookupError as msg: raise SubFileError(_("Unknown encoding name: '%s'.") % encoding) except UnicodeEncodeError: raise SubFileError( _("There are some characters in '%(file)s' that cannot be encoded to '%(enc)s'.") % {"file": filePath, "enc": encoding}) tmpFilePath = "%s.tmp" % filePath bakFilePath = "%s.bak" % filePath with open(tmpFilePath, 'wb') as f: f.write(encodedContent) # ensure that all data is on disk. # for performance reasons, we skip os.fsync(f.fileno()) f.flush() try: os.rename(filePath, bakFilePath) except FileNotFoundError: # there's nothing to move when filePath doesn't exist # note the Python bug: http://bugs.python.org/issue16074 pass os.rename(tmpFilePath, filePath) try: os.unlink(bakFilePath) except FileNotFoundError: pass
['def', '_writeFile', '(', 'cls', ',', 'filePath', ',', 'content', ',', 'encoding', '=', 'None', ')', ':', 'filePath', '=', 'os', '.', 'path', '.', 'realpath', '(', 'filePath', ')', 'log', '.', 'debug', '(', '_', '(', '"Real file path to write: %s"', '%', 'filePath', ')', ')', 'if', 'encoding', 'is', 'None', ':', 'encoding', '=', 'File', '.', 'DEFAULT_ENCODING', 'try', ':', 'encodedContent', '=', "''", '.', 'join', '(', 'content', ')', '.', 'encode', '(', 'encoding', ')', 'except', 'LookupError', 'as', 'msg', ':', 'raise', 'SubFileError', '(', '_', '(', '"Unknown encoding name: \'%s\'."', ')', '%', 'encoding', ')', 'except', 'UnicodeEncodeError', ':', 'raise', 'SubFileError', '(', '_', '(', '"There are some characters in \'%(file)s\' that cannot be encoded to \'%(enc)s\'."', ')', '%', '{', '"file"', ':', 'filePath', ',', '"enc"', ':', 'encoding', '}', ')', 'tmpFilePath', '=', '"%s.tmp"', '%', 'filePath', 'bakFilePath', '=', '"%s.bak"', '%', 'filePath', 'with', 'open', '(', 'tmpFilePath', ',', "'wb'", ')', 'as', 'f', ':', 'f', '.', 'write', '(', 'encodedContent', ')', '# ensure that all data is on disk.', '# for performance reasons, we skip os.fsync(f.fileno())', 'f', '.', 'flush', '(', ')', 'try', ':', 'os', '.', 'rename', '(', 'filePath', ',', 'bakFilePath', ')', 'except', 'FileNotFoundError', ':', "# there's nothing to move when filePath doesn't exist", '# note the Python bug: http://bugs.python.org/issue16074', 'pass', 'os', '.', 'rename', '(', 'tmpFilePath', ',', 'filePath', ')', 'try', ':', 'os', '.', 'unlink', '(', 'bakFilePath', ')', 'except', 'FileNotFoundError', ':', 'pass']
Safe file writing. Most common mistakes are checked against and reported before write operation. After that, if anything unexpected happens, user won't be left without data or with corrupted one as this method writes to a temporary file and then simply renames it (which should be atomic operation according to POSIX but who knows how Ext4 really works. @see: http://lwn.net/Articles/322823/).
['Safe', 'file', 'writing', '.', 'Most', 'common', 'mistakes', 'are', 'checked', 'against', 'and', 'reported', 'before', 'write', 'operation', '.', 'After', 'that', 'if', 'anything', 'unexpected', 'happens', 'user', 'won', 't', 'be', 'left', 'without', 'data', 'or', 'with', 'corrupted', 'one', 'as', 'this', 'method', 'writes', 'to', 'a', 'temporary', 'file', 'and', 'then', 'simply', 'renames', 'it', '(', 'which', 'should', 'be', 'atomic', 'operation', 'according', 'to', 'POSIX', 'but', 'who', 'knows', 'how', 'Ext4', 'really', 'works', '.']
train
https://github.com/mgoral/subconvert/blob/59701e5e69ef1ca26ce7d1d766c936664aa2cb32/src/subconvert/utils/SubFile.py#L117-L159
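A minimal sketch of the safe-write pattern described in that docstring, assuming Python 3: write to a sibling temporary file, flush and fsync, then atomically swap it into place. Using os.replace avoids the manual .bak rename dance; atomic_write is an illustrative name.

import os

def atomic_write(path, data):
    tmp = path + '.tmp'
    with open(tmp, 'wb') as f:
        f.write(data)
        f.flush()
        os.fsync(f.fileno())  # unlike the original, force bytes to disk
    os.replace(tmp, path)     # atomic on POSIX and Windows

atomic_write('demo.txt', b'hello\n')
with open('demo.txt', 'rb') as f:
    print(f.read())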
6,147
indietyp/django-automated-logging
automated_logging/signals/__init__.py
get_current_user
def get_current_user(): """Get current user object from middleware""" thread_local = AutomatedLoggingMiddleware.thread_local if hasattr(thread_local, 'current_user'): user = thread_local.current_user if isinstance(user, AnonymousUser): user = None else: user = None return user
python
def get_current_user(): """Get current user object from middleware""" thread_local = AutomatedLoggingMiddleware.thread_local if hasattr(thread_local, 'current_user'): user = thread_local.current_user if isinstance(user, AnonymousUser): user = None else: user = None return user
['def', 'get_current_user', '(', ')', ':', 'thread_local', '=', 'AutomatedLoggingMiddleware', '.', 'thread_local', 'if', 'hasattr', '(', 'thread_local', ',', "'current_user'", ')', ':', 'user', '=', 'thread_local', '.', 'current_user', 'if', 'isinstance', '(', 'user', ',', 'AnonymousUser', ')', ':', 'user', '=', 'None', 'else', ':', 'user', '=', 'None', 'return', 'user']
Get current user object from middleware
['Get', 'current', 'user', 'object', 'from', 'middleware']
train
https://github.com/indietyp/django-automated-logging/blob/095dfc6df62dca45f7db4516bc35e52085d0a01c/automated_logging/signals/__init__.py#L36-L46
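The same thread-local lookup can be sketched without Django; here a plain string 'anonymous' stands in for django.contrib.auth.models.AnonymousUser, and both helper names are illustrative.

import threading

_local = threading.local()

def set_current_user(user):
    _local.current_user = user

def get_current_user():
    # Missing attribute and the anonymous marker both collapse to None,
    # mirroring the middleware helper above.
    user = getattr(_local, 'current_user', None)
    return None if user == 'anonymous' else user

set_current_user('alice')
print(get_current_user())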
6,148
ARMmbed/mbed-cloud-sdk-python
src/mbed_cloud/_backends/device_directory/apis/default_api.py
DefaultApi.group_members_add
def group_members_add(self, device_group_id, body, **kwargs): # noqa: E501 """Add a device to a group # noqa: E501 Add one device to a group # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass asynchronous=True >>> thread = api.group_members_add(device_group_id, body, asynchronous=True) >>> result = thread.get() :param asynchronous bool :param str device_group_id: The ID of the group (required) :param DeviceGroupManipulation body: Body of the request (required) :return: DevicePage If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('asynchronous'): return self.group_members_add_with_http_info(device_group_id, body, **kwargs) # noqa: E501 else: (data) = self.group_members_add_with_http_info(device_group_id, body, **kwargs) # noqa: E501 return data
python
def group_members_add(self, device_group_id, body, **kwargs): # noqa: E501 """Add a device to a group # noqa: E501 Add one device to a group # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass asynchronous=True >>> thread = api.group_members_add(device_group_id, body, asynchronous=True) >>> result = thread.get() :param asynchronous bool :param str device_group_id: The ID of the group (required) :param DeviceGroupManipulation body: Body of the request (required) :return: DevicePage If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('asynchronous'): return self.group_members_add_with_http_info(device_group_id, body, **kwargs) # noqa: E501 else: (data) = self.group_members_add_with_http_info(device_group_id, body, **kwargs) # noqa: E501 return data
['def', 'group_members_add', '(', 'self', ',', 'device_group_id', ',', 'body', ',', '*', '*', 'kwargs', ')', ':', '# noqa: E501', 'kwargs', '[', "'_return_http_data_only'", ']', '=', 'True', 'if', 'kwargs', '.', 'get', '(', "'asynchronous'", ')', ':', 'return', 'self', '.', 'group_members_add_with_http_info', '(', 'device_group_id', ',', 'body', ',', '*', '*', 'kwargs', ')', '# noqa: E501', 'else', ':', '(', 'data', ')', '=', 'self', '.', 'group_members_add_with_http_info', '(', 'device_group_id', ',', 'body', ',', '*', '*', 'kwargs', ')', '# noqa: E501', 'return', 'data']
Add a device to a group # noqa: E501 Add one device to a group # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass asynchronous=True >>> thread = api.group_members_add(device_group_id, body, asynchronous=True) >>> result = thread.get() :param asynchronous bool :param str device_group_id: The ID of the group (required) :param DeviceGroupManipulation body: Body of the request (required) :return: DevicePage If the method is called asynchronously, returns the request thread.
['Add', 'a', 'device', 'to', 'a', 'group', '#', 'noqa', ':', 'E501']
train
https://github.com/ARMmbed/mbed-cloud-sdk-python/blob/c0af86fb2cdd4dc7ed26f236139241067d293509/src/mbed_cloud/_backends/device_directory/apis/default_api.py#L1727-L1748
6,149
pypa/pipenv
pipenv/vendor/pexpect/popen_spawn.py
PopenSpawn.sendline
def sendline(self, s=''): '''Wraps send(), sending string ``s`` to child process, with os.linesep automatically appended. Returns number of bytes written. ''' n = self.send(s) return n + self.send(self.linesep)
python
def sendline(self, s=''): '''Wraps send(), sending string ``s`` to child process, with os.linesep automatically appended. Returns number of bytes written. ''' n = self.send(s) return n + self.send(self.linesep)
['def', 'sendline', '(', 'self', ',', 's', '=', "''", ')', ':', 'n', '=', 'self', '.', 'send', '(', 's', ')', 'return', 'n', '+', 'self', '.', 'send', '(', 'self', '.', 'linesep', ')']
Wraps send(), sending string ``s`` to child process, with os.linesep automatically appended. Returns number of bytes written.
['Wraps', 'send', '()', 'sending', 'string', 's', 'to', 'child', 'process', 'with', 'os', '.', 'linesep', 'automatically', 'appended', '.', 'Returns', 'number', 'of', 'bytes', 'written', '.']
train
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/pexpect/popen_spawn.py#L149-L154
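The sendline pattern above -- send the payload, then the line separator, and report the combined count -- can be exercised against an in-memory buffer; LineSender is an illustrative stand-in for PopenSpawn.

import io

class LineSender:
    linesep = '\n'

    def __init__(self):
        self.buf = io.StringIO()

    def send(self, s):
        return self.buf.write(s)

    def sendline(self, s=''):
        # Payload plus separator, returning the total written.
        return self.send(s) + self.send(self.linesep)

p = LineSender()
print(p.sendline('ls -l'), repr(p.buf.getvalue()))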
6,150
dropbox/stone
stone/frontend/lexer.py
Lexer.input
def input(self, file_data, **kwargs): """ Required by ply.yacc for this to quack (duck typing) like a ply lexer. :param str file_data: Contents of the file to lex. """ self.lex = lex.lex(module=self, **kwargs) self.tokens_queue = [] self.cur_indent = 0 # Hack to avoid tokenization bugs caused by files that do not end in a # new line. self.lex.input(file_data + '\n')
python
def input(self, file_data, **kwargs): """ Required by ply.yacc for this to quack (duck typing) like a ply lexer. :param str file_data: Contents of the file to lex. """ self.lex = lex.lex(module=self, **kwargs) self.tokens_queue = [] self.cur_indent = 0 # Hack to avoid tokenization bugs caused by files that do not end in a # new line. self.lex.input(file_data + '\n')
['def', 'input', '(', 'self', ',', 'file_data', ',', '*', '*', 'kwargs', ')', ':', 'self', '.', 'lex', '=', 'lex', '.', 'lex', '(', 'module', '=', 'self', ',', '*', '*', 'kwargs', ')', 'self', '.', 'tokens_queue', '=', '[', ']', 'self', '.', 'cur_indent', '=', '0', '# Hack to avoid tokenization bugs caused by files that do not end in a', '# new line.', 'self', '.', 'lex', '.', 'input', '(', 'file_data', '+', "'\\n'", ')']
Required by ply.yacc for this to quack (duck typing) like a ply lexer. :param str file_data: Contents of the file to lex.
['Required', 'by', 'ply', '.', 'yacc', 'for', 'this', 'to', 'quack', '(', 'duck', 'typing', ')', 'like', 'a', 'ply', 'lexer', '.']
train
https://github.com/dropbox/stone/blob/2e95cbcd1c48e05cca68c919fd8d24adec6b0f58/stone/frontend/lexer.py#L44-L55
6,151
aiogram/aiogram
aiogram/bot/bot.py
Bot.get_chat
async def get_chat(self, chat_id: typing.Union[base.Integer, base.String]) -> types.Chat: """ Use this method to get up to date information about the chat (current name of the user for one-on-one conversations, current username of a user, group or channel, etc.). Source: https://core.telegram.org/bots/api#getchat :param chat_id: Unique identifier for the target chat or username of the target supergroup or channel :type chat_id: :obj:`typing.Union[base.Integer, base.String]` :return: Returns a Chat object on success :rtype: :obj:`types.Chat` """ payload = generate_payload(**locals()) result = await self.request(api.Methods.GET_CHAT, payload) return types.Chat(**result)
python
async def get_chat(self, chat_id: typing.Union[base.Integer, base.String]) -> types.Chat: """ Use this method to get up to date information about the chat (current name of the user for one-on-one conversations, current username of a user, group or channel, etc.). Source: https://core.telegram.org/bots/api#getchat :param chat_id: Unique identifier for the target chat or username of the target supergroup or channel :type chat_id: :obj:`typing.Union[base.Integer, base.String]` :return: Returns a Chat object on success :rtype: :obj:`types.Chat` """ payload = generate_payload(**locals()) result = await self.request(api.Methods.GET_CHAT, payload) return types.Chat(**result)
['async', 'def', 'get_chat', '(', 'self', ',', 'chat_id', ':', 'typing', '.', 'Union', '[', 'base', '.', 'Integer', ',', 'base', '.', 'String', ']', ')', '->', 'types', '.', 'Chat', ':', 'payload', '=', 'generate_payload', '(', '*', '*', 'locals', '(', ')', ')', 'result', '=', 'await', 'self', '.', 'request', '(', 'api', '.', 'Methods', '.', 'GET_CHAT', ',', 'payload', ')', 'return', 'types', '.', 'Chat', '(', '*', '*', 'result', ')']
Use this method to get up to date information about the chat (current name of the user for one-on-one conversations, current username of a user, group or channel, etc.). Source: https://core.telegram.org/bots/api#getchat :param chat_id: Unique identifier for the target chat or username of the target supergroup or channel :type chat_id: :obj:`typing.Union[base.Integer, base.String]` :return: Returns a Chat object on success :rtype: :obj:`types.Chat`
['Use', 'this', 'method', 'to', 'get', 'up', 'to', 'date', 'information', 'about', 'the', 'chat', '(', 'current', 'name', 'of', 'the', 'user', 'for', 'one', '-', 'on', '-', 'one', 'conversations', 'current', 'username', 'of', 'a', 'user', 'group', 'or', 'channel', 'etc', '.', ')', '.']
train
https://github.com/aiogram/aiogram/blob/2af930149ce2482547721e2c8755c10307295e48/aiogram/bot/bot.py#L1253-L1268
6,152
apache/incubator-mxnet
python/mxnet/gluon/parameter.py
ParameterDict.save
def save(self, filename, strip_prefix=''): """Save parameters to file. Parameters ---------- filename : str Path to parameter file. strip_prefix : str, default '' Strip prefix from parameter names before saving. """ arg_dict = {} for param in self.values(): weight = param._reduce() if not param.name.startswith(strip_prefix): raise ValueError( "Prefix '%s' is to be striped before saving, but Parameter's " "name '%s' does not start with '%s'. " "this may be due to your Block shares parameters from other " "Blocks or you forgot to use 'with name_scope()' when creating " "child blocks. For more info on naming, please see " "http://mxnet.incubator.apache.org/tutorials/basic/naming.html"%( strip_prefix, param.name, strip_prefix)) arg_dict[param.name[len(strip_prefix):]] = weight ndarray.save(filename, arg_dict)
python
def save(self, filename, strip_prefix=''): """Save parameters to file. Parameters ---------- filename : str Path to parameter file. strip_prefix : str, default '' Strip prefix from parameter names before saving. """ arg_dict = {} for param in self.values(): weight = param._reduce() if not param.name.startswith(strip_prefix): raise ValueError( "Prefix '%s' is to be striped before saving, but Parameter's " "name '%s' does not start with '%s'. " "this may be due to your Block shares parameters from other " "Blocks or you forgot to use 'with name_scope()' when creating " "child blocks. For more info on naming, please see " "http://mxnet.incubator.apache.org/tutorials/basic/naming.html"%( strip_prefix, param.name, strip_prefix)) arg_dict[param.name[len(strip_prefix):]] = weight ndarray.save(filename, arg_dict)
['def', 'save', '(', 'self', ',', 'filename', ',', 'strip_prefix', '=', "''", ')', ':', 'arg_dict', '=', '{', '}', 'for', 'param', 'in', 'self', '.', 'values', '(', ')', ':', 'weight', '=', 'param', '.', '_reduce', '(', ')', 'if', 'not', 'param', '.', 'name', '.', 'startswith', '(', 'strip_prefix', ')', ':', 'raise', 'ValueError', '(', '"Prefix \'%s\' is to be striped before saving, but Parameter\'s "', '"name \'%s\' does not start with \'%s\'. "', '"this may be due to your Block shares parameters from other "', '"Blocks or you forgot to use \'with name_scope()\' when creating "', '"child blocks. For more info on naming, please see "', '"http://mxnet.incubator.apache.org/tutorials/basic/naming.html"', '%', '(', 'strip_prefix', ',', 'param', '.', 'name', ',', 'strip_prefix', ')', ')', 'arg_dict', '[', 'param', '.', 'name', '[', 'len', '(', 'strip_prefix', ')', ':', ']', ']', '=', 'weight', 'ndarray', '.', 'save', '(', 'filename', ',', 'arg_dict', ')']
Save parameters to file. Parameters ---------- filename : str Path to parameter file. strip_prefix : str, default '' Strip prefix from parameter names before saving.
['Save', 'parameters', 'to', 'file', '.']
train
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/gluon/parameter.py#L854-L877
6,153
ssalentin/plip
plip/modules/report.py
StructureReport.construct_txt_file
def construct_txt_file(self): """Construct the header of the txt file""" textlines = ['Prediction of noncovalent interactions for PDB structure %s' % self.mol.pymol_name.upper(), ] textlines.append("=" * len(textlines[0])) textlines.append('Created on %s using PLIP v%s\n' % (time.strftime("%Y/%m/%d"), __version__)) textlines.append('If you are using PLIP in your work, please cite:') textlines.append('Salentin,S. et al. PLIP: fully automated protein-ligand interaction profiler.') textlines.append('Nucl. Acids Res. (1 July 2015) 43 (W1): W443-W447. doi: 10.1093/nar/gkv315\n') if len(self.excluded) != 0: textlines.append('Excluded molecules as ligands: %s\n' % ','.join([lig for lig in self.excluded])) if config.DNARECEPTOR: textlines.append('DNA/RNA in structure was chosen as the receptor part.\n') return textlines
python
def construct_txt_file(self): """Construct the header of the txt file""" textlines = ['Prediction of noncovalent interactions for PDB structure %s' % self.mol.pymol_name.upper(), ] textlines.append("=" * len(textlines[0])) textlines.append('Created on %s using PLIP v%s\n' % (time.strftime("%Y/%m/%d"), __version__)) textlines.append('If you are using PLIP in your work, please cite:') textlines.append('Salentin,S. et al. PLIP: fully automated protein-ligand interaction profiler.') textlines.append('Nucl. Acids Res. (1 July 2015) 43 (W1): W443-W447. doi: 10.1093/nar/gkv315\n') if len(self.excluded) != 0: textlines.append('Excluded molecules as ligands: %s\n' % ','.join([lig for lig in self.excluded])) if config.DNARECEPTOR: textlines.append('DNA/RNA in structure was chosen as the receptor part.\n') return textlines
['def', 'construct_txt_file', '(', 'self', ')', ':', 'textlines', '=', '[', "'Prediction of noncovalent interactions for PDB structure %s'", '%', 'self', '.', 'mol', '.', 'pymol_name', '.', 'upper', '(', ')', ',', ']', 'textlines', '.', 'append', '(', '"="', '*', 'len', '(', 'textlines', '[', '0', ']', ')', ')', 'textlines', '.', 'append', '(', "'Created on %s using PLIP v%s\\n'", '%', '(', 'time', '.', 'strftime', '(', '"%Y/%m/%d"', ')', ',', '__version__', ')', ')', 'textlines', '.', 'append', '(', "'If you are using PLIP in your work, please cite:'", ')', 'textlines', '.', 'append', '(', "'Salentin,S. et al. PLIP: fully automated protein-ligand interaction profiler.'", ')', 'textlines', '.', 'append', '(', "'Nucl. Acids Res. (1 July 2015) 43 (W1): W443-W447. doi: 10.1093/nar/gkv315\\n'", ')', 'if', 'len', '(', 'self', '.', 'excluded', ')', '!=', '0', ':', 'textlines', '.', 'append', '(', "'Excluded molecules as ligands: %s\\n'", '%', "','", '.', 'join', '(', '[', 'lig', 'for', 'lig', 'in', 'self', '.', 'excluded', ']', ')', ')', 'if', 'config', '.', 'DNARECEPTOR', ':', 'textlines', '.', 'append', '(', "'DNA/RNA in structure was chosen as the receptor part.\\n'", ')', 'return', 'textlines']
Construct the header of the txt file
['Construct', 'the', 'header', 'of', 'the', 'txt', 'file']
train
https://github.com/ssalentin/plip/blob/906c8d36463689779b403f6c2c9ed06174acaf9a/plip/modules/report.py#L75-L87
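The header construction above relies on one small trick worth isolating: underline the title with '=' of exactly matching length. report_header and the version placeholder are illustrative.

import time

def report_header(name, version='2.x'):
    title = ('Prediction of noncovalent interactions '
             'for PDB structure %s' % name.upper())
    return [title,
            '=' * len(title),  # underline matches the title width
            'Created on %s using PLIP v%s\n' % (time.strftime('%Y/%m/%d'),
                                                version)]

print('\n'.join(report_header('1abc')))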
6,154
MAVENSDC/cdflib
cdflib/cdfread.py
CDF._default_pad
def _default_pad(self, data_type, num_elms): # @NoSelf ''' The default pad values by CDF data type ''' order = self._convert_option() if (data_type == 51 or data_type == 52): return str(' '*num_elms) if (data_type == 1) or (data_type == 41): pad_value = struct.pack(order+'b', -127) dt_string = 'i1' elif data_type == 2: pad_value = struct.pack(order+'h', -32767) dt_string = 'i2' elif data_type == 4: pad_value = struct.pack(order+'i', -2147483647) dt_string = 'i4' elif (data_type == 8) or (data_type == 33): pad_value = struct.pack(order+'q', -9223372036854775807) dt_string = 'i8' elif data_type == 11: pad_value = struct.pack(order+'B', 254) dt_string = 'u1' elif data_type == 12: pad_value = struct.pack(order+'H', 65534) dt_string = 'u2' elif data_type == 14: pad_value = struct.pack(order+'I', 4294967294) dt_string = 'u4' elif (data_type == 21) or (data_type == 44): pad_value = struct.pack(order+'f', -1.0E30) dt_string = 'f' elif (data_type == 22) or (data_type == 45) or (data_type == 31): pad_value = struct.pack(order+'d', -1.0E30) dt_string = 'd' else: # (data_type == 32): pad_value = struct.pack(order+'2d', *[-1.0E30, -1.0E30]) dt_string = 'c16' dt = np.dtype(dt_string) ret = np.frombuffer(pad_value, dtype=dt, count=1) ret.setflags('WRITEABLE') return ret
python
def _default_pad(self, data_type, num_elms): # @NoSelf ''' The default pad values by CDF data type ''' order = self._convert_option() if (data_type == 51 or data_type == 52): return str(' '*num_elms) if (data_type == 1) or (data_type == 41): pad_value = struct.pack(order+'b', -127) dt_string = 'i1' elif data_type == 2: pad_value = struct.pack(order+'h', -32767) dt_string = 'i2' elif data_type == 4: pad_value = struct.pack(order+'i', -2147483647) dt_string = 'i4' elif (data_type == 8) or (data_type == 33): pad_value = struct.pack(order+'q', -9223372036854775807) dt_string = 'i8' elif data_type == 11: pad_value = struct.pack(order+'B', 254) dt_string = 'u1' elif data_type == 12: pad_value = struct.pack(order+'H', 65534) dt_string = 'u2' elif data_type == 14: pad_value = struct.pack(order+'I', 4294967294) dt_string = 'u4' elif (data_type == 21) or (data_type == 44): pad_value = struct.pack(order+'f', -1.0E30) dt_string = 'f' elif (data_type == 22) or (data_type == 45) or (data_type == 31): pad_value = struct.pack(order+'d', -1.0E30) dt_string = 'd' else: # (data_type == 32): pad_value = struct.pack(order+'2d', *[-1.0E30, -1.0E30]) dt_string = 'c16' dt = np.dtype(dt_string) ret = np.frombuffer(pad_value, dtype=dt, count=1) ret.setflags('WRITEABLE') return ret
['def', '_default_pad', '(', 'self', ',', 'data_type', ',', 'num_elms', ')', ':', '# @NoSelf', 'order', '=', 'self', '.', '_convert_option', '(', ')', 'if', '(', 'data_type', '==', '51', 'or', 'data_type', '==', '52', ')', ':', 'return', 'str', '(', "' '", '*', 'num_elms', ')', 'if', '(', 'data_type', '==', '1', ')', 'or', '(', 'data_type', '==', '41', ')', ':', 'pad_value', '=', 'struct', '.', 'pack', '(', 'order', '+', "'b'", ',', '-', '127', ')', 'dt_string', '=', "'i1'", 'elif', 'data_type', '==', '2', ':', 'pad_value', '=', 'struct', '.', 'pack', '(', 'order', '+', "'h'", ',', '-', '32767', ')', 'dt_string', '=', "'i2'", 'elif', 'data_type', '==', '4', ':', 'pad_value', '=', 'struct', '.', 'pack', '(', 'order', '+', "'i'", ',', '-', '2147483647', ')', 'dt_string', '=', "'i4'", 'elif', '(', 'data_type', '==', '8', ')', 'or', '(', 'data_type', '==', '33', ')', ':', 'pad_value', '=', 'struct', '.', 'pack', '(', 'order', '+', "'q'", ',', '-', '9223372036854775807', ')', 'dt_string', '=', "'i8'", 'elif', 'data_type', '==', '11', ':', 'pad_value', '=', 'struct', '.', 'pack', '(', 'order', '+', "'B'", ',', '254', ')', 'dt_string', '=', "'u1'", 'elif', 'data_type', '==', '12', ':', 'pad_value', '=', 'struct', '.', 'pack', '(', 'order', '+', "'H'", ',', '65534', ')', 'dt_string', '=', "'u2'", 'elif', 'data_type', '==', '14', ':', 'pad_value', '=', 'struct', '.', 'pack', '(', 'order', '+', "'I'", ',', '4294967294', ')', 'dt_string', '=', "'u4'", 'elif', '(', 'data_type', '==', '21', ')', 'or', '(', 'data_type', '==', '44', ')', ':', 'pad_value', '=', 'struct', '.', 'pack', '(', 'order', '+', "'f'", ',', '-', '1.0E30', ')', 'dt_string', '=', "'f'", 'elif', '(', 'data_type', '==', '22', ')', 'or', '(', 'data_type', '==', '45', ')', 'or', '(', 'data_type', '==', '31', ')', ':', 'pad_value', '=', 'struct', '.', 'pack', '(', 'order', '+', "'d'", ',', '-', '1.0E30', ')', 'dt_string', '=', "'d'", 'else', ':', '# (data_type == 32):', 'pad_value', '=', 'struct', '.', 'pack', '(', 'order', '+', "'2d'", ',', '*', '[', '-', '1.0E30', ',', '-', '1.0E30', ']', ')', 'dt_string', '=', "'c16'", 'dt', '=', 'np', '.', 'dtype', '(', 'dt_string', ')', 'ret', '=', 'np', '.', 'frombuffer', '(', 'pad_value', ',', 'dtype', '=', 'dt', ',', 'count', '=', '1', ')', 'ret', '.', 'setflags', '(', "'WRITEABLE'", ')', 'return', 'ret']
The default pad values by CDF data type
['The', 'default', 'pad', 'values', 'by', 'CDF', 'data', 'type']
train
https://github.com/MAVENSDC/cdflib/blob/d237c60e5db67db0f92d96054209c25c4042465c/cdflib/cdfread.py#L2100-L2142
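The struct.pack/np.frombuffer round trip used for pad values can be shown in isolation; .copy() sidesteps the read-only buffer that frombuffer returns (what the original's setflags call works around), and pad_value is an illustrative name. Assumes NumPy is installed.

import struct
import numpy as np

def pad_value(fmt, value, dtype):
    # Pack a little-endian sentinel, then reinterpret the raw bytes as a
    # one-element array of the matching dtype.
    raw = struct.pack('<' + fmt, value)
    return np.frombuffer(raw, dtype=np.dtype(dtype), count=1).copy()

print(pad_value('h', -32767, '<i2'))   # CDF_INT2 default pad
print(pad_value('d', -1.0e30, '<f8'))  # CDF_DOUBLE default pad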
6,155
RI-imaging/qpformat
qpformat/file_formats/series_hdf5_qpimage.py
SeriesHdf5Qpimage.get_qpimage
def get_qpimage(self, idx): """Return background-corrected QPImage of data at index `idx`""" if self._bgdata: # The user has explicitly chosen different background data # using `get_qpimage_raw`. qpi = super(SeriesHdf5Qpimage, self).get_qpimage(idx) else: # We can use the background data stored in the qpimage hdf5 file with self._qpseries() as qps: qpi = qps.get_qpimage(index=idx).copy() # Force meta data for key in self.meta_data: qpi[key] = self.meta_data[key] # set identifier qpi["identifier"] = self.get_identifier(idx) return qpi
python
def get_qpimage(self, idx): """Return background-corrected QPImage of data at index `idx`""" if self._bgdata: # The user has explicitly chosen different background data # using `get_qpimage_raw`. qpi = super(SeriesHdf5Qpimage, self).get_qpimage(idx) else: # We can use the background data stored in the qpimage hdf5 file with self._qpseries() as qps: qpi = qps.get_qpimage(index=idx).copy() # Force meta data for key in self.meta_data: qpi[key] = self.meta_data[key] # set identifier qpi["identifier"] = self.get_identifier(idx) return qpi
['def', 'get_qpimage', '(', 'self', ',', 'idx', ')', ':', 'if', 'self', '.', '_bgdata', ':', '# The user has explicitly chosen different background data', '# using `get_qpimage_raw`.', 'qpi', '=', 'super', '(', 'SeriesHdf5Qpimage', ',', 'self', ')', '.', 'get_qpimage', '(', 'idx', ')', 'else', ':', '# We can use the background data stored in the qpimage hdf5 file', 'with', 'self', '.', '_qpseries', '(', ')', 'as', 'qps', ':', 'qpi', '=', 'qps', '.', 'get_qpimage', '(', 'index', '=', 'idx', ')', '.', 'copy', '(', ')', '# Force meta data', 'for', 'key', 'in', 'self', '.', 'meta_data', ':', 'qpi', '[', 'key', ']', '=', 'self', '.', 'meta_data', '[', 'key', ']', '# set identifier', 'qpi', '[', '"identifier"', ']', '=', 'self', '.', 'get_identifier', '(', 'idx', ')', 'return', 'qpi']
Return background-corrected QPImage of data at index `idx`
['Return', 'background', '-', 'corrected', 'QPImage', 'of', 'data', 'at', 'index', 'idx']
train
https://github.com/RI-imaging/qpformat/blob/364e29d7d9e8b9f1d7a4a25c753d1baf9d73d5eb/qpformat/file_formats/series_hdf5_qpimage.py#L32-L47
6,156
empirical-org/Quill-NLP-Tools-and-Datasets
utils/qextract/qextract/utils.py
read_in_chunks
def read_in_chunks(file_object, chunk_size=CHUNK_SIZE): """Generator to read a file piece by piece.""" while True: data = file_object.read(chunk_size) if not data: break yield data
python
def read_in_chunks(file_object, chunk_size=CHUNK_SIZE): """Generator to read a file piece by piece.""" while True: data = file_object.read(chunk_size) if not data: break yield data
['def', 'read_in_chunks', '(', 'file_object', ',', 'chunk_size', '=', 'CHUNK_SIZE', ')', ':', 'while', 'True', ':', 'data', '=', 'file_object', '.', 'read', '(', 'chunk_size', ')', 'if', 'not', 'data', ':', 'break', 'yield', 'data']
Generator to read a file piece by piece.
['Generator', 'to', 'read', 'a', 'file', 'piece', 'by', 'piece', '.']
train
https://github.com/empirical-org/Quill-NLP-Tools-and-Datasets/blob/f2ff579ddf3a556d9cdc47c5f702422fa06863d9/utils/qextract/qextract/utils.py#L5-L11
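Usage of the chunked-read generator is straightforward; an in-memory stream and a deliberately tiny chunk size keep the demo self-contained.

import io

def read_in_chunks(file_object, chunk_size=4):
    while True:
        data = file_object.read(chunk_size)
        if not data:
            break
        yield data

for chunk in read_in_chunks(io.BytesIO(b'abcdefghij')):
    print(chunk)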
6,157
sirfoga/pyhal
hal/times/utils.py
Timing.parse_hh_mm_ss
def parse_hh_mm_ss(self): """Parses raw time :return: Time parsed """ split_count = self.raw.count(":") if split_count == 2: # hh:mm:ss return datetime.strptime(str(self.raw).strip(), "%H:%M:%S").time() elif split_count == 1: # mm:ss return datetime.strptime(str(self.raw).strip(), "%M:%S").time() return datetime.strptime(str(self.raw).strip(), "%S").time()
python
def parse_hh_mm_ss(self): """Parses raw time :return: Time parsed """ split_count = self.raw.count(":") if split_count == 2: # hh:mm:ss return datetime.strptime(str(self.raw).strip(), "%H:%M:%S").time() elif split_count == 1: # mm:ss return datetime.strptime(str(self.raw).strip(), "%M:%S").time() return datetime.strptime(str(self.raw).strip(), "%S").time()
['def', 'parse_hh_mm_ss', '(', 'self', ')', ':', 'split_count', '=', 'self', '.', 'raw', '.', 'count', '(', '":"', ')', 'if', 'split_count', '==', '2', ':', '# hh:mm:ss', 'return', 'datetime', '.', 'strptime', '(', 'str', '(', 'self', '.', 'raw', ')', '.', 'strip', '(', ')', ',', '"%H:%M:%S"', ')', '.', 'time', '(', ')', 'elif', 'split_count', '==', '1', ':', '# mm:ss', 'return', 'datetime', '.', 'strptime', '(', 'str', '(', 'self', '.', 'raw', ')', '.', 'strip', '(', ')', ',', '"%M:%S"', ')', '.', 'time', '(', ')', 'return', 'datetime', '.', 'strptime', '(', 'str', '(', 'self', '.', 'raw', ')', '.', 'strip', '(', ')', ',', '"%S"', ')', '.', 'time', '(', ')']
Parses raw time :return: Time parsed
['Parses', 'raw', 'time']
train
https://github.com/sirfoga/pyhal/blob/4394d8a1f7e45bea28a255ec390f4962ee64d33a/hal/times/utils.py#L25-L37
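The format-selection logic above compresses to a dict keyed on the number of ':' separators; parse_clock is an illustrative name.

from datetime import datetime

def parse_clock(raw):
    fmt = {2: '%H:%M:%S', 1: '%M:%S', 0: '%S'}[raw.count(':')]
    return datetime.strptime(raw.strip(), fmt).time()

print(parse_clock('01:02:03'), parse_clock('02:03'), parse_clock('03'))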
6,158
saltstack/salt
salt/fileclient.py
Client.is_cached
def is_cached(self, path, saltenv='base', cachedir=None): ''' Returns the full path to a file if it is cached locally on the minion otherwise returns a blank string ''' if path.startswith('salt://'): path, senv = salt.utils.url.parse(path) if senv: saltenv = senv escaped = True if salt.utils.url.is_escaped(path) else False # also strip escape character '|' localsfilesdest = os.path.join( self.opts['cachedir'], 'localfiles', path.lstrip('|/')) filesdest = os.path.join( self.opts['cachedir'], 'files', saltenv, path.lstrip('|/')) extrndest = self._extrn_path(path, saltenv, cachedir=cachedir) if os.path.exists(filesdest): return salt.utils.url.escape(filesdest) if escaped else filesdest elif os.path.exists(localsfilesdest): return salt.utils.url.escape(localsfilesdest) \ if escaped \ else localsfilesdest elif os.path.exists(extrndest): return extrndest return ''
python
def is_cached(self, path, saltenv='base', cachedir=None): ''' Returns the full path to a file if it is cached locally on the minion otherwise returns a blank string ''' if path.startswith('salt://'): path, senv = salt.utils.url.parse(path) if senv: saltenv = senv escaped = True if salt.utils.url.is_escaped(path) else False # also strip escape character '|' localsfilesdest = os.path.join( self.opts['cachedir'], 'localfiles', path.lstrip('|/')) filesdest = os.path.join( self.opts['cachedir'], 'files', saltenv, path.lstrip('|/')) extrndest = self._extrn_path(path, saltenv, cachedir=cachedir) if os.path.exists(filesdest): return salt.utils.url.escape(filesdest) if escaped else filesdest elif os.path.exists(localsfilesdest): return salt.utils.url.escape(localsfilesdest) \ if escaped \ else localsfilesdest elif os.path.exists(extrndest): return extrndest return ''
['def', 'is_cached', '(', 'self', ',', 'path', ',', 'saltenv', '=', "'base'", ',', 'cachedir', '=', 'None', ')', ':', 'if', 'path', '.', 'startswith', '(', "'salt://'", ')', ':', 'path', ',', 'senv', '=', 'salt', '.', 'utils', '.', 'url', '.', 'parse', '(', 'path', ')', 'if', 'senv', ':', 'saltenv', '=', 'senv', 'escaped', '=', 'True', 'if', 'salt', '.', 'utils', '.', 'url', '.', 'is_escaped', '(', 'path', ')', 'else', 'False', "# also strip escape character '|'", 'localsfilesdest', '=', 'os', '.', 'path', '.', 'join', '(', 'self', '.', 'opts', '[', "'cachedir'", ']', ',', "'localfiles'", ',', 'path', '.', 'lstrip', '(', "'|/'", ')', ')', 'filesdest', '=', 'os', '.', 'path', '.', 'join', '(', 'self', '.', 'opts', '[', "'cachedir'", ']', ',', "'files'", ',', 'saltenv', ',', 'path', '.', 'lstrip', '(', "'|/'", ')', ')', 'extrndest', '=', 'self', '.', '_extrn_path', '(', 'path', ',', 'saltenv', ',', 'cachedir', '=', 'cachedir', ')', 'if', 'os', '.', 'path', '.', 'exists', '(', 'filesdest', ')', ':', 'return', 'salt', '.', 'utils', '.', 'url', '.', 'escape', '(', 'filesdest', ')', 'if', 'escaped', 'else', 'filesdest', 'elif', 'os', '.', 'path', '.', 'exists', '(', 'localsfilesdest', ')', ':', 'return', 'salt', '.', 'utils', '.', 'url', '.', 'escape', '(', 'localsfilesdest', ')', 'if', 'escaped', 'else', 'localsfilesdest', 'elif', 'os', '.', 'path', '.', 'exists', '(', 'extrndest', ')', ':', 'return', 'extrndest', 'return', "''"]
Returns the full path to a file if it is cached locally on the minion otherwise returns a blank string
['Returns', 'the', 'full', 'path', 'to', 'a', 'file', 'if', 'it', 'is', 'cached', 'locally', 'on', 'the', 'minion', 'otherwise', 'returns', 'a', 'blank', 'string']
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/fileclient.py#L314-L342
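Stripped of the salt:// and escape handling, the cache lookup reduces to "first existing candidate wins"; first_cached and the sample directory are illustrative.

import os

def first_cached(path, candidate_dirs):
    # Probe cache locations in priority order; '' means not cached.
    for base in candidate_dirs:
        dest = os.path.join(base, path.lstrip('|/'))
        if os.path.exists(dest):
            return dest
    return ''

print(repr(first_cached('top.sls',
                        ['/var/cache/salt/minion/files/base'])))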
6,159
hardbyte/python-can
can/interfaces/virtual.py
VirtualBus._detect_available_configs
def _detect_available_configs(): """ Returns all currently used channels as well as one other currently unused channel. .. note:: This method will run into problems if thousands of autodetected busses are used at once. """ with channels_lock: available_channels = list(channels.keys()) # find a currently unused channel get_extra = lambda: "channel-{}".format(randint(0, 9999)) extra = get_extra() while extra in available_channels: extra = get_extra() available_channels += [extra] return [ {'interface': 'virtual', 'channel': channel} for channel in available_channels ]
python
def _detect_available_configs(): """ Returns all currently used channels as well as one other currently unused channel. .. note:: This method will run into problems if thousands of autodetected busses are used at once. """ with channels_lock: available_channels = list(channels.keys()) # find a currently unused channel get_extra = lambda: "channel-{}".format(randint(0, 9999)) extra = get_extra() while extra in available_channels: extra = get_extra() available_channels += [extra] return [ {'interface': 'virtual', 'channel': channel} for channel in available_channels ]
['def', '_detect_available_configs', '(', ')', ':', 'with', 'channels_lock', ':', 'available_channels', '=', 'list', '(', 'channels', '.', 'keys', '(', ')', ')', '# find a currently unused channel', 'get_extra', '=', 'lambda', ':', '"channel-{}"', '.', 'format', '(', 'randint', '(', '0', ',', '9999', ')', ')', 'extra', '=', 'get_extra', '(', ')', 'while', 'extra', 'in', 'available_channels', ':', 'extra', '=', 'get_extra', '(', ')', 'available_channels', '+=', '[', 'extra', ']', 'return', '[', '{', "'interface'", ':', "'virtual'", ',', "'channel'", ':', 'channel', '}', 'for', 'channel', 'in', 'available_channels', ']']
Returns all currently used channels as well as one other currently unused channel. .. note:: This method will run into problems if thousands of autodetected busses are used at once.
['Returns', 'all', 'currently', 'used', 'channels', 'as', 'well', 'as', 'one', 'other', 'currently', 'unused', 'channel', '.']
train
https://github.com/hardbyte/python-can/blob/cdc5254d96072df7739263623f3e920628a7d214/can/interfaces/virtual.py#L116-L141
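The "draw until unused" loop is easy to show on its own; unused_channel is an illustrative name and, like the original, the generated names are not zero-padded.

from random import randint

def unused_channel(existing):
    name = 'channel-{}'.format(randint(0, 9999))
    while name in existing:                    # retry on collision
        name = 'channel-{}'.format(randint(0, 9999))
    return name

print(unused_channel({'channel-1', 'channel-2'}))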
6,160
dsoprea/PySvn
svn/common.py
CommonClient.properties
def properties(self, rel_path=None): """ Return a dictionary with all svn-properties associated with a relative path. :param rel_path: relative path in the svn repo to query the properties from :returns: a dictionary with the property name as key and the content as value """ full_url_or_path = self.__url_or_path if rel_path is not None: full_url_or_path += '/' + rel_path result = self.run_command( 'proplist', ['--xml', full_url_or_path], do_combine=True) # query the proper list of this path root = xml.etree.ElementTree.fromstring(result) target_elem = root.find('target') property_names = [p.attrib["name"] for p in target_elem.findall('property')] # now query the content of each propery property_dict = {} for property_name in property_names: result = self.run_command( 'propget', ['--xml', property_name, full_url_or_path, ], do_combine=True) root = xml.etree.ElementTree.fromstring(result) target_elem = root.find('target') property_elem = target_elem.find('property') property_dict[property_name] = property_elem.text return property_dict
python
def properties(self, rel_path=None): """ Return a dictionary with all svn-properties associated with a relative path. :param rel_path: relative path in the svn repo to query the properties from :returns: a dictionary with the property name as key and the content as value """ full_url_or_path = self.__url_or_path if rel_path is not None: full_url_or_path += '/' + rel_path result = self.run_command( 'proplist', ['--xml', full_url_or_path], do_combine=True) # query the proper list of this path root = xml.etree.ElementTree.fromstring(result) target_elem = root.find('target') property_names = [p.attrib["name"] for p in target_elem.findall('property')] # now query the content of each propery property_dict = {} for property_name in property_names: result = self.run_command( 'propget', ['--xml', property_name, full_url_or_path, ], do_combine=True) root = xml.etree.ElementTree.fromstring(result) target_elem = root.find('target') property_elem = target_elem.find('property') property_dict[property_name] = property_elem.text return property_dict
['def', 'properties', '(', 'self', ',', 'rel_path', '=', 'None', ')', ':', 'full_url_or_path', '=', 'self', '.', '__url_or_path', 'if', 'rel_path', 'is', 'not', 'None', ':', 'full_url_or_path', '+=', "'/'", '+', 'rel_path', 'result', '=', 'self', '.', 'run_command', '(', "'proplist'", ',', '[', "'--xml'", ',', 'full_url_or_path', ']', ',', 'do_combine', '=', 'True', ')', '# query the proper list of this path', 'root', '=', 'xml', '.', 'etree', '.', 'ElementTree', '.', 'fromstring', '(', 'result', ')', 'target_elem', '=', 'root', '.', 'find', '(', "'target'", ')', 'property_names', '=', '[', 'p', '.', 'attrib', '[', '"name"', ']', 'for', 'p', 'in', 'target_elem', '.', 'findall', '(', "'property'", ')', ']', '# now query the content of each propery', 'property_dict', '=', '{', '}', 'for', 'property_name', 'in', 'property_names', ':', 'result', '=', 'self', '.', 'run_command', '(', "'propget'", ',', '[', "'--xml'", ',', 'property_name', ',', 'full_url_or_path', ',', ']', ',', 'do_combine', '=', 'True', ')', 'root', '=', 'xml', '.', 'etree', '.', 'ElementTree', '.', 'fromstring', '(', 'result', ')', 'target_elem', '=', 'root', '.', 'find', '(', "'target'", ')', 'property_elem', '=', 'target_elem', '.', 'find', '(', "'property'", ')', 'property_dict', '[', 'property_name', ']', '=', 'property_elem', '.', 'text', 'return', 'property_dict']
Return a dictionary with all svn-properties associated with a relative path. :param rel_path: relative path in the svn repo to query the properties from :returns: a dictionary with the property name as key and the content as value
['Return', 'a', 'dictionary', 'with', 'all', 'svn', '-', 'properties', 'associated', 'with', 'a', 'relative', 'path', '.', ':', 'param', 'rel_path', ':', 'relative', 'path', 'in', 'the', 'svn', 'repo', 'to', 'query', 'the', 'properties', 'from', ':', 'returns', ':', 'a', 'dictionary', 'with', 'the', 'property', 'name', 'as', 'key', 'and', 'the', 'content', 'as', 'value']
train
https://github.com/dsoprea/PySvn/blob/0c222a9a49b25d1fcfbc170ab9bc54288efe7f49/svn/common.py#L139-L176
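The XML walk can be exercised against a canned `svn proplist --xml` payload; the sample document below is illustrative rather than captured from a real repository.

import xml.etree.ElementTree as ET

SAMPLE = ('<properties><target path=".">'
          '<property name="svn:ignore"/>'
          '<property name="svn:mergeinfo"/>'
          '</target></properties>')

target = ET.fromstring(SAMPLE).find('target')
# One <property> element per property name, exactly as parsed above.
print([p.attrib['name'] for p in target.findall('property')])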
6,161
tjvr/kurt
kurt/__init__.py
Waveform.contents
def contents(self): """The raw file contents as a string.""" if not self._contents: if self._path: # Read file into memory so we don't run out of file descriptors f = open(self._path, "rb") self._contents = f.read() f.close() return self._contents
python
def contents(self): """The raw file contents as a string.""" if not self._contents: if self._path: # Read file into memory so we don't run out of file descriptors f = open(self._path, "rb") self._contents = f.read() f.close() return self._contents
['def', 'contents', '(', 'self', ')', ':', 'if', 'not', 'self', '.', '_contents', ':', 'if', 'self', '.', '_path', ':', "# Read file into memory so we don't run out of file descriptors", 'f', '=', 'open', '(', 'self', '.', '_path', ',', '"rb"', ')', 'self', '.', '_contents', '=', 'f', '.', 'read', '(', ')', 'f', '.', 'close', '(', ')', 'return', 'self', '.', '_contents']
The raw file contents as a string.
['The', 'raw', 'file', 'contents', 'as', 'a', 'string', '.']
train
https://github.com/tjvr/kurt/blob/fcccd80cae11dc233f6dd02b40ec9a388c62f259/kurt/__init__.py#L2405-L2413
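The lazy-read-and-cache property generalises readily; this sketch swaps the explicit open/close for a context manager and uses an illustrative class name.

import tempfile

class LazyContents:
    def __init__(self, path):
        self._path = path
        self._contents = None

    @property
    def contents(self):
        # Read once, cache, and release the file descriptor immediately.
        if self._contents is None and self._path:
            with open(self._path, 'rb') as f:
                self._contents = f.read()
        return self._contents

with tempfile.NamedTemporaryFile(delete=False) as tmp:
    tmp.write(b'abc')
print(LazyContents(tmp.name).contents)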
6,162
sosreport/sos
sos/plugins/networking.py
Networking.collect_iptable
def collect_iptable(self, tablename): """ When running the iptables command, it unfortunately auto-loads the modules before trying to get output. Some people explicitly don't want this, so check if the modules are loaded before running the command. If they aren't loaded, there can't possibly be any relevant rules in that table """ modname = "iptable_"+tablename if self.check_ext_prog("grep -q %s /proc/modules" % modname): cmd = "iptables -t "+tablename+" -nvL" self.add_cmd_output(cmd)
python
def collect_iptable(self, tablename): """ When running the iptables command, it unfortunately auto-loads the modules before trying to get output. Some people explicitly don't want this, so check if the modules are loaded before running the command. If they aren't loaded, there can't possibly be any relevant rules in that table """ modname = "iptable_"+tablename if self.check_ext_prog("grep -q %s /proc/modules" % modname): cmd = "iptables -t "+tablename+" -nvL" self.add_cmd_output(cmd)
['def', 'collect_iptable', '(', 'self', ',', 'tablename', ')', ':', 'modname', '=', '"iptable_"', '+', 'tablename', 'if', 'self', '.', 'check_ext_prog', '(', '"grep -q %s /proc/modules"', '%', 'modname', ')', ':', 'cmd', '=', '"iptables -t "', '+', 'tablename', '+', '" -nvL"', 'self', '.', 'add_cmd_output', '(', 'cmd', ')']
When running the iptables command, it unfortunately auto-loads the modules before trying to get output. Some people explicitly don't want this, so check if the modules are loaded before running the command. If they aren't loaded, there can't possibly be any relevant rules in that table
['When', 'running', 'the', 'iptables', 'command', 'it', 'unfortunately', 'auto', '-', 'loads', 'the', 'modules', 'before', 'trying', 'to', 'get', 'output', '.', 'Some', 'people', 'explicitly', 'don', 't', 'want', 'this', 'so', 'check', 'if', 'the', 'modules', 'are', 'loaded', 'before', 'running', 'the', 'command', '.', 'If', 'they', 'aren', 't', 'loaded', 'there', 'can', 't', 'possibly', 'be', 'any', 'relevant', 'rules', 'in', 'that', 'table']
train
https://github.com/sosreport/sos/blob/2ebc04da53dc871c8dd5243567afa4f8592dca29/sos/plugins/networking.py#L46-L56
6,163
mosesschwartz/scrypture
scrypture/scrypture.py
run_script
def run_script(module_name): '''Take script input (from script_input above), run the run() function, and render the results in the appropriate template''' filename = '' file_stream = '' if len(request.files) > 0: # Get the name of the uploaded file f = request.files['file_upload'] # Make the filename safe, remove unsupported chars filename = secure_filename(f.filename) file_stream = f.stream try: form = werkzeug.datastructures.MultiDict(request.form) form['HTTP_AUTHORIZATION'] = get_authorization() form['filename'] = filename form['file_stream'] = file_stream result = registered_modules[module_name].WebAPI().run(form) except Exception: if app.config['LOCAL_DEV'] == True: raise # pass along to be caught by Flask's debugger return render_template('error.html', scripts=registered_modules, module_name=module_name, error_message=traceback.format_exc()) output = result['output'] if 'output_type' in result: output_type = result['output_type'] else: if isinstance(output, basestring): output_type = 'simple' else: output_type = 'table' if result['output_type'] == 'custom': return render_template('result_custom.html', custom_output=Markup(result['output']), scripts=registered_modules, module_name=module_name) elif result['output_type'] == 'simple': return render_template('result.html', output=result['output'], scripts=registered_modules, module_name=module_name) elif result['output_type'] == 'file': return Response(result['output'], mimetype='application/octet-stream', headers={'Content-Disposition': 'attachment;filename='+result['filename']}) elif result['output_type'] == 'table': return render_template('result_table.html', output=result['output'], scripts=registered_modules, module_name=module_name, headers=result['headers'])
python
def run_script(module_name): '''Take script input (from script_input above), run the run() function, and render the results in the appropriate template''' filename = '' file_stream = '' if len(request.files) > 0: # Get the name of the uploaded file f = request.files['file_upload'] # Make the filename safe, remove unsupported chars filename = secure_filename(f.filename) file_stream = f.stream try: form = werkzeug.datastructures.MultiDict(request.form) form['HTTP_AUTHORIZATION'] = get_authorization() form['filename'] = filename form['file_stream'] = file_stream result = registered_modules[module_name].WebAPI().run(form) except Exception: if app.config['LOCAL_DEV'] == True: raise # pass along to be caught by Flask's debugger return render_template('error.html', scripts=registered_modules, module_name=module_name, error_message=traceback.format_exc()) output = result['output'] if 'output_type' in result: output_type = result['output_type'] else: if isinstance(output, basestring): output_type = 'simple' else: output_type = 'table' if result['output_type'] == 'custom': return render_template('result_custom.html', custom_output=Markup(result['output']), scripts=registered_modules, module_name=module_name) elif result['output_type'] == 'simple': return render_template('result.html', output=result['output'], scripts=registered_modules, module_name=module_name) elif result['output_type'] == 'file': return Response(result['output'], mimetype='application/octet-stream', headers={'Content-Disposition': 'attachment;filename='+result['filename']}) elif result['output_type'] == 'table': return render_template('result_table.html', output=result['output'], scripts=registered_modules, module_name=module_name, headers=result['headers'])
['def', 'run_script', '(', 'module_name', ')', ':', 'filename', '=', "''", 'file_stream', '=', "''", 'if', 'len', '(', 'request', '.', 'files', ')', '>', '0', ':', '# Get the name of the uploaded file', 'f', '=', 'request', '.', 'files', '[', "'file_upload'", ']', '# Make the filename safe, remove unsupported chars', 'filename', '=', 'secure_filename', '(', 'f', '.', 'filename', ')', 'file_stream', '=', 'f', '.', 'stream', 'try', ':', 'form', '=', 'werkzeug', '.', 'datastructures', '.', 'MultiDict', '(', 'request', '.', 'form', ')', 'form', '[', "'HTTP_AUTHORIZATION'", ']', '=', 'get_authorization', '(', ')', 'form', '[', "'filename'", ']', '=', 'filename', 'form', '[', "'file_stream'", ']', '=', 'file_stream', 'result', '=', 'registered_modules', '[', 'module_name', ']', '.', 'WebAPI', '(', ')', '.', 'run', '(', 'form', ')', 'except', 'Exception', ':', 'if', 'app', '.', 'config', '[', "'LOCAL_DEV'", ']', '==', 'True', ':', 'raise', "# pass along to be caught by Flask's debugger", 'return', 'render_template', '(', "'error.html'", ',', 'scripts', '=', 'registered_modules', ',', 'module_name', '=', 'module_name', ',', 'error_message', '=', 'traceback', '.', 'format_exc', '(', ')', ')', 'output', '=', 'result', '[', "'output'", ']', 'if', "'output_type'", 'in', 'result', ':', 'output_type', '=', 'result', '[', "'output_type'", ']', 'else', ':', 'if', 'isinstance', '(', 'output', ',', 'basestring', ')', ':', 'output_type', '=', "'simple'", 'else', ':', 'output_type', '=', "'table'", 'if', 'result', '[', "'output_type'", ']', '==', "'custom'", ':', 'return', 'render_template', '(', "'result_custom.html'", ',', 'custom_output', '=', 'Markup', '(', 'result', '[', "'output'", ']', ')', ',', 'scripts', '=', 'registered_modules', ',', 'module_name', '=', 'module_name', ')', 'elif', 'result', '[', "'output_type'", ']', '==', "'simple'", ':', 'return', 'render_template', '(', "'result.html'", ',', 'output', '=', 'result', '[', "'output'", ']', ',', 'scripts', '=', 'registered_modules', ',', 'module_name', '=', 'module_name', ')', 'elif', 'result', '[', "'output_type'", ']', '==', "'file'", ':', 'return', 'Response', '(', 'result', '[', "'output'", ']', ',', 'mimetype', '=', "'application/octet-stream'", ',', 'headers', '=', '{', "'Content-Disposition'", ':', "'attachment;filename='", '+', 'result', '[', "'filename'", ']', '}', ')', 'elif', 'result', '[', "'output_type'", ']', '==', "'table'", ':', 'return', 'render_template', '(', "'result_table.html'", ',', 'output', '=', 'result', '[', "'output'", ']', ',', 'scripts', '=', 'registered_modules', ',', 'module_name', '=', 'module_name', ',', 'headers', '=', 'result', '[', "'headers'", ']', ')']
Take script input (from script_input above), run the run() function, and render the results in the appropriate template
['Take', 'script', 'input', '(', 'from', 'script_input', 'above', ')', 'run', 'the', 'run', '()', 'function', 'and', 'render', 'the', 'results', 'in', 'the', 'appropriate', 'template']
train
https://github.com/mosesschwartz/scrypture/blob/d51eb0c9835a5122a655078268185ce8ab9ec86a/scrypture/scrypture.py#L186-L239
6,164
python-diamond/Diamond
src/collectors/memcached_slab/memcached_slab.py
MemcachedSlabCollector.get_slab_stats
def get_slab_stats(self): """Retrieve slab stats from memcached.""" s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.connect((self.host, self.port)) s.send("stats slabs\n") try: data = "" while True: data += s.recv(4096) if data.endswith('END\r\n'): break return data finally: s.close()
python
def get_slab_stats(self): """Retrieve slab stats from memcached.""" s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.connect((self.host, self.port)) s.send("stats slabs\n") try: data = "" while True: data += s.recv(4096) if data.endswith('END\r\n'): break return data finally: s.close()
['def', 'get_slab_stats', '(', 'self', ')', ':', 's', '=', 'socket', '.', 'socket', '(', 'socket', '.', 'AF_INET', ',', 'socket', '.', 'SOCK_STREAM', ')', 's', '.', 'connect', '(', '(', 'self', '.', 'host', ',', 'self', '.', 'port', ')', ')', 's', '.', 'send', '(', '"stats slabs\\n"', ')', 'try', ':', 'data', '=', '""', 'while', 'True', ':', 'data', '+=', 's', '.', 'recv', '(', '4096', ')', 'if', 'data', '.', 'endswith', '(', "'END\\r\\n'", ')', ':', 'break', 'return', 'data', 'finally', ':', 's', '.', 'close', '(', ')']
Retrieve slab stats from memcached.
['Retrieve', 'slab', 'stats', 'from', 'memcached', '.']
train
https://github.com/python-diamond/Diamond/blob/0f3eb04327d6d3ed5e53a9967d6c9d2c09714a47/src/collectors/memcached_slab/memcached_slab.py#L99-L112
6,165
rameshg87/pyremotevbox
pyremotevbox/ZSI/parse.py
ParsedSoap.WhatMustIUnderstand
def WhatMustIUnderstand(self): '''Return a list of (uri,localname) tuples for all elements in the header that have mustUnderstand set. ''' return [ ( E.namespaceURI, E.localName ) for E in self.header_elements if _find_mu(E) == "1" ]
python
def WhatMustIUnderstand(self): '''Return a list of (uri,localname) tuples for all elements in the header that have mustUnderstand set. ''' return [ ( E.namespaceURI, E.localName ) for E in self.header_elements if _find_mu(E) == "1" ]
['def', 'WhatMustIUnderstand', '(', 'self', ')', ':', 'return', '[', '(', 'E', '.', 'namespaceURI', ',', 'E', '.', 'localName', ')', 'for', 'E', 'in', 'self', '.', 'header_elements', 'if', '_find_mu', '(', 'E', ')', '==', '"1"', ']']
Return a list of (uri,localname) tuples for all elements in the header that have mustUnderstand set.
['Return', 'a', 'list', 'of', '(', 'uri', 'localname', ')', 'tuples', 'for', 'all', 'elements', 'in', 'the', 'header', 'that', 'have', 'mustUnderstand', 'set', '.']
train
https://github.com/rameshg87/pyremotevbox/blob/123dffff27da57c8faa3ac1dd4c68b1cf4558b1a/pyremotevbox/ZSI/parse.py#L328-L333
6,166
dropbox/stone
stone/frontend/ir_generator.py
IRGenerator._filter_namespaces_by_route_whitelist
def _filter_namespaces_by_route_whitelist(self): """ Given a parsed API in IR form, filter the user-defined datatypes so that they include only the route datatypes and their direct dependencies. """ assert self._routes is not None, "Missing route whitelist" assert 'route_whitelist' in self._routes assert 'datatype_whitelist' in self._routes # Get route whitelist in canonical form route_whitelist = {} for namespace_name, route_reprs in self._routes['route_whitelist'].items(): new_route_reprs = [] if route_reprs == ['*']: namespace = self.api.namespaces[namespace_name] new_route_reprs = [route.name_with_version() for route in namespace.routes] else: for route_repr in route_reprs: route_name, version = parse_route_name_and_version(route_repr) if version > 1: new_route_reprs.append('{}:{}'.format(route_name, version)) else: new_route_reprs.append(route_name) route_whitelist[namespace_name] = new_route_reprs # Parse the route whitelist and populate any starting data types route_data_types = [] for namespace_name, route_reprs in route_whitelist.items(): # Error out if user supplied nonexistent namespace if namespace_name not in self.api.namespaces: raise AssertionError('Namespace %s is not defined!' % namespace_name) namespace = self.api.namespaces[namespace_name] # Parse namespace doc refs and add them to the starting data types if namespace.doc is not None: route_data_types.extend( parse_data_types_from_doc_ref(self.api, namespace.doc, namespace_name)) # Parse user-specified routes and add them to the starting data types # Note that this may add duplicates, but that's okay, as the recursion # keeps track of visited data types. assert '*' not in route_reprs for routes_repr in route_reprs: route_name, version = parse_route_name_and_version(routes_repr) if route_name not in namespace.routes_by_name or \ version not in namespace.routes_by_name[route_name].at_version: raise AssertionError('Route %s at version %d is not defined!' % (route_name, version)) route = namespace.routes_by_name[route_name].at_version[version] route_data_types.extend(namespace.get_route_io_data_types_for_route(route)) if route.doc is not None: route_data_types.extend( parse_data_types_from_doc_ref(self.api, route.doc, namespace_name)) # Parse the datatype whitelist and populate any starting data types for namespace_name, datatype_names in self._routes['datatype_whitelist'].items(): if namespace_name not in self.api.namespaces: raise AssertionError('Namespace %s is not defined!' % namespace_name) # Parse namespace doc refs and add them to the starting data types namespace = self.api.namespaces[namespace_name] if namespace.doc is not None: route_data_types.extend( parse_data_types_from_doc_ref(self.api, namespace.doc, namespace_name)) for datatype_name in datatype_names: if datatype_name not in self.api.namespaces[namespace_name].data_type_by_name: raise AssertionError('Datatype %s is not defined!' % datatype_name) data_type = self.api.namespaces[namespace_name].data_type_by_name[datatype_name] route_data_types.append(data_type) # Recurse on dependencies output_types_by_ns, output_routes_by_ns = self._find_dependencies(route_data_types) # Update the IR representation. This involves editing the data types and # routes for each namespace. for namespace in self.api.namespaces.values(): data_types = list(set(output_types_by_ns[namespace.name])) # defaults to empty list namespace.data_types = data_types namespace.data_type_by_name = {d.name: d for d in data_types} output_route_reprs = [output_route.name_with_version() for output_route in output_routes_by_ns[namespace.name]] if namespace.name in route_whitelist: whitelisted_route_reprs = route_whitelist[namespace.name] route_reprs = list(set(whitelisted_route_reprs + output_route_reprs)) else: route_reprs = output_route_reprs routes = [] for route_repr in route_reprs: route_name, version = parse_route_name_and_version(route_repr) route = namespace.routes_by_name[route_name].at_version[version] routes.append(route) namespace.routes = [] namespace.route_by_name = {} namespace.routes_by_name = {} for route in routes: namespace.add_route(route)
python
def _filter_namespaces_by_route_whitelist(self): """ Given a parsed API in IR form, filter the user-defined datatypes so that they include only the route datatypes and their direct dependencies. """ assert self._routes is not None, "Missing route whitelist" assert 'route_whitelist' in self._routes assert 'datatype_whitelist' in self._routes # Get route whitelist in canonical form route_whitelist = {} for namespace_name, route_reprs in self._routes['route_whitelist'].items(): new_route_reprs = [] if route_reprs == ['*']: namespace = self.api.namespaces[namespace_name] new_route_reprs = [route.name_with_version() for route in namespace.routes] else: for route_repr in route_reprs: route_name, version = parse_route_name_and_version(route_repr) if version > 1: new_route_reprs.append('{}:{}'.format(route_name, version)) else: new_route_reprs.append(route_name) route_whitelist[namespace_name] = new_route_reprs # Parse the route whitelist and populate any starting data types route_data_types = [] for namespace_name, route_reprs in route_whitelist.items(): # Error out if user supplied nonexistent namespace if namespace_name not in self.api.namespaces: raise AssertionError('Namespace %s is not defined!' % namespace_name) namespace = self.api.namespaces[namespace_name] # Parse namespace doc refs and add them to the starting data types if namespace.doc is not None: route_data_types.extend( parse_data_types_from_doc_ref(self.api, namespace.doc, namespace_name)) # Parse user-specified routes and add them to the starting data types # Note that this may add duplicates, but that's okay, as the recursion # keeps track of visited data types. assert '*' not in route_reprs for routes_repr in route_reprs: route_name, version = parse_route_name_and_version(routes_repr) if route_name not in namespace.routes_by_name or \ version not in namespace.routes_by_name[route_name].at_version: raise AssertionError('Route %s at version %d is not defined!' % (route_name, version)) route = namespace.routes_by_name[route_name].at_version[version] route_data_types.extend(namespace.get_route_io_data_types_for_route(route)) if route.doc is not None: route_data_types.extend( parse_data_types_from_doc_ref(self.api, route.doc, namespace_name)) # Parse the datatype whitelist and populate any starting data types for namespace_name, datatype_names in self._routes['datatype_whitelist'].items(): if namespace_name not in self.api.namespaces: raise AssertionError('Namespace %s is not defined!' % namespace_name) # Parse namespace doc refs and add them to the starting data types namespace = self.api.namespaces[namespace_name] if namespace.doc is not None: route_data_types.extend( parse_data_types_from_doc_ref(self.api, namespace.doc, namespace_name)) for datatype_name in datatype_names: if datatype_name not in self.api.namespaces[namespace_name].data_type_by_name: raise AssertionError('Datatype %s is not defined!' % datatype_name) data_type = self.api.namespaces[namespace_name].data_type_by_name[datatype_name] route_data_types.append(data_type) # Recurse on dependencies output_types_by_ns, output_routes_by_ns = self._find_dependencies(route_data_types) # Update the IR representation. This involves editing the data types and # routes for each namespace. for namespace in self.api.namespaces.values(): data_types = list(set(output_types_by_ns[namespace.name])) # defaults to empty list namespace.data_types = data_types namespace.data_type_by_name = {d.name: d for d in data_types} output_route_reprs = [output_route.name_with_version() for output_route in output_routes_by_ns[namespace.name]] if namespace.name in route_whitelist: whitelisted_route_reprs = route_whitelist[namespace.name] route_reprs = list(set(whitelisted_route_reprs + output_route_reprs)) else: route_reprs = output_route_reprs routes = [] for route_repr in route_reprs: route_name, version = parse_route_name_and_version(route_repr) route = namespace.routes_by_name[route_name].at_version[version] routes.append(route) namespace.routes = [] namespace.route_by_name = {} namespace.routes_by_name = {} for route in routes: namespace.add_route(route)
['def', '_filter_namespaces_by_route_whitelist', '(', 'self', ')', ':', 'assert', 'self', '.', '_routes', 'is', 'not', 'None', ',', '"Missing route whitelist"', 'assert', "'route_whitelist'", 'in', 'self', '.', '_routes', 'assert', "'datatype_whitelist'", 'in', 'self', '.', '_routes', '# Get route whitelist in canonical form', 'route_whitelist', '=', '{', '}', 'for', 'namespace_name', ',', 'route_reprs', 'in', 'self', '.', '_routes', '[', "'route_whitelist'", ']', '.', 'items', '(', ')', ':', 'new_route_reprs', '=', '[', ']', 'if', 'route_reprs', '==', '[', "'*'", ']', ':', 'namespace', '=', 'self', '.', 'api', '.', 'namespaces', '[', 'namespace_name', ']', 'new_route_reprs', '=', '[', 'route', '.', 'name_with_version', '(', ')', 'for', 'route', 'in', 'namespace', '.', 'routes', ']', 'else', ':', 'for', 'route_repr', 'in', 'route_reprs', ':', 'route_name', ',', 'version', '=', 'parse_route_name_and_version', '(', 'route_repr', ')', 'if', 'version', '>', '1', ':', 'new_route_reprs', '.', 'append', '(', "'{}:{}'", '.', 'format', '(', 'route_name', ',', 'version', ')', ')', 'else', ':', 'new_route_reprs', '.', 'append', '(', 'route_name', ')', 'route_whitelist', '[', 'namespace_name', ']', '=', 'new_route_reprs', '# Parse the route whitelist and populate any starting data types', 'route_data_types', '=', '[', ']', 'for', 'namespace_name', ',', 'route_reprs', 'in', 'route_whitelist', '.', 'items', '(', ')', ':', '# Error out if user supplied nonexistent namespace', 'if', 'namespace_name', 'not', 'in', 'self', '.', 'api', '.', 'namespaces', ':', 'raise', 'AssertionError', '(', "'Namespace %s is not defined!'", '%', 'namespace_name', ')', 'namespace', '=', 'self', '.', 'api', '.', 'namespaces', '[', 'namespace_name', ']', '# Parse namespace doc refs and add them to the starting data types', 'if', 'namespace', '.', 'doc', 'is', 'not', 'None', ':', 'route_data_types', '.', 'extend', '(', 'parse_data_types_from_doc_ref', '(', 'self', '.', 'api', ',', 'namespace', '.', 'doc', ',', 'namespace_name', ')', ')', '# Parse user-specified routes and add them to the starting data types', "# Note that this may add duplicates, but that's okay, as the recursion", '# keeps track of visited data types.', 'assert', "'*'", 'not', 'in', 'route_reprs', 'for', 'routes_repr', 'in', 'route_reprs', ':', 'route_name', ',', 'version', '=', 'parse_route_name_and_version', '(', 'routes_repr', ')', 'if', 'route_name', 'not', 'in', 'namespace', '.', 'routes_by_name', 'or', 'version', 'not', 'in', 'namespace', '.', 'routes_by_name', '[', 'route_name', ']', '.', 'at_version', ':', 'raise', 'AssertionError', '(', "'Route %s at version %d is not defined!'", '%', '(', 'route_name', ',', 'version', ')', ')', 'route', '=', 'namespace', '.', 'routes_by_name', '[', 'route_name', ']', '.', 'at_version', '[', 'version', ']', 'route_data_types', '.', 'extend', '(', 'namespace', '.', 'get_route_io_data_types_for_route', '(', 'route', ')', ')', 'if', 'route', '.', 'doc', 'is', 'not', 'None', ':', 'route_data_types', '.', 'extend', '(', 'parse_data_types_from_doc_ref', '(', 'self', '.', 'api', ',', 'route', '.', 'doc', ',', 'namespace_name', ')', ')', '# Parse the datatype whitelist and populate any starting data types', 'for', 'namespace_name', ',', 'datatype_names', 'in', 'self', '.', '_routes', '[', "'datatype_whitelist'", ']', '.', 'items', '(', ')', ':', 'if', 'namespace_name', 'not', 'in', 'self', '.', 'api', '.', 'namespaces', ':', 'raise', 'AssertionError', '(', "'Namespace %s is not defined!'", '%', 'namespace_name', ')', '# Parse namespace doc refs and add them to the starting data types', 'namespace', '=', 'self', '.', 'api', '.', 'namespaces', '[', 'namespace_name', ']', 'if', 'namespace', '.', 'doc', 'is', 'not', 'None', ':', 'route_data_types', '.', 'extend', '(', 'parse_data_types_from_doc_ref', '(', 'self', '.', 'api', ',', 'namespace', '.', 'doc', ',', 'namespace_name', ')', ')', 'for', 'datatype_name', 'in', 'datatype_names', ':', 'if', 'datatype_name', 'not', 'in', 'self', '.', 'api', '.', 'namespaces', '[', 'namespace_name', ']', '.', 'data_type_by_name', ':', 'raise', 'AssertionError', '(', "'Datatype %s is not defined!'", '%', 'datatype_name', ')', 'data_type', '=', 'self', '.', 'api', '.', 'namespaces', '[', 'namespace_name', ']', '.', 'data_type_by_name', '[', 'datatype_name', ']', 'route_data_types', '.', 'append', '(', 'data_type', ')', '# Recurse on dependencies', 'output_types_by_ns', ',', 'output_routes_by_ns', '=', 'self', '.', '_find_dependencies', '(', 'route_data_types', ')', '# Update the IR representation. This involves editing the data types and', '# routes for each namespace.', 'for', 'namespace', 'in', 'self', '.', 'api', '.', 'namespaces', '.', 'values', '(', ')', ':', 'data_types', '=', 'list', '(', 'set', '(', 'output_types_by_ns', '[', 'namespace', '.', 'name', ']', ')', ')', '# defaults to empty list', 'namespace', '.', 'data_types', '=', 'data_types', 'namespace', '.', 'data_type_by_name', '=', '{', 'd', '.', 'name', ':', 'd', 'for', 'd', 'in', 'data_types', '}', 'output_route_reprs', '=', '[', 'output_route', '.', 'name_with_version', '(', ')', 'for', 'output_route', 'in', 'output_routes_by_ns', '[', 'namespace', '.', 'name', ']', ']', 'if', 'namespace', '.', 'name', 'in', 'route_whitelist', ':', 'whitelisted_route_reprs', '=', 'route_whitelist', '[', 'namespace', '.', 'name', ']', 'route_reprs', '=', 'list', '(', 'set', '(', 'whitelisted_route_reprs', '+', 'output_route_reprs', ')', ')', 'else', ':', 'route_reprs', '=', 'output_route_reprs', 'routes', '=', '[', ']', 'for', 'route_repr', 'in', 'route_reprs', ':', 'route_name', ',', 'version', '=', 'parse_route_name_and_version', '(', 'route_repr', ')', 'route', '=', 'namespace', '.', 'routes_by_name', '[', 'route_name', ']', '.', 'at_version', '[', 'version', ']', 'routes', '.', 'append', '(', 'route', ')', 'namespace', '.', 'routes', '=', '[', ']', 'namespace', '.', 'route_by_name', '=', '{', '}', 'namespace', '.', 'routes_by_name', '=', '{', '}', 'for', 'route', 'in', 'routes', ':', 'namespace', '.', 'add_route', '(', 'route', ')']
Given a parsed API in IR form, filter the user-defined datatypes so that they include only the route datatypes and their direct dependencies.
['Given', 'a', 'parsed', 'API', 'in', 'IR', 'form', 'filter', 'the', 'user', '-', 'defined', 'datatypes', 'so', 'that', 'they', 'include', 'only', 'the', 'route', 'datatypes', 'and', 'their', 'direct', 'dependencies', '.']
train
https://github.com/dropbox/stone/blob/2e95cbcd1c48e05cca68c919fd8d24adec6b0f58/stone/frontend/ir_generator.py#L1537-L1637
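Note on `_filter_namespaces_by_route_whitelist` above: the first loop canonicalizes route names so a route is spelled 'name' at version 1 and 'name:version' otherwise, which lets the later set unions deduplicate correctly. The normalization in isolation (a sketch; stone's real parse_route_name_and_version is stood in for by a simple partition):

def canonical_route(route_repr):
    name, _, ver = route_repr.partition(':')
    version = int(ver) if ver else 1
    return '{}:{}'.format(name, version) if version > 1 else name

assert canonical_route('get_metadata') == 'get_metadata'
assert canonical_route('get_metadata:1') == 'get_metadata'
assert canonical_route('get_metadata:2') == 'get_metadata:2'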
6,167
troeger/opensubmit
web/opensubmit/models/assignment.py
Assignment.has_perf_results
def has_perf_results(self): ''' Figure out if any submission for this assignment has performance data being available. ''' num_results = SubmissionTestResult.objects.filter(perf_data__isnull=False).filter(submission_file__submissions__assignment=self).count() return num_results != 0
python
def has_perf_results(self): ''' Figure out if any submission for this assignment has performance data being available. ''' num_results = SubmissionTestResult.objects.filter(perf_data__isnull=False).filter(submission_file__submissions__assignment=self).count() return num_results != 0
['def', 'has_perf_results', '(', 'self', ')', ':', 'num_results', '=', 'SubmissionTestResult', '.', 'objects', '.', 'filter', '(', 'perf_data__isnull', '=', 'False', ')', '.', 'filter', '(', 'submission_file__submissions__assignment', '=', 'self', ')', '.', 'count', '(', ')', 'return', 'num_results', '!=', '0']
Figure out if any submission for this assignment has performance data being available.
['Figure', 'out', 'if', 'any', 'submission', 'for', 'this', 'assignment', 'has', 'performance', 'data', 'being', 'available', '.']
train
https://github.com/troeger/opensubmit/blob/384a95b7c6fa41e3f949a129d25dafd9a1c54859/web/opensubmit/models/assignment.py#L87-L92
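Note on `has_perf_results` above: counting every matching row just to compare against zero forces a full COUNT(*); Django's QuerySet.exists() lets the database stop at the first hit. A hedged equivalent, a sketch rather than a verified patch (model and lookup names are taken from the record):

def has_perf_results(self):
    return SubmissionTestResult.objects.filter(
        perf_data__isnull=False,
        submission_file__submissions__assignment=self,
    ).exists()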
6,168
krukas/Trionyx
trionyx/trionyx/views/core.py
UpdateDialog.display_dialog
def display_dialog(self, *args, **kwargs): """Display form and success message when set""" form = kwargs.pop('form_instance', None) success_message = kwargs.pop('success_message', None) if not form: form = self.get_form_class()(initial=kwargs, instance=self.object) if not hasattr(form, "helper"): form.helper = FormHelper() form.helper.form_tag = False return { 'title': self.title.format( model_name=self.get_model_config().model_name, object=str(self.object) if self.object else '', ), 'content': self.render_to_string(self.template, { 'form': form, 'success_message': success_message, }), 'submit_label': self.submit_label, 'success': bool(success_message), }
python
def display_dialog(self, *args, **kwargs): """Display form and success message when set""" form = kwargs.pop('form_instance', None) success_message = kwargs.pop('success_message', None) if not form: form = self.get_form_class()(initial=kwargs, instance=self.object) if not hasattr(form, "helper"): form.helper = FormHelper() form.helper.form_tag = False return { 'title': self.title.format( model_name=self.get_model_config().model_name, object=str(self.object) if self.object else '', ), 'content': self.render_to_string(self.template, { 'form': form, 'success_message': success_message, }), 'submit_label': self.submit_label, 'success': bool(success_message), }
['def', 'display_dialog', '(', 'self', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'form', '=', 'kwargs', '.', 'pop', '(', "'form_instance'", ',', 'None', ')', 'success_message', '=', 'kwargs', '.', 'pop', '(', "'success_message'", ',', 'None', ')', 'if', 'not', 'form', ':', 'form', '=', 'self', '.', 'get_form_class', '(', ')', '(', 'initial', '=', 'kwargs', ',', 'instance', '=', 'self', '.', 'object', ')', 'if', 'not', 'hasattr', '(', 'form', ',', '"helper"', ')', ':', 'form', '.', 'helper', '=', 'FormHelper', '(', ')', 'form', '.', 'helper', '.', 'form_tag', '=', 'False', 'return', '{', "'title'", ':', 'self', '.', 'title', '.', 'format', '(', 'model_name', '=', 'self', '.', 'get_model_config', '(', ')', '.', 'model_name', ',', 'object', '=', 'str', '(', 'self', '.', 'object', ')', 'if', 'self', '.', 'object', 'else', "''", ',', ')', ',', "'content'", ':', 'self', '.', 'render_to_string', '(', 'self', '.', 'template', ',', '{', "'form'", ':', 'form', ',', "'success_message'", ':', 'success_message', ',', '}', ')', ',', "'submit_label'", ':', 'self', '.', 'submit_label', ',', "'success'", ':', 'bool', '(', 'success_message', ')', ',', '}']
Display form and success message when set
['Display', 'form', 'and', 'success', 'message', 'when', 'set']
train
https://github.com/krukas/Trionyx/blob/edac132cc0797190153f2e60bc7e88cb50e80da6/trionyx/trionyx/views/core.py#L802-L825
6,169
Spinmob/spinmob
egg/_gui.py
GridLayout.set_row_stretch
def set_row_stretch(self, row=0, stretch=10): """ Sets the row stretch. Larger numbers mean it will expand more to fill space. """ self._layout.setRowStretch(row, stretch) return self
python
def set_row_stretch(self, row=0, stretch=10): """ Sets the row stretch. Larger numbers mean it will expand more to fill space. """ self._layout.setRowStretch(row, stretch) return self
['def', 'set_row_stretch', '(', 'self', ',', 'row', '=', '0', ',', 'stretch', '=', '10', ')', ':', 'self', '.', '_layout', '.', 'setRowStretch', '(', 'row', ',', 'stretch', ')', 'return', 'self']
Sets the row stretch. Larger numbers mean it will expand more to fill space.
['Sets', 'the', 'row', 'stretch', '.', 'Larger', 'numbers', 'mean', 'it', 'will', 'expand', 'more', 'to', 'fill', 'space', '.']
train
https://github.com/Spinmob/spinmob/blob/f037f5df07f194bcd4a01f4d9916e57b9e8fb45a/egg/_gui.py#L447-L453
6,170
PMBio/limix-backup
limix/mtSet/core/simulator.py
CSimulator.genHidden
def genHidden(self,nHidden=10,vTot=0.5,vCommon=0.1): """ generate """ vSpecific = vTot-vCommon # generate hidden X = self.genWeights(self.N,nHidden) # common effect H = self.genWeights(nHidden,self.P) Bc = SP.dot(H,self.genTraitEffect()) Yc = SP.dot(X,Bc) Yc *= SP.sqrt(vCommon/Yc.var(0).mean()) # indipendent effect Bi = SP.randn(nHidden,self.P) Yi = SP.dot(X,Bi) Yi *= SP.sqrt(vSpecific/Yi.var(0).mean()) return Yc,Yi
python
def genHidden(self,nHidden=10,vTot=0.5,vCommon=0.1): """ generate """ vSpecific = vTot-vCommon # generate hidden X = self.genWeights(self.N,nHidden) # common effect H = self.genWeights(nHidden,self.P) Bc = SP.dot(H,self.genTraitEffect()) Yc = SP.dot(X,Bc) Yc *= SP.sqrt(vCommon/Yc.var(0).mean()) # indipendent effect Bi = SP.randn(nHidden,self.P) Yi = SP.dot(X,Bi) Yi *= SP.sqrt(vSpecific/Yi.var(0).mean()) return Yc,Yi
['def', 'genHidden', '(', 'self', ',', 'nHidden', '=', '10', ',', 'vTot', '=', '0.5', ',', 'vCommon', '=', '0.1', ')', ':', 'vSpecific', '=', 'vTot', '-', 'vCommon', '# generate hidden', 'X', '=', 'self', '.', 'genWeights', '(', 'self', '.', 'N', ',', 'nHidden', ')', '# common effect', 'H', '=', 'self', '.', 'genWeights', '(', 'nHidden', ',', 'self', '.', 'P', ')', 'Bc', '=', 'SP', '.', 'dot', '(', 'H', ',', 'self', '.', 'genTraitEffect', '(', ')', ')', 'Yc', '=', 'SP', '.', 'dot', '(', 'X', ',', 'Bc', ')', 'Yc', '*=', 'SP', '.', 'sqrt', '(', 'vCommon', '/', 'Yc', '.', 'var', '(', '0', ')', '.', 'mean', '(', ')', ')', '# indipendent effect', 'Bi', '=', 'SP', '.', 'randn', '(', 'nHidden', ',', 'self', '.', 'P', ')', 'Yi', '=', 'SP', '.', 'dot', '(', 'X', ',', 'Bi', ')', 'Yi', '*=', 'SP', '.', 'sqrt', '(', 'vSpecific', '/', 'Yi', '.', 'var', '(', '0', ')', '.', 'mean', '(', ')', ')', 'return', 'Yc', ',', 'Yi']
generate
['generate']
train
https://github.com/PMBio/limix-backup/blob/1e201fdb5c694d0d5506f207f3de65d8ef66146c/limix/mtSet/core/simulator.py#L257-L276
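Note on `genHidden` above: each simulated component is rescaled so the mean of its per-trait variances hits a target budget (vCommon for the common part, vTot - vCommon for the specific part), using var(c*Y) = c**2 * var(Y). Just that step as runnable numpy (SP in the record is scipy used numpy-style):

import numpy as np

def scale_to_variance(Y, v_target):
    # Choose c = sqrt(v_target / mean column variance), so the
    # rescaled matrix has mean per-column variance == v_target.
    return Y * np.sqrt(v_target / Y.var(axis=0).mean())

Y = scale_to_variance(np.random.randn(10000, 4), 0.1)
print(round(Y.var(axis=0).mean(), 2))  # ~0.1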
6,171
DAI-Lab/Copulas
copulas/multivariate/tree.py
Tree.get_likelihood
def get_likelihood(self, uni_matrix): """Compute likelihood of the tree given an U matrix. Args: uni_matrix(numpy.array): univariate matrix to evaluate likelihood on. Returns: tuple[float, numpy.array]: likelihood of the current tree, next level conditional univariate matrix """ uni_dim = uni_matrix.shape[1] num_edge = len(self.edges) values = np.zeros([1, num_edge]) new_uni_matrix = np.empty([uni_dim, uni_dim]) for i in range(num_edge): edge = self.edges[i] value, left_u, right_u = edge.get_likelihood(uni_matrix) new_uni_matrix[edge.L, edge.R] = left_u new_uni_matrix[edge.R, edge.L] = right_u values[0, i] = np.log(value) return np.sum(values), new_uni_matrix
python
def get_likelihood(self, uni_matrix): """Compute likelihood of the tree given an U matrix. Args: uni_matrix(numpy.array): univariate matrix to evaluate likelihood on. Returns: tuple[float, numpy.array]: likelihood of the current tree, next level conditional univariate matrix """ uni_dim = uni_matrix.shape[1] num_edge = len(self.edges) values = np.zeros([1, num_edge]) new_uni_matrix = np.empty([uni_dim, uni_dim]) for i in range(num_edge): edge = self.edges[i] value, left_u, right_u = edge.get_likelihood(uni_matrix) new_uni_matrix[edge.L, edge.R] = left_u new_uni_matrix[edge.R, edge.L] = right_u values[0, i] = np.log(value) return np.sum(values), new_uni_matrix
['def', 'get_likelihood', '(', 'self', ',', 'uni_matrix', ')', ':', 'uni_dim', '=', 'uni_matrix', '.', 'shape', '[', '1', ']', 'num_edge', '=', 'len', '(', 'self', '.', 'edges', ')', 'values', '=', 'np', '.', 'zeros', '(', '[', '1', ',', 'num_edge', ']', ')', 'new_uni_matrix', '=', 'np', '.', 'empty', '(', '[', 'uni_dim', ',', 'uni_dim', ']', ')', 'for', 'i', 'in', 'range', '(', 'num_edge', ')', ':', 'edge', '=', 'self', '.', 'edges', '[', 'i', ']', 'value', ',', 'left_u', ',', 'right_u', '=', 'edge', '.', 'get_likelihood', '(', 'uni_matrix', ')', 'new_uni_matrix', '[', 'edge', '.', 'L', ',', 'edge', '.', 'R', ']', '=', 'left_u', 'new_uni_matrix', '[', 'edge', '.', 'R', ',', 'edge', '.', 'L', ']', '=', 'right_u', 'values', '[', '0', ',', 'i', ']', '=', 'np', '.', 'log', '(', 'value', ')', 'return', 'np', '.', 'sum', '(', 'values', ')', ',', 'new_uni_matrix']
Compute likelihood of the tree given an U matrix. Args: uni_matrix(numpy.array): univariate matrix to evaluate likelihood on. Returns: tuple[float, numpy.array]: likelihood of the current tree, next level conditional univariate matrix
['Compute', 'likelihood', 'of', 'the', 'tree', 'given', 'an', 'U', 'matrix', '.']
train
https://github.com/DAI-Lab/Copulas/blob/821df61c3d36a6b81ef2883935f935c2eaaa862c/copulas/multivariate/tree.py#L213-L235
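Note on `Tree.get_likelihood` above: it sums per-edge log densities (the log of the product over edges) and, as a side effect, fills a matrix of conditional u-values that seeds the next vine level. The accumulation pattern in isolation (edges here are stand-in objects exposing L, R and get_likelihood, mirroring the record, not Copulas' actual Edge class):

import numpy as np

def tree_log_likelihood(edges, uni_matrix):
    dim = uni_matrix.shape[1]
    next_level = np.empty((dim, dim))
    total = 0.0
    for edge in edges:
        value, left_u, right_u = edge.get_likelihood(uni_matrix)
        next_level[edge.L, edge.R] = left_u
        next_level[edge.R, edge.L] = right_u
        total += np.log(value)  # sum of logs == log of product
    return total, next_level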
6,172
BerkeleyAutomation/visualization
visualization/visualizer3d.py
Visualizer3D.render
def render(n_frames=1, axis=np.array([0.,0.,1.]), clf=True, **kwargs): """Render frames from the viewer. Parameters ---------- n_frames : int Number of frames to render. If more than one, the scene will animate. axis : (3,) float or None If present, the animation will rotate about the given axis in world coordinates. Otherwise, the animation will rotate in azimuth. clf : bool If true, the Visualizer is cleared after rendering the figure. kwargs : dict Other keyword arguments for the SceneViewer instance. Returns ------- list of perception.ColorImage A list of ColorImages rendered from the viewer. """ v = SceneViewer(Visualizer3D._scene, size=Visualizer3D._init_size, animate=(n_frames > 1), animate_axis=axis, max_frames=n_frames, **kwargs) if clf: Visualizer3D.clf() return v.saved_frames
python
def render(n_frames=1, axis=np.array([0.,0.,1.]), clf=True, **kwargs): """Render frames from the viewer. Parameters ---------- n_frames : int Number of frames to render. If more than one, the scene will animate. axis : (3,) float or None If present, the animation will rotate about the given axis in world coordinates. Otherwise, the animation will rotate in azimuth. clf : bool If true, the Visualizer is cleared after rendering the figure. kwargs : dict Other keyword arguments for the SceneViewer instance. Returns ------- list of perception.ColorImage A list of ColorImages rendered from the viewer. """ v = SceneViewer(Visualizer3D._scene, size=Visualizer3D._init_size, animate=(n_frames > 1), animate_axis=axis, max_frames=n_frames, **kwargs) if clf: Visualizer3D.clf() return v.saved_frames
['def', 'render', '(', 'n_frames', '=', '1', ',', 'axis', '=', 'np', '.', 'array', '(', '[', '0.', ',', '0.', ',', '1.', ']', ')', ',', 'clf', '=', 'True', ',', '*', '*', 'kwargs', ')', ':', 'v', '=', 'SceneViewer', '(', 'Visualizer3D', '.', '_scene', ',', 'size', '=', 'Visualizer3D', '.', '_init_size', ',', 'animate', '=', '(', 'n_frames', '>', '1', ')', ',', 'animate_axis', '=', 'axis', ',', 'max_frames', '=', 'n_frames', ',', '*', '*', 'kwargs', ')', 'if', 'clf', ':', 'Visualizer3D', '.', 'clf', '(', ')', 'return', 'v', '.', 'saved_frames']
Render frames from the viewer. Parameters ---------- n_frames : int Number of frames to render. If more than one, the scene will animate. axis : (3,) float or None If present, the animation will rotate about the given axis in world coordinates. Otherwise, the animation will rotate in azimuth. clf : bool If true, the Visualizer is cleared after rendering the figure. kwargs : dict Other keyword arguments for the SceneViewer instance. Returns ------- list of perception.ColorImage A list of ColorImages rendered from the viewer.
['Render', 'frames', 'from', 'the', 'viewer', '.']
train
https://github.com/BerkeleyAutomation/visualization/blob/f8d038cc65c78f841ef27f99fb2a638f44fa72b6/visualization/visualizer3d.py#L76-L106
6,173
DoWhileGeek/authentise-services
authentise_services/config.py
Config.parse_config
def parse_config(path): """parse either the config file we found, or use some canned defaults""" config = configparser.ConfigParser() if path: # if user has config with user creds in it, this will grab it config.read(path) try: return {k: v for k, v in config["default"].items()} except KeyError: return {}
python
def parse_config(path): """parse either the config file we found, or use some canned defaults""" config = configparser.ConfigParser() if path: # if user has config with user creds in it, this will grab it config.read(path) try: return {k: v for k, v in config["default"].items()} except KeyError: return {}
['def', 'parse_config', '(', 'path', ')', ':', 'config', '=', 'configparser', '.', 'ConfigParser', '(', ')', 'if', 'path', ':', '# if user has config with user creds in it, this will grab it', 'config', '.', 'read', '(', 'path', ')', 'try', ':', 'return', '{', 'k', ':', 'v', 'for', 'k', ',', 'v', 'in', 'config', '[', '"default"', ']', '.', 'items', '(', ')', '}', 'except', 'KeyError', ':', 'return', '{', '}']
parse either the config file we found, or use some canned defaults
['parse', 'either', 'the', 'config', 'file', 'we', 'found', 'or', 'use', 'some', 'canned', 'defaults']
train
https://github.com/DoWhileGeek/authentise-services/blob/ee32bd7f7de15d3fb24c0a6374640d3a1ec8096d/authentise_services/config.py#L25-L35
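Note on `parse_config` above: it degrades gracefully because ConfigParser.read() silently skips unreadable paths and the KeyError catch covers files without a [default] section, so both failure modes yield {}. A self-contained demonstration:

import configparser, os, tempfile

def parse_config(path):
    config = configparser.ConfigParser()
    if path:
        config.read(path)  # missing files are ignored, not raised
    try:
        return dict(config['default'])
    except KeyError:
        return {}

with tempfile.NamedTemporaryFile('w', suffix='.ini', delete=False) as f:
    f.write('[default]\nusername = alice\n')
print(parse_config(f.name))      # {'username': 'alice'}
print(parse_config('nope.ini'))  # {}
os.unlink(f.name)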
6,174
couchbase/couchbase-python-client
couchbase/admin.py
Admin.user_remove
def user_remove(self, domain, userid): """ Remove a user :param AuthDomain domain: The authentication domain for the user. :param userid: The user ID to remove :raise: :exc:`couchbase.exceptions.HTTPError` if the user does not exist. :return: :class:`~.HttpResult` """ path = self._get_management_path(domain, userid) return self.http_request(path=path, method='DELETE')
python
def user_remove(self, domain, userid): """ Remove a user :param AuthDomain domain: The authentication domain for the user. :param userid: The user ID to remove :raise: :exc:`couchbase.exceptions.HTTPError` if the user does not exist. :return: :class:`~.HttpResult` """ path = self._get_management_path(domain, userid) return self.http_request(path=path, method='DELETE')
['def', 'user_remove', '(', 'self', ',', 'domain', ',', 'userid', ')', ':', 'path', '=', 'self', '.', '_get_management_path', '(', 'domain', ',', 'userid', ')', 'return', 'self', '.', 'http_request', '(', 'path', '=', 'path', ',', 'method', '=', "'DELETE'", ')']
Remove a user :param AuthDomain domain: The authentication domain for the user. :param userid: The user ID to remove :raise: :exc:`couchbase.exceptions.HTTPError` if the user does not exist. :return: :class:`~.HttpResult`
['Remove', 'a', 'user', ':', 'param', 'AuthDomain', 'domain', ':', 'The', 'authentication', 'domain', 'for', 'the', 'user', '.', ':', 'param', 'userid', ':', 'The', 'user', 'ID', 'to', 'remove', ':', 'raise', ':', ':', 'exc', ':', 'couchbase', '.', 'exceptions', '.', 'HTTPError', 'if', 'the', 'user', 'does', 'not', 'exist', '.', ':', 'return', ':', ':', 'class', ':', '~', '.', 'HttpResult']
train
https://github.com/couchbase/couchbase-python-client/blob/a7bada167785bf79a29c39f820d932a433a6a535/couchbase/admin.py#L431-L441
6,175
gwastro/pycbc
pycbc/strain/gate.py
add_gate_option_group
def add_gate_option_group(parser): """Adds the options needed to apply gates to data. Parameters ---------- parser : object ArgumentParser instance. """ gate_group = parser.add_argument_group("Options for gating data.") gate_group.add_argument("--gate", nargs="+", type=str, metavar="IFO:CENTRALTIME:HALFDUR:TAPERDUR", help="Apply one or more gates to the data before " "filtering.") gate_group.add_argument("--gate-overwhitened", action="store_true", help="Overwhiten data first, then apply the " "gates specified in --gate. Overwhitening " "allows for sharper tapers to be used, " "since lines are not blurred.") gate_group.add_argument("--psd-gate", nargs="+", type=str, metavar="IFO:CENTRALTIME:HALFDUR:TAPERDUR", help="Apply one or more gates to the data used " "for computing the PSD. Gates are applied " "prior to FFT-ing the data for PSD " "estimation.") return gate_group
python
def add_gate_option_group(parser): """Adds the options needed to apply gates to data. Parameters ---------- parser : object ArgumentParser instance. """ gate_group = parser.add_argument_group("Options for gating data.") gate_group.add_argument("--gate", nargs="+", type=str, metavar="IFO:CENTRALTIME:HALFDUR:TAPERDUR", help="Apply one or more gates to the data before " "filtering.") gate_group.add_argument("--gate-overwhitened", action="store_true", help="Overwhiten data first, then apply the " "gates specified in --gate. Overwhitening " "allows for sharper tapers to be used, " "since lines are not blurred.") gate_group.add_argument("--psd-gate", nargs="+", type=str, metavar="IFO:CENTRALTIME:HALFDUR:TAPERDUR", help="Apply one or more gates to the data used " "for computing the PSD. Gates are applied " "prior to FFT-ing the data for PSD " "estimation.") return gate_group
['def', 'add_gate_option_group', '(', 'parser', ')', ':', 'gate_group', '=', 'parser', '.', 'add_argument_group', '(', '"Options for gating data."', ')', 'gate_group', '.', 'add_argument', '(', '"--gate"', ',', 'nargs', '=', '"+"', ',', 'type', '=', 'str', ',', 'metavar', '=', '"IFO:CENTRALTIME:HALFDUR:TAPERDUR"', ',', 'help', '=', '"Apply one or more gates to the data before "', '"filtering."', ')', 'gate_group', '.', 'add_argument', '(', '"--gate-overwhitened"', ',', 'action', '=', '"store_true"', ',', 'help', '=', '"Overwhiten data first, then apply the "', '"gates specified in --gate. Overwhitening "', '"allows for sharper tapers to be used, "', '"since lines are not blurred."', ')', 'gate_group', '.', 'add_argument', '(', '"--psd-gate"', ',', 'nargs', '=', '"+"', ',', 'type', '=', 'str', ',', 'metavar', '=', '"IFO:CENTRALTIME:HALFDUR:TAPERDUR"', ',', 'help', '=', '"Apply one or more gates to the data used "', '"for computing the PSD. Gates are applied "', '"prior to FFT-ing the data for PSD "', '"estimation."', ')', 'return', 'gate_group']
Adds the options needed to apply gates to data. Parameters ---------- parser : object ArgumentParser instance.
['Adds', 'the', 'options', 'needed', 'to', 'apply', 'gates', 'to', 'data', '.']
train
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/strain/gate.py#L114-L139
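Note on `add_gate_option_group` above: each --gate value packs four fields into one colon-separated token, so a companion parser is needed downstream. A hedged sketch of collecting and splitting those tokens (the splitter is mine for illustration; pycbc's own gate parsing lives elsewhere in the package):

import argparse

def parse_gate(token):
    ifo, center, half_dur, taper_dur = token.split(':')
    return ifo, float(center), float(half_dur), float(taper_dur)

parser = argparse.ArgumentParser()
parser.add_argument('--gate', nargs='+', default=[],
                    metavar='IFO:CENTRALTIME:HALFDUR:TAPERDUR')
args = parser.parse_args(['--gate', 'H1:1126259462.4:1.0:0.5'])
print([parse_gate(g) for g in args.gate])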
6,176
quantum5/2048
_2048/game.py
Game2048._is_in_try_again
def _is_in_try_again(self, x, y): """Checks if the game is to be restarted.""" if self.won == 1: # Checks if in try button on won screen. x1, y1, x2, y2 = self._won_try_again return x1 <= x < x2 and y1 <= y < y2 elif self.lost: # Checks if in try button on lost screen. x1, y1, x2, y2 = self._lost_try_again return x1 <= x < x2 and y1 <= y < y2 # Otherwise just no. return False
python
def _is_in_try_again(self, x, y): """Checks if the game is to be restarted.""" if self.won == 1: # Checks if in try button on won screen. x1, y1, x2, y2 = self._won_try_again return x1 <= x < x2 and y1 <= y < y2 elif self.lost: # Checks if in try button on lost screen. x1, y1, x2, y2 = self._lost_try_again return x1 <= x < x2 and y1 <= y < y2 # Otherwise just no. return False
['def', '_is_in_try_again', '(', 'self', ',', 'x', ',', 'y', ')', ':', 'if', 'self', '.', 'won', '==', '1', ':', '# Checks if in try button on won screen.', 'x1', ',', 'y1', ',', 'x2', ',', 'y2', '=', 'self', '.', '_won_try_again', 'return', 'x1', '<=', 'x', '<', 'x2', 'and', 'y1', '<=', 'y', '<', 'y2', 'elif', 'self', '.', 'lost', ':', '# Checks if in try button on lost screen.', 'x1', ',', 'y1', ',', 'x2', ',', 'y2', '=', 'self', '.', '_lost_try_again', 'return', 'x1', '<=', 'x', '<', 'x2', 'and', 'y1', '<=', 'y', '<', 'y2', '# Otherwise just no.', 'return', 'False']
Checks if the game is to be restarted.
['Checks', 'if', 'the', 'game', 'is', 'to', 'be', 'restarted', '.']
train
https://github.com/quantum5/2048/blob/93ada2e3026eaf154e1bbee943d0500c9253e66f/_2048/game.py#L242-L253
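Note on `_is_in_try_again` above: both branches are the same point-in-rectangle test with half-open bounds (right and bottom edges excluded). Extracted as a reusable helper, with rectangles stored as (x1, y1, x2, y2) as in the record:

def in_rect(x, y, rect):
    x1, y1, x2, y2 = rect
    return x1 <= x < x2 and y1 <= y < y2

assert in_rect(0, 0, (0, 0, 10, 10))       # top-left corner included
assert not in_rect(10, 5, (0, 0, 10, 10))  # right edge excluded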
6,177
log2timeline/dfvfs
dfvfs/lib/data_format.py
DataFormat._ReadString
def _ReadString( self, file_object, file_offset, data_type_map, description): """Reads a string. Args: file_object (FileIO): file-like object. file_offset (int): offset of the data relative from the start of the file-like object. data_type_map (dtfabric.DataTypeMap): data type map of the string. description (str): description of the string. Returns: object: structure values object. Raises: FileFormatError: if the string cannot be read. ValueError: if file-like object or date type map are invalid. """ # pylint: disable=protected-access element_data_size = ( data_type_map._element_data_type_definition.GetByteSize()) elements_terminator = ( data_type_map._data_type_definition.elements_terminator) byte_stream = [] element_data = file_object.read(element_data_size) byte_stream.append(element_data) while element_data and element_data != elements_terminator: element_data = file_object.read(element_data_size) byte_stream.append(element_data) byte_stream = b''.join(byte_stream) return self._ReadStructureFromByteStream( byte_stream, file_offset, data_type_map, description)
python
def _ReadString( self, file_object, file_offset, data_type_map, description): """Reads a string. Args: file_object (FileIO): file-like object. file_offset (int): offset of the data relative from the start of the file-like object. data_type_map (dtfabric.DataTypeMap): data type map of the string. description (str): description of the string. Returns: object: structure values object. Raises: FileFormatError: if the string cannot be read. ValueError: if file-like object or date type map are invalid. """ # pylint: disable=protected-access element_data_size = ( data_type_map._element_data_type_definition.GetByteSize()) elements_terminator = ( data_type_map._data_type_definition.elements_terminator) byte_stream = [] element_data = file_object.read(element_data_size) byte_stream.append(element_data) while element_data and element_data != elements_terminator: element_data = file_object.read(element_data_size) byte_stream.append(element_data) byte_stream = b''.join(byte_stream) return self._ReadStructureFromByteStream( byte_stream, file_offset, data_type_map, description)
['def', '_ReadString', '(', 'self', ',', 'file_object', ',', 'file_offset', ',', 'data_type_map', ',', 'description', ')', ':', '# pylint: disable=protected-access', 'element_data_size', '=', '(', 'data_type_map', '.', '_element_data_type_definition', '.', 'GetByteSize', '(', ')', ')', 'elements_terminator', '=', '(', 'data_type_map', '.', '_data_type_definition', '.', 'elements_terminator', ')', 'byte_stream', '=', '[', ']', 'element_data', '=', 'file_object', '.', 'read', '(', 'element_data_size', ')', 'byte_stream', '.', 'append', '(', 'element_data', ')', 'while', 'element_data', 'and', 'element_data', '!=', 'elements_terminator', ':', 'element_data', '=', 'file_object', '.', 'read', '(', 'element_data_size', ')', 'byte_stream', '.', 'append', '(', 'element_data', ')', 'byte_stream', '=', "b''", '.', 'join', '(', 'byte_stream', ')', 'return', 'self', '.', '_ReadStructureFromByteStream', '(', 'byte_stream', ',', 'file_offset', ',', 'data_type_map', ',', 'description', ')']
Reads a string. Args: file_object (FileIO): file-like object. file_offset (int): offset of the data relative from the start of the file-like object. data_type_map (dtfabric.DataTypeMap): data type map of the string. description (str): description of the string. Returns: object: structure values object. Raises: FileFormatError: if the string cannot be read. ValueError: if file-like object or date type map are invalid.
['Reads', 'a', 'string', '.']
train
https://github.com/log2timeline/dfvfs/blob/2b3ccd115f9901d89f383397d4a1376a873c83c4/dfvfs/lib/data_format.py#L56-L91
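Note on `_ReadString` above: it reads one fixed-size character unit at a time until the terminator appears or read() returns empty (end of file), and the terminator itself stays in the joined byte stream handed to the structure parser. The scanning loop in isolation, run over an in-memory file:

import io

def read_terminated(file_object, element_size, terminator):
    chunks = []
    element = file_object.read(element_size)
    chunks.append(element)
    while element and element != terminator:
        element = file_object.read(element_size)
        chunks.append(element)
    return b''.join(chunks)

buf = io.BytesIO(b'h\x00i\x00\x00\x00trailing bytes')
print(read_terminated(buf, 2, b'\x00\x00'))  # b'h\x00i\x00\x00\x00'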
6,178
phaethon/kamene
kamene/contrib/gsm_um.py
routingAreaUpdateAccept
def routingAreaUpdateAccept(PTmsiSignature_presence=0, MobileId_presence=0, MobileId_presence1=0, ReceiveNpduNumbersList_presence=0, GprsTimer_presence=0, GmmCause_presence=0): """ROUTING AREA UPDATE ACCEPT Section 9.4.15""" a = TpPd(pd=0x3) b = MessageType(mesType=0x9) # 00001001 c = ForceToStandbyAndUpdateResult() e = GprsTimer() f = RoutingAreaIdentification() packet = a / b / c / e / f if PTmsiSignature_presence is 1: g = PTmsiSignature(ieiPTS=0x19) packet = packet / g if MobileId_presence is 1: h = MobileIdHdr(ieiMI=0x18, eightBitMI=0x0) packet = packet / h if MobileId_presence1 is 1: i = MobileIdHdr(ieiMI=0x23, eightBitMI=0x0) packet = packet / i if ReceiveNpduNumbersList_presence is 1: j = ReceiveNpduNumbersList(ieiRNNL=0x26) packet = packet / j if GprsTimer_presence is 1: k = GprsTimer(ieiGT=0x17) packet = packet / k if GmmCause_presence is 1: l = GmmCause(ieiGC=0x25) packet = packet / l return packet
python
def routingAreaUpdateAccept(PTmsiSignature_presence=0, MobileId_presence=0, MobileId_presence1=0, ReceiveNpduNumbersList_presence=0, GprsTimer_presence=0, GmmCause_presence=0): """ROUTING AREA UPDATE ACCEPT Section 9.4.15""" a = TpPd(pd=0x3) b = MessageType(mesType=0x9) # 00001001 c = ForceToStandbyAndUpdateResult() e = GprsTimer() f = RoutingAreaIdentification() packet = a / b / c / e / f if PTmsiSignature_presence is 1: g = PTmsiSignature(ieiPTS=0x19) packet = packet / g if MobileId_presence is 1: h = MobileIdHdr(ieiMI=0x18, eightBitMI=0x0) packet = packet / h if MobileId_presence1 is 1: i = MobileIdHdr(ieiMI=0x23, eightBitMI=0x0) packet = packet / i if ReceiveNpduNumbersList_presence is 1: j = ReceiveNpduNumbersList(ieiRNNL=0x26) packet = packet / j if GprsTimer_presence is 1: k = GprsTimer(ieiGT=0x17) packet = packet / k if GmmCause_presence is 1: l = GmmCause(ieiGC=0x25) packet = packet / l return packet
['def', 'routingAreaUpdateAccept', '(', 'PTmsiSignature_presence', '=', '0', ',', 'MobileId_presence', '=', '0', ',', 'MobileId_presence1', '=', '0', ',', 'ReceiveNpduNumbersList_presence', '=', '0', ',', 'GprsTimer_presence', '=', '0', ',', 'GmmCause_presence', '=', '0', ')', ':', 'a', '=', 'TpPd', '(', 'pd', '=', '0x3', ')', 'b', '=', 'MessageType', '(', 'mesType', '=', '0x9', ')', '# 00001001', 'c', '=', 'ForceToStandbyAndUpdateResult', '(', ')', 'e', '=', 'GprsTimer', '(', ')', 'f', '=', 'RoutingAreaIdentification', '(', ')', 'packet', '=', 'a', '/', 'b', '/', 'c', '/', 'e', '/', 'f', 'if', 'PTmsiSignature_presence', 'is', '1', ':', 'g', '=', 'PTmsiSignature', '(', 'ieiPTS', '=', '0x19', ')', 'packet', '=', 'packet', '/', 'g', 'if', 'MobileId_presence', 'is', '1', ':', 'h', '=', 'MobileIdHdr', '(', 'ieiMI', '=', '0x18', ',', 'eightBitMI', '=', '0x0', ')', 'packet', '=', 'packet', '/', 'h', 'if', 'MobileId_presence1', 'is', '1', ':', 'i', '=', 'MobileIdHdr', '(', 'ieiMI', '=', '0x23', ',', 'eightBitMI', '=', '0x0', ')', 'packet', '=', 'packet', '/', 'i', 'if', 'ReceiveNpduNumbersList_presence', 'is', '1', ':', 'j', '=', 'ReceiveNpduNumbersList', '(', 'ieiRNNL', '=', '0x26', ')', 'packet', '=', 'packet', '/', 'j', 'if', 'GprsTimer_presence', 'is', '1', ':', 'k', '=', 'GprsTimer', '(', 'ieiGT', '=', '0x17', ')', 'packet', '=', 'packet', '/', 'k', 'if', 'GmmCause_presence', 'is', '1', ':', 'l', '=', 'GmmCause', '(', 'ieiGC', '=', '0x25', ')', 'packet', '=', 'packet', '/', 'l', 'return', 'packet']
ROUTING AREA UPDATE ACCEPT Section 9.4.15
['ROUTING', 'AREA', 'UPDATE', 'ACCEPT', 'Section', '9', '.', '4', '.', '15']
train
https://github.com/phaethon/kamene/blob/11d4064844f4f68ac5d7546f5633ac7d02082914/kamene/contrib/gsm_um.py#L2537-L2566
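Note on `routingAreaUpdateAccept` above: the presence checks use `flag is 1`, which tests object identity rather than value; it happens to work because CPython caches small integers, but it is not guaranteed by the language (and newer Pythons emit a SyntaxWarning for it). A hedged sketch of the same optional-layer assembly with value equality, using plain lists as stand-ins for kamene packet layers:

def assemble(base_layers, optional_layers, flags):
    # optional_layers: list of (flag_name, layer); append when flag == 1.
    packet = list(base_layers)
    for name, layer in optional_layers:
        if flags.get(name) == 1:  # ==, not `is`
            packet.append(layer)
    return packet

print(assemble(['TpPd', 'MessageType'],
               [('ptmsi_sig', 'PTmsiSignature')],
               {'ptmsi_sig': 1}))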
6,179
datamachine/twx.botapi
twx/botapi/botapi.py
TelegramBot.set_chat_photo
def set_chat_photo(self, *args, **kwargs): """See :func:`set_chat_photo`""" return set_chat_photo(*args, **self._merge_overrides(**kwargs)).run()
python
def set_chat_photo(self, *args, **kwargs): """See :func:`set_chat_photo`""" return set_chat_photo(*args, **self._merge_overrides(**kwargs)).run()
['def', 'set_chat_photo', '(', 'self', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'return', 'set_chat_photo', '(', '*', 'args', ',', '*', '*', 'self', '.', '_merge_overrides', '(', '*', '*', 'kwargs', ')', ')', '.', 'run', '(', ')']
See :func:`set_chat_photo`
['See', ':', 'func', ':', 'set_chat_photo']
train
https://github.com/datamachine/twx.botapi/blob/c85184da738169e8f9d6d8e62970540f427c486e/twx/botapi/botapi.py#L4266-L4268
6,180
mivade/tornadose
tornadose/handlers.py
WebSocketSubscriber.publish
async def publish(self, message): """Push a new message to the client. The data will be available as a JSON object with the key ``data``. """ try: self.write_message(dict(data=message)) except WebSocketClosedError: self._close()
python
async def publish(self, message): """Push a new message to the client. The data will be available as a JSON object with the key ``data``. """ try: self.write_message(dict(data=message)) except WebSocketClosedError: self._close()
['async', 'def', 'publish', '(', 'self', ',', 'message', ')', ':', 'try', ':', 'self', '.', 'write_message', '(', 'dict', '(', 'data', '=', 'message', ')', ')', 'except', 'WebSocketClosedError', ':', 'self', '.', '_close', '(', ')']
Push a new message to the client. The data will be available as a JSON object with the key ``data``.
['Push', 'a', 'new', 'message', 'to', 'the', 'client', '.', 'The', 'data', 'will', 'be', 'available', 'as', 'a', 'JSON', 'object', 'with', 'the', 'key', 'data', '.']
train
https://github.com/mivade/tornadose/blob/d220e0e3040d24c98997eee7a8a236602b4c5159/tornadose/handlers.py#L116-L124
6,181
jlmadurga/permabots
permabots/views/api/hook.py
HookDetail.get
def get(self, request, bot_id, id, format=None): """ Get hook by id --- serializer: HookSerializer responseMessages: - code: 401 message: Not authenticated """ return super(HookDetail, self).get(request, bot_id, id, format)
python
def get(self, request, bot_id, id, format=None): """ Get hook by id --- serializer: HookSerializer responseMessages: - code: 401 message: Not authenticated """ return super(HookDetail, self).get(request, bot_id, id, format)
['def', 'get', '(', 'self', ',', 'request', ',', 'bot_id', ',', 'id', ',', 'format', '=', 'None', ')', ':', 'return', 'super', '(', 'HookDetail', ',', 'self', ')', '.', 'get', '(', 'request', ',', 'bot_id', ',', 'id', ',', 'format', ')']
Get hook by id --- serializer: HookSerializer responseMessages: - code: 401 message: Not authenticated
['Get', 'hook', 'by', 'id', '---', 'serializer', ':', 'HookSerializer', 'responseMessages', ':', '-', 'code', ':', '401', 'message', ':', 'Not', 'authenticated']
train
https://github.com/jlmadurga/permabots/blob/781a91702529a23fe7bc2aa84c5d88e961412466/permabots/views/api/hook.py#L60-L69
6,182
fedora-infra/fmn.rules
fmn/rules/taskotron.py
taskotron_task_particular_or_changed_outcome
def taskotron_task_particular_or_changed_outcome(config, message, outcome='FAILED,NEEDS_INSPECTION'): """ Taskotron task any particular or changed outcome(s) With this rule, you can limit messages to only those task results with any particular outcome(s) (FAILED and NEEDS_INSPECTION by default) or those with changed outcomes. This rule is a handy way of filtering a very useful use case - being notified when either task requires your attention or the outcome has changed since the last time the task ran for the same item (e.g. a koji build). You can specify several outcomes by separating them with a comma ',', i.e.: ``PASSED,FAILED``. The full list of supported outcomes can be found in the libtaskotron `documentation <https://docs.qadevel.cloud.fedoraproject.org/ libtaskotron/latest/resultyaml.html#minimal-version>`_. """ return taskotron_task_outcome(config, message, outcome) or \ taskotron_changed_outcome(config, message)
python
def taskotron_task_particular_or_changed_outcome(config, message, outcome='FAILED,NEEDS_INSPECTION'): """ Taskotron task any particular or changed outcome(s) With this rule, you can limit messages to only those task results with any particular outcome(s) (FAILED and NEEDS_INSPECTION by default) or those with changed outcomes. This rule is a handy way of covering a very useful use case - being notified either when the task requires your attention or when the outcome has changed since the last time the task ran for the same item (e.g. a koji build). You can specify several outcomes by separating them with a comma ',', i.e.: ``PASSED,FAILED``. The full list of supported outcomes can be found in the libtaskotron `documentation <https://docs.qadevel.cloud.fedoraproject.org/ libtaskotron/latest/resultyaml.html#minimal-version>`_. """ return taskotron_task_outcome(config, message, outcome) or \ taskotron_changed_outcome(config, message)
['def', 'taskotron_task_particular_or_changed_outcome', '(', 'config', ',', 'message', ',', 'outcome', '=', "'FAILED,NEEDS_INSPECTION'", ')', ':', 'return', 'taskotron_task_outcome', '(', 'config', ',', 'message', ',', 'outcome', ')', 'or', 'taskotron_changed_outcome', '(', 'config', ',', 'message', ')']
Taskotron task any particular or changed outcome(s) With this rule, you can limit messages to only those task results with any particular outcome(s) (FAILED and NEEDS_INSPECTION by default) or those with changed outcomes. This rule is a handy way of covering a very useful use case - being notified either when the task requires your attention or when the outcome has changed since the last time the task ran for the same item (e.g. a koji build). You can specify several outcomes by separating them with a comma ',', i.e.: ``PASSED,FAILED``. The full list of supported outcomes can be found in the libtaskotron `documentation <https://docs.qadevel.cloud.fedoraproject.org/ libtaskotron/latest/resultyaml.html#minimal-version>`_.
['Taskotron', 'task', 'any', 'particular', 'or', 'changed', 'outcome', '(', 's', ')']
train
https://github.com/fedora-infra/fmn.rules/blob/f9ec790619fcc8b41803077c4dec094e5127fc24/fmn/rules/taskotron.py#L84-L104
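A sketch of invoking the combined rule directly. The fedmsg message shape below is an assumption for illustration; the real keys depend on taskotron's message schema and on the two helper rules the function delegates to:

# Illustrative only: the message layout is assumed, not confirmed.
config = {}  # fmn rules receive the fedmsg config dict
message = {
    'topic': 'org.fedoraproject.prod.taskotron.result.new',
    'msg': {'result': {'outcome': 'NEEDS_INSPECTION'}},
}
# True if the outcome is in the requested set, or if it changed
# since the previous run for the same item.
notify = taskotron_task_particular_or_changed_outcome(
    config, message, outcome='FAILED,NEEDS_INSPECTION')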
6,183
log2timeline/dfdatetime
dfdatetime/interface.py
DateTimeValues._GetDayOfYear
def _GetDayOfYear(self, year, month, day_of_month): """Retrieves the day of the year for a specific day of a month in a year. Args: year (int): year e.g. 1970. month (int): month, where 1 represents January. day_of_month (int): day of the month, where 1 represents the first day. Returns: int: day of year. Raises: ValueError: if the month or day of month value is out of bounds. """ if month not in range(1, 13): raise ValueError('Month value out of bounds.') days_per_month = self._GetDaysPerMonth(year, month) if day_of_month < 1 or day_of_month > days_per_month: raise ValueError('Day of month value out of bounds.') day_of_year = day_of_month for past_month in range(1, month): day_of_year += self._GetDaysPerMonth(year, past_month) return day_of_year
python
def _GetDayOfYear(self, year, month, day_of_month): """Retrieves the day of the year for a specific day of a month in a year. Args: year (int): year e.g. 1970. month (int): month, where 1 represents January. day_of_month (int): day of the month, where 1 represents the first day. Returns: int: day of year. Raises: ValueError: if the month or day of month value is out of bounds. """ if month not in range(1, 13): raise ValueError('Month value out of bounds.') days_per_month = self._GetDaysPerMonth(year, month) if day_of_month < 1 or day_of_month > days_per_month: raise ValueError('Day of month value out of bounds.') day_of_year = day_of_month for past_month in range(1, month): day_of_year += self._GetDaysPerMonth(year, past_month) return day_of_year
['def', '_GetDayOfYear', '(', 'self', ',', 'year', ',', 'month', ',', 'day_of_month', ')', ':', 'if', 'month', 'not', 'in', 'range', '(', '1', ',', '13', ')', ':', 'raise', 'ValueError', '(', "'Month value out of bounds.'", ')', 'days_per_month', '=', 'self', '.', '_GetDaysPerMonth', '(', 'year', ',', 'month', ')', 'if', 'day_of_month', '<', '1', 'or', 'day_of_month', '>', 'days_per_month', ':', 'raise', 'ValueError', '(', "'Day of month value out of bounds.'", ')', 'day_of_year', '=', 'day_of_month', 'for', 'past_month', 'in', 'range', '(', '1', ',', 'month', ')', ':', 'day_of_year', '+=', 'self', '.', '_GetDaysPerMonth', '(', 'year', ',', 'past_month', ')', 'return', 'day_of_year']
Retrieves the day of the year for a specific day of a month in a year. Args: year (int): year e.g. 1970. month (int): month, where 1 represents January. day_of_month (int): day of the month, where 1 represents the first day. Returns: int: day of year. Raises: ValueError: if the month or day of month value is out of bounds.
['Retrieves', 'the', 'day', 'of', 'the', 'year', 'for', 'a', 'specific', 'day', 'of', 'a', 'month', 'in', 'a', 'year', '.']
train
https://github.com/log2timeline/dfdatetime/blob/141ca4ef1eff3d354b5deaac3d81cb08506f98d6/dfdatetime/interface.py#L644-L669
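A quick worked check of the arithmetic: for 1 March 1970 (a non-leap year) the method sums January (31) and February (28) before adding the day of month, giving 60. The direct call assumes an instance of a DateTimeValues subclass is in hand:

# 31 (Jan) + 28 (Feb, 1970 is not a leap year) + 1 = 60
day_of_year = date_time_values._GetDayOfYear(1970, 3, 1)
assert day_of_year == 60
# Out-of-range input raises ValueError, e.g.:
# date_time_values._GetDayOfYear(1970, 13, 1)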
6,184
raamana/pyradigm
pyradigm/pyradigm.py
check_compatibility
def check_compatibility(datasets, reqd_num_features=None): """ Checks whether the given MLdataset instances are compatible i.e. with the same set of subjects, each belonging to the same class in all instances. Checks the first dataset in the list against the rest, and returns a boolean array. Parameters ---------- datasets : Iterable A list of n datasets reqd_num_features : int The required number of features in each dataset. Helpful to ensure test sets are compatible with training set, as well as within themselves. Returns ------- all_are_compatible : bool Boolean flag indicating whether all datasets are compatible or not compatibility : list List indicating whether first dataset is compatible with the rest individually. This could be useful to select a subset of mutually compatible datasets. Length : n-1 dim_mismatch : bool Boolean flag indicating mismatch in dimensionality from that specified size_descriptor : tuple A tuple with values for (num_samples, reqd_num_features) - num_samples must be common for all datasets that are evaluated for compatibility - reqd_num_features is None (when no check on dimensionality is performed), or list of corresponding dimensionalities for each input dataset """ from collections import Iterable if not isinstance(datasets, Iterable): raise TypeError('Input must be an iterable ' 'i.e. (list/tuple) of MLdataset/similar instances') datasets = list(datasets) # to make it indexable if coming from a set num_datasets = len(datasets) check_dimensionality = False dim_mismatch = False if reqd_num_features is not None: if isinstance(reqd_num_features, Iterable): if len(reqd_num_features) != num_datasets: raise ValueError('Specify dimensionality for exactly {} datasets.' ' Given for a different number {}' ''.format(num_datasets, len(reqd_num_features))) reqd_num_features = list(map(int, reqd_num_features)) else: # same dimensionality for all reqd_num_features = [int(reqd_num_features)] * num_datasets check_dimensionality = True else: # to enable iteration reqd_num_features = [None,] * num_datasets pivot = datasets[0] if not isinstance(pivot, MLDataset): pivot = MLDataset(pivot) if check_dimensionality and pivot.num_features != reqd_num_features[0]: warnings.warn('Dimensionality mismatch! Expected {} whereas current {}.' ''.format(reqd_num_features[0], pivot.num_features)) dim_mismatch = True compatible = list() for ds, reqd_dim in zip(datasets[1:], reqd_num_features[1:]): if not isinstance(ds, MLDataset): ds = MLDataset(ds) is_compatible = True # compound bool will short-circuit, not optim required if pivot.num_samples != ds.num_samples \ or pivot.keys != ds.keys \ or pivot.classes != ds.classes: is_compatible = False if check_dimensionality and reqd_dim != ds.num_features: warnings.warn('Dimensionality mismatch! Expected {} whereas current {}.' ''.format(reqd_dim, ds.num_features)) dim_mismatch = True compatible.append(is_compatible) return all(compatible), compatible, dim_mismatch, \ (pivot.num_samples, reqd_num_features)
python
def check_compatibility(datasets, reqd_num_features=None): """ Checks whether the given MLdataset instances are compatible i.e. with the same set of subjects, each belonging to the same class in all instances. Checks the first dataset in the list against the rest, and returns a boolean array. Parameters ---------- datasets : Iterable A list of n datasets reqd_num_features : int The required number of features in each dataset. Helpful to ensure test sets are compatible with training set, as well as within themselves. Returns ------- all_are_compatible : bool Boolean flag indicating whether all datasets are compatible or not compatibility : list List indicating whether first dataset is compatible with the rest individually. This could be useful to select a subset of mutually compatible datasets. Length : n-1 dim_mismatch : bool Boolean flag indicating mismatch in dimensionality from that specified size_descriptor : tuple A tuple with values for (num_samples, reqd_num_features) - num_samples must be common for all datasets that are evaluated for compatibility - reqd_num_features is None (when no check on dimensionality is performed), or list of corresponding dimensionalities for each input dataset """ from collections import Iterable if not isinstance(datasets, Iterable): raise TypeError('Input must be an iterable ' 'i.e. (list/tuple) of MLdataset/similar instances') datasets = list(datasets) # to make it indexable if coming from a set num_datasets = len(datasets) check_dimensionality = False dim_mismatch = False if reqd_num_features is not None: if isinstance(reqd_num_features, Iterable): if len(reqd_num_features) != num_datasets: raise ValueError('Specify dimensionality for exactly {} datasets.' ' Given for a different number {}' ''.format(num_datasets, len(reqd_num_features))) reqd_num_features = list(map(int, reqd_num_features)) else: # same dimensionality for all reqd_num_features = [int(reqd_num_features)] * num_datasets check_dimensionality = True else: # to enable iteration reqd_num_features = [None,] * num_datasets pivot = datasets[0] if not isinstance(pivot, MLDataset): pivot = MLDataset(pivot) if check_dimensionality and pivot.num_features != reqd_num_features[0]: warnings.warn('Dimensionality mismatch! Expected {} whereas current {}.' ''.format(reqd_num_features[0], pivot.num_features)) dim_mismatch = True compatible = list() for ds, reqd_dim in zip(datasets[1:], reqd_num_features[1:]): if not isinstance(ds, MLDataset): ds = MLDataset(ds) is_compatible = True # compound bool will short-circuit, not optim required if pivot.num_samples != ds.num_samples \ or pivot.keys != ds.keys \ or pivot.classes != ds.classes: is_compatible = False if check_dimensionality and reqd_dim != ds.num_features: warnings.warn('Dimensionality mismatch! Expected {} whereas current {}.' ''.format(reqd_dim, ds.num_features)) dim_mismatch = True compatible.append(is_compatible) return all(compatible), compatible, dim_mismatch, \ (pivot.num_samples, reqd_num_features)
['def', 'check_compatibility', '(', 'datasets', ',', 'reqd_num_features', '=', 'None', ')', ':', 'from', 'collections', 'import', 'Iterable', 'if', 'not', 'isinstance', '(', 'datasets', ',', 'Iterable', ')', ':', 'raise', 'TypeError', '(', "'Input must be an iterable '", "'i.e. (list/tuple) of MLdataset/similar instances'", ')', 'datasets', '=', 'list', '(', 'datasets', ')', '# to make it indexable if coming from a set', 'num_datasets', '=', 'len', '(', 'datasets', ')', 'check_dimensionality', '=', 'False', 'dim_mismatch', '=', 'False', 'if', 'reqd_num_features', 'is', 'not', 'None', ':', 'if', 'isinstance', '(', 'reqd_num_features', ',', 'Iterable', ')', ':', 'if', 'len', '(', 'reqd_num_features', ')', '!=', 'num_datasets', ':', 'raise', 'ValueError', '(', "'Specify dimensionality for exactly {} datasets.'", "' Given for a different number {}'", "''", '.', 'format', '(', 'num_datasets', ',', 'len', '(', 'reqd_num_features', ')', ')', ')', 'reqd_num_features', '=', 'list', '(', 'map', '(', 'int', ',', 'reqd_num_features', ')', ')', 'else', ':', '# same dimensionality for all', 'reqd_num_features', '=', '[', 'int', '(', 'reqd_num_features', ')', ']', '*', 'num_datasets', 'check_dimensionality', '=', 'True', 'else', ':', '# to enable iteration', 'reqd_num_features', '=', '[', 'None', ',', ']', '*', 'num_datasets', 'pivot', '=', 'datasets', '[', '0', ']', 'if', 'not', 'isinstance', '(', 'pivot', ',', 'MLDataset', ')', ':', 'pivot', '=', 'MLDataset', '(', 'pivot', ')', 'if', 'check_dimensionality', 'and', 'pivot', '.', 'num_features', '!=', 'reqd_num_features', '[', '0', ']', ':', 'warnings', '.', 'warn', '(', "'Dimensionality mismatch! Expected {} whereas current {}.'", "''", '.', 'format', '(', 'reqd_num_features', '[', '0', ']', ',', 'pivot', '.', 'num_features', ')', ')', 'dim_mismatch', '=', 'True', 'compatible', '=', 'list', '(', ')', 'for', 'ds', ',', 'reqd_dim', 'in', 'zip', '(', 'datasets', '[', '1', ':', ']', ',', 'reqd_num_features', '[', '1', ':', ']', ')', ':', 'if', 'not', 'isinstance', '(', 'ds', ',', 'MLDataset', ')', ':', 'ds', '=', 'MLDataset', '(', 'ds', ')', 'is_compatible', '=', 'True', '# compound bool will short-circuit, not optim required', 'if', 'pivot', '.', 'num_samples', '!=', 'ds', '.', 'num_samples', 'or', 'pivot', '.', 'keys', '!=', 'ds', '.', 'keys', 'or', 'pivot', '.', 'classes', '!=', 'ds', '.', 'classes', ':', 'is_compatible', '=', 'False', 'if', 'check_dimensionality', 'and', 'reqd_dim', '!=', 'ds', '.', 'num_features', ':', 'warnings', '.', 'warn', '(', "'Dimensionality mismatch! Expected {} whereas current {}.'", "''", '.', 'format', '(', 'reqd_dim', ',', 'ds', '.', 'num_features', ')', ')', 'dim_mismatch', '=', 'True', 'compatible', '.', 'append', '(', 'is_compatible', ')', 'return', 'all', '(', 'compatible', ')', ',', 'compatible', ',', 'dim_mismatch', ',', '(', 'pivot', '.', 'num_samples', ',', 'reqd_num_features', ')']
Checks whether the given MLdataset instances are compatible i.e. with the same set of subjects, each belonging to the same class in all instances. Checks the first dataset in the list against the rest, and returns a boolean array. Parameters ---------- datasets : Iterable A list of n datasets reqd_num_features : int The required number of features in each dataset. Helpful to ensure test sets are compatible with training set, as well as within themselves. Returns ------- all_are_compatible : bool Boolean flag indicating whether all datasets are compatible or not compatibility : list List indicating whether first dataset is compatible with the rest individually. This could be useful to select a subset of mutually compatible datasets. Length : n-1 dim_mismatch : bool Boolean flag indicating mismatch in dimensionality from that specified size_descriptor : tuple A tuple with values for (num_samples, reqd_num_features) - num_samples must be common for all datasets that are evaluated for compatibility - reqd_num_features is None (when no check on dimensionality is performed), or list of corresponding dimensionalities for each input dataset
['Checks', 'whether', 'the', 'given', 'MLdataset', 'instances', 'are', 'compatible']
train
https://github.com/raamana/pyradigm/blob/8ffb7958329c88b09417087b86887a3c92f438c2/pyradigm/pyradigm.py#L1480-L1573
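A sketch of consuming the four-tuple this helper returns, assuming train/test MLDataset instances are already in hand (the dataset variables are hypothetical; the unpacking order follows the docstring above):

datasets = [train_set, test_set_one, test_set_two]
all_ok, per_dataset, dim_mismatch, (n_samples, dims) = \
    check_compatibility(datasets, reqd_num_features=100)

if not all_ok:
    # per_dataset[i] tells whether datasets[i + 1] matched datasets[0]
    bad = [i + 1 for i, ok in enumerate(per_dataset) if not ok]
    print('Incompatible with the first dataset:', bad)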
6,185
atlassian-api/atlassian-python-api
atlassian/bitbucket.py
Bitbucket.disable_branching_model
def disable_branching_model(self, project, repository): """ Disable branching model :param project: :param repository: :return: """ url = 'rest/branch-utils/1.0/projects/{project}/repos/{repository}/branchmodel/configuration'.format( project=project, repository=repository) return self.delete(url)
python
def disable_branching_model(self, project, repository): """ Disable branching model :param project: :param repository: :return: """ url = 'rest/branch-utils/1.0/projects/{project}/repos/{repository}/branchmodel/configuration'.format( project=project, repository=repository) return self.delete(url)
['def', 'disable_branching_model', '(', 'self', ',', 'project', ',', 'repository', ')', ':', 'url', '=', "'rest/branch-utils/1.0/projects/{project}/repos/{repository}/branchmodel/configuration'", '.', 'format', '(', 'project', '=', 'project', ',', 'repository', '=', 'repository', ')', 'return', 'self', '.', 'delete', '(', 'url', ')']
Disable branching model :param project: :param repository: :return:
['Disable', 'branching', 'model', ':', 'param', 'project', ':', ':', 'param', 'repository', ':', ':', 'return', ':']
train
https://github.com/atlassian-api/atlassian-python-api/blob/540d269905c3e7547b666fe30c647b2d512cf358/atlassian/bitbucket.py#L866-L876
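A minimal usage sketch. The constructor arguments follow the usual atlassian-python-api pattern but are an assumption here, not taken from the record:

from atlassian import Bitbucket  # import path assumed

bitbucket = Bitbucket(
    url='https://bitbucket.example.com',  # placeholder server
    username='admin', password='secret')
# Issues DELETE .../projects/PROJ/repos/my-repo/branchmodel/configuration
bitbucket.disable_branching_model('PROJ', 'my-repo')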
6,186
django-cumulus/django-cumulus
cumulus/storage.py
sync_headers
def sync_headers(cloud_obj, headers=None, header_patterns=HEADER_PATTERNS): """ Overwrites the given cloud_obj's headers with the ones given as ``headers`` and adds additional headers as defined in the HEADERS setting depending on the cloud_obj's file name. """ if headers is None: headers = {} # don't set headers on directories content_type = getattr(cloud_obj, "content_type", None) if content_type == "application/directory": return matched_headers = {} for pattern, pattern_headers in header_patterns: if pattern.match(cloud_obj.name): matched_headers.update(pattern_headers.copy()) # preserve headers already set matched_headers.update(cloud_obj.headers) # explicitly set headers overwrite matches and already set headers matched_headers.update(headers) if matched_headers != cloud_obj.headers: cloud_obj.headers = matched_headers cloud_obj.sync_metadata()
python
def sync_headers(cloud_obj, headers=None, header_patterns=HEADER_PATTERNS): """ Overwrites the given cloud_obj's headers with the ones given as ``headers`` and adds additional headers as defined in the HEADERS setting depending on the cloud_obj's file name. """ if headers is None: headers = {} # don't set headers on directories content_type = getattr(cloud_obj, "content_type", None) if content_type == "application/directory": return matched_headers = {} for pattern, pattern_headers in header_patterns: if pattern.match(cloud_obj.name): matched_headers.update(pattern_headers.copy()) # preserve headers already set matched_headers.update(cloud_obj.headers) # explicitly set headers overwrite matches and already set headers matched_headers.update(headers) if matched_headers != cloud_obj.headers: cloud_obj.headers = matched_headers cloud_obj.sync_metadata()
['def', 'sync_headers', '(', 'cloud_obj', ',', 'headers', '=', 'None', ',', 'header_patterns', '=', 'HEADER_PATTERNS', ')', ':', 'if', 'headers', 'is', 'None', ':', 'headers', '=', '{', '}', "# don't set headers on directories", 'content_type', '=', 'getattr', '(', 'cloud_obj', ',', '"content_type"', ',', 'None', ')', 'if', 'content_type', '==', '"application/directory"', ':', 'return', 'matched_headers', '=', '{', '}', 'for', 'pattern', ',', 'pattern_headers', 'in', 'header_patterns', ':', 'if', 'pattern', '.', 'match', '(', 'cloud_obj', '.', 'name', ')', ':', 'matched_headers', '.', 'update', '(', 'pattern_headers', '.', 'copy', '(', ')', ')', '# preserve headers already set', 'matched_headers', '.', 'update', '(', 'cloud_obj', '.', 'headers', ')', '# explicitly set headers overwrite matches and already set headers', 'matched_headers', '.', 'update', '(', 'headers', ')', 'if', 'matched_headers', '!=', 'cloud_obj', '.', 'headers', ':', 'cloud_obj', '.', 'headers', '=', 'matched_headers', 'cloud_obj', '.', 'sync_metadata', '(', ')']
Overwrites the given cloud_obj's headers with the ones given as ``headers`` and adds additional headers as defined in the HEADERS setting depending on the cloud_obj's file name.
['Overwrites', 'the', 'given', 'cloud_obj', 's', 'headers', 'with', 'the', 'ones', 'given', 'as', 'headers', 'and', 'adds', 'additional', 'headers', 'as', 'defined', 'in', 'the', 'HEADERS', 'setting', 'depending', 'on', 'the', 'cloud_obj', 's', 'file', 'name', '.']
train
https://github.com/django-cumulus/django-cumulus/blob/64feb07b857af28f226be4899e875c29405e261d/cumulus/storage.py#L67-L90
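A sketch of the header_patterns shape this function iterates over: pairs of (compiled regex, headers dict) matched against the object name. The cloud object itself is hypothetical here:

import re

# Pattern list mirroring the (regex, headers) pairs iterated above.
patterns = [
    (re.compile(r'.*\.css$'), {'Content-Type': 'text/css'}),
    (re.compile(r'.*\.woff$'), {'Access-Control-Allow-Origin': '*'}),
]
# cloud_obj is assumed to be a swift/pyrax storage object exposing
# .name, .headers, .content_type and .sync_metadata().
sync_headers(cloud_obj,
             headers={'Cache-Control': 'max-age=86400'},
             header_patterns=patterns)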
6,187
wonambi-python/wonambi
wonambi/widgets/utils.py
FormMenu.get_value
def get_value(self, default=None): """Get selection from widget. Parameters ---------- default : str str for use by widget Returns ------- str selected item from the combobox """ if default is None: default = '' try: text = self.currentText() except ValueError: lg.debug('Cannot convert "' + str(text) + '" to list. ' + 'Using default ' + str(default)) text = default self.set_value(text) return text
python
def get_value(self, default=None): """Get selection from widget. Parameters ---------- default : str str for use by widget Returns ------- str selected item from the combobox """ if default is None: default = '' try: text = self.currentText() except ValueError: lg.debug('Cannot convert "' + str(text) + '" to list. ' + 'Using default ' + str(default)) text = default self.set_value(text) return text
['def', 'get_value', '(', 'self', ',', 'default', '=', 'None', ')', ':', 'if', 'default', 'is', 'None', ':', 'default', '=', "''", 'try', ':', 'text', '=', 'self', '.', 'currentText', '(', ')', 'except', 'ValueError', ':', 'lg', '.', 'debug', '(', '\'Cannot convert "\'', '+', 'str', '(', 'text', ')', '+', '\'" to list. \'', '+', "'Using default '", '+', 'str', '(', 'default', ')', ')', 'text', '=', 'default', 'self', '.', 'set_value', '(', 'text', ')', 'return', 'text']
Get selection from widget. Parameters ---------- default : str str for use by widget Returns ------- str selected item from the combobox
['Get', 'selection', 'from', 'widget', '.']
train
https://github.com/wonambi-python/wonambi/blob/1d8e3d7e53df8017c199f703bcab582914676e76/wonambi/widgets/utils.py#L572-L598
6,188
sacrud/sacrud
sacrud/action.py
CRUD.update
def update(self, pk, data, **kwargs): """ Updates the object by primary_key: .. code-block:: python DBSession.sacrud(Users).update(1, {'name': 'Petya'}) DBSession.sacrud(Users).update('1', {'name': 'Petya'}) DBSession.sacrud(User2Groups).update({'user_id': 4, 'group_id': 2}, {'group_id': 1}) JSON support: .. code-block:: python DBSession.sacrud(Users).update(1, '{"name": "Petya"}') DBSession.sacrud(User2Groups).update( '{"user_id": 4, "group_id": 2}', # primary_key '{"group_id": 1}' # data ) By default it runs ``session.commit()`` or ``transaction.commit()``. If that is not necessary, use the attribute ``commit=False``. """ pk = unjson(pk) data = unjson(data) obj = get_obj(self.session, self.table, pk) return self._add(obj, data, **kwargs)
python
def update(self, pk, data, **kwargs): """ Updates the object by primary_key: .. code-block:: python DBSession.sacrud(Users).update(1, {'name': 'Petya'}) DBSession.sacrud(Users).update('1', {'name': 'Petya'}) DBSession.sacrud(User2Groups).update({'user_id': 4, 'group_id': 2}, {'group_id': 1}) JSON support: .. code-block:: python DBSession.sacrud(Users).update(1, '{"name": "Petya"}') DBSession.sacrud(User2Groups).update( '{"user_id": 4, "group_id": 2}', # primary_key '{"group_id": 1}' # data ) By default it runs ``session.commit()`` or ``transaction.commit()``. If that is not necessary, use the attribute ``commit=False``. """ pk = unjson(pk) data = unjson(data) obj = get_obj(self.session, self.table, pk) return self._add(obj, data, **kwargs)
['def', 'update', '(', 'self', ',', 'pk', ',', 'data', ',', '*', '*', 'kwargs', ')', ':', 'pk', '=', 'unjson', '(', 'pk', ')', 'data', '=', 'unjson', '(', 'data', ')', 'obj', '=', 'get_obj', '(', 'self', '.', 'session', ',', 'self', '.', 'table', ',', 'pk', ')', 'return', 'self', '.', '_add', '(', 'obj', ',', 'data', ',', '*', '*', 'kwargs', ')']
Updates the object by primary_key: .. code-block:: python DBSession.sacrud(Users).update(1, {'name': 'Petya'}) DBSession.sacrud(Users).update('1', {'name': 'Petya'}) DBSession.sacrud(User2Groups).update({'user_id': 4, 'group_id': 2}, {'group_id': 1}) JSON support: .. code-block:: python DBSession.sacrud(Users).update(1, '{"name": "Petya"}') DBSession.sacrud(User2Groups).update( '{"user_id": 4, "group_id": 2}', # primary_key '{"group_id": 1}' # data ) By default it runs ``session.commit()`` or ``transaction.commit()``. If that is not necessary, use the attribute ``commit=False``.
['Updates', 'the', 'object', 'by', 'primary_key', ':']
train
https://github.com/sacrud/sacrud/blob/40dcbb22083cb1ad4c1f626843397b89c2ce18f5/sacrud/action.py#L119-L146
6,189
raiden-network/raiden
raiden/network/rpc/client.py
patched_web3_eth_estimate_gas
def patched_web3_eth_estimate_gas(self, transaction, block_identifier=None): """ Temporary workaround until next web3.py release (5.X.X) Current master of web3.py has this implementation already: https://github.com/ethereum/web3.py/blob/2a67ea9f0ab40bb80af2b803dce742d6cad5943e/web3/eth.py#L311 """ if 'from' not in transaction and is_checksum_address(self.defaultAccount): transaction = assoc(transaction, 'from', self.defaultAccount) if block_identifier is None: params = [transaction] else: params = [transaction, block_identifier] try: result = self.web3.manager.request_blocking( 'eth_estimateGas', params, ) except ValueError as e: if check_value_error_for_parity(e, ParityCallType.ESTIMATE_GAS): result = None else: # else the error is not denoting estimate gas failure and is something else raise e return result
python
def patched_web3_eth_estimate_gas(self, transaction, block_identifier=None): """ Temporary workaround until next web3.py release (5.X.X) Current master of web3.py has this implementation already: https://github.com/ethereum/web3.py/blob/2a67ea9f0ab40bb80af2b803dce742d6cad5943e/web3/eth.py#L311 """ if 'from' not in transaction and is_checksum_address(self.defaultAccount): transaction = assoc(transaction, 'from', self.defaultAccount) if block_identifier is None: params = [transaction] else: params = [transaction, block_identifier] try: result = self.web3.manager.request_blocking( 'eth_estimateGas', params, ) except ValueError as e: if check_value_error_for_parity(e, ParityCallType.ESTIMATE_GAS): result = None else: # else the error is not denoting estimate gas failure and is something else raise e return result
['def', 'patched_web3_eth_estimate_gas', '(', 'self', ',', 'transaction', ',', 'block_identifier', '=', 'None', ')', ':', 'if', "'from'", 'not', 'in', 'transaction', 'and', 'is_checksum_address', '(', 'self', '.', 'defaultAccount', ')', ':', 'transaction', '=', 'assoc', '(', 'transaction', ',', "'from'", ',', 'self', '.', 'defaultAccount', ')', 'if', 'block_identifier', 'is', 'None', ':', 'params', '=', '[', 'transaction', ']', 'else', ':', 'params', '=', '[', 'transaction', ',', 'block_identifier', ']', 'try', ':', 'result', '=', 'self', '.', 'web3', '.', 'manager', '.', 'request_blocking', '(', "'eth_estimateGas'", ',', 'params', ',', ')', 'except', 'ValueError', 'as', 'e', ':', 'if', 'check_value_error_for_parity', '(', 'e', ',', 'ParityCallType', '.', 'ESTIMATE_GAS', ')', ':', 'result', '=', 'None', 'else', ':', '# else the error is not denoting estimate gas failure and is something else', 'raise', 'e', 'return', 'result']
Temporary workaround until next web3.py release (5.X.X) Current master of web3.py has this implementation already: https://github.com/ethereum/web3.py/blob/2a67ea9f0ab40bb80af2b803dce742d6cad5943e/web3/eth.py#L311
['Temporary', 'workaround', 'until', 'next', 'web3', '.', 'py', 'release', '(', '5', '.', 'X', '.', 'X', ')']
train
https://github.com/raiden-network/raiden/blob/407ba15c72074e9de88771d6b9661ff4dc36bef5/raiden/network/rpc/client.py#L298-L324
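A sketch of how such a patch is typically applied: rebinding the method on web3's Eth class before any client is created. The import path matches the web3 4.x layout this workaround targets; treat it as an assumption:

from web3.eth import Eth  # web3 4.x layout assumed

# Monkeypatch so every Web3 instance gains the Parity-tolerant
# behaviour: a revert during eth_estimateGas yields None instead
# of raising ValueError.
Eth.estimateGas = patched_web3_eth_estimate_gas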
6,190
vtkiorg/vtki
vtki/plotting.py
system_supports_plotting
def system_supports_plotting(): """ Check if x server is running Returns ------- system_supports_plotting : bool True when on Linux and running an xserver. Returns None when on a non-linux platform. """ try: if os.environ['ALLOW_PLOTTING'].lower() == 'true': return True except KeyError: pass try: p = Popen(["xset", "-q"], stdout=PIPE, stderr=PIPE) p.communicate() return p.returncode == 0 except: return False
python
def system_supports_plotting(): """ Check if x server is running Returns ------- system_supports_plotting : bool True when on Linux and running an xserver. Returns None when on a non-linux platform. """ try: if os.environ['ALLOW_PLOTTING'].lower() == 'true': return True except KeyError: pass try: p = Popen(["xset", "-q"], stdout=PIPE, stderr=PIPE) p.communicate() return p.returncode == 0 except: return False
['def', 'system_supports_plotting', '(', ')', ':', 'try', ':', 'if', 'os', '.', 'environ', '[', "'ALLOW_PLOTTING'", ']', '.', 'lower', '(', ')', '==', "'true'", ':', 'return', 'True', 'except', 'KeyError', ':', 'pass', 'try', ':', 'p', '=', 'Popen', '(', '[', '"xset"', ',', '"-q"', ']', ',', 'stdout', '=', 'PIPE', ',', 'stderr', '=', 'PIPE', ')', 'p', '.', 'communicate', '(', ')', 'return', 'p', '.', 'returncode', '==', '0', 'except', ':', 'return', 'False']
Check if x server is running Returns ------- system_supports_plotting : bool True when on Linux and running an xserver. Returns None when on a non-linux platform.
['Check', 'if', 'x', 'server', 'is', 'running']
train
https://github.com/vtkiorg/vtki/blob/5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1/vtki/plotting.py#L287-L308
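A typical guard built on this check, e.g. for skipping rendering tests on a headless CI box. The pytest usage is illustrative:

import pytest

# CI can force-enable plotting with a virtual framebuffer:
#   ALLOW_PLOTTING=true xvfb-run pytest
skip_no_display = pytest.mark.skipif(
    not system_supports_plotting(),
    reason='requires an X server or ALLOW_PLOTTING=true')

@skip_no_display
def test_render_scene():
    ...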
6,191
Microsoft/azure-devops-python-api
azure-devops/azure/devops/v5_1/cix/cix_client.py
CixClient.render_template
def render_template(self, template_parameters, template_id): """RenderTemplate. [Preview API] :param :class:`<TemplateParameters> <azure.devops.v5_1.cix.models.TemplateParameters>` template_parameters: :param str template_id: :rtype: :class:`<Template> <azure.devops.v5_1.cix.models.Template>` """ route_values = {} if template_id is not None: route_values['templateId'] = self._serialize.url('template_id', template_id, 'str') content = self._serialize.body(template_parameters, 'TemplateParameters') response = self._send(http_method='POST', location_id='eb5d6d1d-98a2-4bbd-9028-f9a6b2d66515', version='5.1-preview.1', route_values=route_values, content=content) return self._deserialize('Template', response)
python
def render_template(self, template_parameters, template_id): """RenderTemplate. [Preview API] :param :class:`<TemplateParameters> <azure.devops.v5_1.cix.models.TemplateParameters>` template_parameters: :param str template_id: :rtype: :class:`<Template> <azure.devops.v5_1.cix.models.Template>` """ route_values = {} if template_id is not None: route_values['templateId'] = self._serialize.url('template_id', template_id, 'str') content = self._serialize.body(template_parameters, 'TemplateParameters') response = self._send(http_method='POST', location_id='eb5d6d1d-98a2-4bbd-9028-f9a6b2d66515', version='5.1-preview.1', route_values=route_values, content=content) return self._deserialize('Template', response)
['def', 'render_template', '(', 'self', ',', 'template_parameters', ',', 'template_id', ')', ':', 'route_values', '=', '{', '}', 'if', 'template_id', 'is', 'not', 'None', ':', 'route_values', '[', "'templateId'", ']', '=', 'self', '.', '_serialize', '.', 'url', '(', "'template_id'", ',', 'template_id', ',', "'str'", ')', 'content', '=', 'self', '.', '_serialize', '.', 'body', '(', 'template_parameters', ',', "'TemplateParameters'", ')', 'response', '=', 'self', '.', '_send', '(', 'http_method', '=', "'POST'", ',', 'location_id', '=', "'eb5d6d1d-98a2-4bbd-9028-f9a6b2d66515'", ',', 'version', '=', "'5.1-preview.1'", ',', 'route_values', '=', 'route_values', ',', 'content', '=', 'content', ')', 'return', 'self', '.', '_deserialize', '(', "'Template'", ',', 'response', ')']
RenderTemplate. [Preview API] :param :class:`<TemplateParameters> <azure.devops.v5_1.cix.models.TemplateParameters>` template_parameters: :param str template_id: :rtype: :class:`<Template> <azure.devops.v5_1.cix.models.Template>`
['RenderTemplate', '.', '[', 'Preview', 'API', ']', ':', 'param', ':', 'class', ':', '<TemplateParameters', '>', '<azure', '.', 'devops', '.', 'v5_1', '.', 'cix', '.', 'models', '.', 'TemplateParameters', '>', 'template_parameters', ':', ':', 'param', 'str', 'template_id', ':', ':', 'rtype', ':', ':', 'class', ':', '<Template', '>', '<azure', '.', 'devops', '.', 'v5_1', '.', 'cix', '.', 'models', '.', 'Template', '>']
train
https://github.com/Microsoft/azure-devops-python-api/blob/4777ffda2f5052fabbaddb2abe9cb434e0cf1aa8/azure-devops/azure/devops/v5_1/cix/cix_client.py#L154-L170
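A client-side sketch. The connection bootstrap follows the azure-devops package's documented pattern, but the accessor name for this preview CIX client is a guess and should be verified:

from azure.devops.connection import Connection
from msrest.authentication import BasicAuthentication

credentials = BasicAuthentication('', '<personal-access-token>')
connection = Connection(base_url='https://dev.azure.com/yourorg',
                        creds=credentials)
# Accessor name below is hypothetical; preview clients are exposed
# via the versioned client factories.
cix_client = connection.clients_v5_1.get_cix_client()
template = cix_client.render_template(template_parameters,
                                      template_id='<template-id>')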
6,192
hannes-brt/hebel
hebel/pycuda_ops/cublas.py
cublasDsyr2
def cublasDsyr2(handle, uplo, n, alpha, x, incx, y, incy, A, lda): """ Rank-2 operation on real symmetric matrix. """ status = _libcublas.cublasDsyr2_v2(handle, _CUBLAS_FILL_MODE[uplo], n, ctypes.byref(ctypes.c_double(alpha)), int(x), incx, int(y), incy, int(A), lda) cublasCheckStatus(status)
python
def cublasDsyr2(handle, uplo, n, alpha, x, incx, y, incy, A, lda): """ Rank-2 operation on real symmetric matrix. """ status = _libcublas.cublasDsyr2_v2(handle, _CUBLAS_FILL_MODE[uplo], n, ctypes.byref(ctypes.c_double(alpha)), int(x), incx, int(y), incy, int(A), lda) cublasCheckStatus(status)
['def', 'cublasDsyr2', '(', 'handle', ',', 'uplo', ',', 'n', ',', 'alpha', ',', 'x', ',', 'incx', ',', 'y', ',', 'incy', ',', 'A', ',', 'lda', ')', ':', 'status', '=', '_libcublas', '.', 'cublasDsyr2_v2', '(', 'handle', ',', '_CUBLAS_FILL_MODE', '[', 'uplo', ']', ',', 'n', ',', 'ctypes', '.', 'byref', '(', 'ctypes', '.', 'c_double', '(', 'alpha', ')', ')', ',', 'int', '(', 'x', ')', ',', 'incx', ',', 'int', '(', 'y', ')', ',', 'incy', ',', 'int', '(', 'A', ')', ',', 'lda', ')', 'cublasCheckStatus', '(', 'status', ')']
Rank-2 operation on real symmetric matrix.
['Rank', '-', '2', 'operation', 'on', 'real', 'symmetric', 'matrix', '.']
train
https://github.com/hannes-brt/hebel/blob/1e2c3a9309c2646103901b26a55be4e312dd5005/hebel/pycuda_ops/cublas.py#L2927-L2938
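For reference, the BLAS SYR2 update this wraps is A := alpha*(x*y^T + y*x^T) + A on the triangle selected by uplo. A NumPy model of the full-matrix case, handy for checking results copied back from the GPU (illustrative only, not part of the wrapper):

import numpy as np

def syr2_reference(alpha, x, y, A):
    # Dense equivalent of the rank-2 symmetric update performed
    # in-place on the device by cublasDsyr2.
    return alpha * (np.outer(x, y) + np.outer(y, x)) + A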
6,193
wummel/linkchecker
linkcheck/fileutil.py
pathencode
def pathencode (path): """Encode a path string with the platform file system encoding.""" if isinstance(path, unicode) and not os.path.supports_unicode_filenames: path = path.encode(FSCODING, "replace") return path
python
def pathencode (path): """Encode a path string with the platform file system encoding.""" if isinstance(path, unicode) and not os.path.supports_unicode_filenames: path = path.encode(FSCODING, "replace") return path
['def', 'pathencode', '(', 'path', ')', ':', 'if', 'isinstance', '(', 'path', ',', 'unicode', ')', 'and', 'not', 'os', '.', 'path', '.', 'supports_unicode_filenames', ':', 'path', '=', 'path', '.', 'encode', '(', 'FSCODING', ',', '"replace"', ')', 'return', 'path']
Encode a path string with the platform file system encoding.
['Encode', 'a', 'path', 'string', 'with', 'the', 'platform', 'file', 'system', 'encoding', '.']
train
https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/linkcheck/fileutil.py#L167-L171
6,194
IdentityPython/pysaml2
src/saml2/mcache.py
Cache.get_identity
def get_identity(self, subject_id, entities=None): """ Get all the identity information that has been received and is still valid about the subject. :param subject_id: The identifier of the subject :param entities: The identifiers of the entities whose assertions are interesting. If the list is empty all entities are interesting. :return: A 2-tuple consisting of the identity information (a dictionary of attributes and values) and the list of entities whose information has timed out. """ if not entities: entities = self.entities(subject_id) if not entities: return {}, [] res = {} oldees = [] for (entity_id, item) in self._cache.get_multi(entities, subject_id+'_').items(): try: info = self.get_info(item) except ToOld: oldees.append(entity_id) continue for key, vals in info["ava"].items(): try: tmp = set(res[key]).union(set(vals)) res[key] = list(tmp) except KeyError: res[key] = vals return res, oldees
python
def get_identity(self, subject_id, entities=None): """ Get all the identity information that has been received and is still valid about the subject. :param subject_id: The identifier of the subject :param entities: The identifiers of the entities whose assertions are interesting. If the list is empty all entities are interesting. :return: A 2-tuple consisting of the identity information (a dictionary of attributes and values) and the list of entities whose information has timed out. """ if not entities: entities = self.entities(subject_id) if not entities: return {}, [] res = {} oldees = [] for (entity_id, item) in self._cache.get_multi(entities, subject_id+'_').items(): try: info = self.get_info(item) except ToOld: oldees.append(entity_id) continue for key, vals in info["ava"].items(): try: tmp = set(res[key]).union(set(vals)) res[key] = list(tmp) except KeyError: res[key] = vals return res, oldees
['def', 'get_identity', '(', 'self', ',', 'subject_id', ',', 'entities', '=', 'None', ')', ':', 'if', 'not', 'entities', ':', 'entities', '=', 'self', '.', 'entities', '(', 'subject_id', ')', 'if', 'not', 'entities', ':', 'return', '{', '}', ',', '[', ']', 'res', '=', '{', '}', 'oldees', '=', '[', ']', 'for', '(', 'entity_id', ',', 'item', ')', 'in', 'self', '.', '_cache', '.', 'get_multi', '(', 'entities', ',', 'subject_id', '+', "'_'", ')', '.', 'items', '(', ')', ':', 'try', ':', 'info', '=', 'self', '.', 'get_info', '(', 'item', ')', 'except', 'ToOld', ':', 'oldees', '.', 'append', '(', 'entity_id', ')', 'continue', 'for', 'key', ',', 'vals', 'in', 'info', '[', '"ava"', ']', '.', 'items', '(', ')', ':', 'try', ':', 'tmp', '=', 'set', '(', 'res', '[', 'key', ']', ')', '.', 'union', '(', 'set', '(', 'vals', ')', ')', 'res', '[', 'key', ']', '=', 'list', '(', 'tmp', ')', 'except', 'KeyError', ':', 'res', '[', 'key', ']', '=', 'vals', 'return', 'res', ',', 'oldees']
Get all the identity information that has been received and is still valid about the subject. :param subject_id: The identifier of the subject :param entities: The identifiers of the entities whose assertions are interesting. If the list is empty all entities are interesting. :return: A 2-tuple consisting of the identity information (a dictionary of attributes and values) and the list of entities whose information has timed out.
['Get', 'all', 'the', 'identity', 'information', 'that', 'has', 'been', 'received', 'and', 'is', 'still', 'valid', 'about', 'the', 'subject', '.']
train
https://github.com/IdentityPython/pysaml2/blob/d3aa78eeb7d37c12688f783cb4db1c7263a14ad6/src/saml2/mcache.py#L37-L68
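A sketch of consuming the 2-tuple, assuming a populated Cache instance; entity identifiers and the refresh helper are placeholders:

ava, timed_out = cache.get_identity(
    'subject-123', entities=['https://idp-a.example.org',
                             'https://idp-b.example.org'])
# ava maps attribute names to merged value lists across entities;
# timed_out lists entities whose stored assertions expired (ToOld).
for entity_id in timed_out:
    refresh_assertion(entity_id)  # hypothetical helper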
6,195
nint8835/jigsaw
jigsaw/PluginLoader.py
PluginLoader.reload_all_manifests
def reload_all_manifests(self): """ Reloads all loaded manifests, and loads any new manifests """ self._logger.debug("Reloading all manifests.") self._manifests = [] self.load_manifests() self._logger.debug("All manifests reloaded.")
python
def reload_all_manifests(self): """ Reloads all loaded manifests, and loads any new manifests """ self._logger.debug("Reloading all manifests.") self._manifests = [] self.load_manifests() self._logger.debug("All manifests reloaded.")
['def', 'reload_all_manifests', '(', 'self', ')', ':', 'self', '.', '_logger', '.', 'debug', '(', '"Reloading all manifests."', ')', 'self', '.', '_manifests', '=', '[', ']', 'self', '.', 'load_manifests', '(', ')', 'self', '.', '_logger', '.', 'debug', '(', '"All manifests reloaded."', ')']
Reloads all loaded manifests, and loads any new manifests
['Reloads', 'all', 'loaded', 'manifests', 'and', 'loads', 'any', 'new', 'manifests']
train
https://github.com/nint8835/jigsaw/blob/109e62801a0334652e88ea972a95a341ccc96621/jigsaw/PluginLoader.py#L223-L230
6,196
brocade/pynos
pynos/versions/ver_7/ver_7_1_0/yang/brocade_tunnels.py
brocade_tunnels.overlay_gateway_site_bfd_params_bfd_shutdown
def overlay_gateway_site_bfd_params_bfd_shutdown(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") overlay_gateway = ET.SubElement(config, "overlay-gateway", xmlns="urn:brocade.com:mgmt:brocade-tunnels") name_key = ET.SubElement(overlay_gateway, "name") name_key.text = kwargs.pop('name') site = ET.SubElement(overlay_gateway, "site") name_key = ET.SubElement(site, "name") name_key.text = kwargs.pop('name') bfd = ET.SubElement(site, "bfd") params = ET.SubElement(bfd, "params") bfd_shutdown = ET.SubElement(params, "bfd-shutdown") callback = kwargs.pop('callback', self._callback) return callback(config)
python
def overlay_gateway_site_bfd_params_bfd_shutdown(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") overlay_gateway = ET.SubElement(config, "overlay-gateway", xmlns="urn:brocade.com:mgmt:brocade-tunnels") name_key = ET.SubElement(overlay_gateway, "name") name_key.text = kwargs.pop('name') site = ET.SubElement(overlay_gateway, "site") name_key = ET.SubElement(site, "name") name_key.text = kwargs.pop('name') bfd = ET.SubElement(site, "bfd") params = ET.SubElement(bfd, "params") bfd_shutdown = ET.SubElement(params, "bfd-shutdown") callback = kwargs.pop('callback', self._callback) return callback(config)
['def', 'overlay_gateway_site_bfd_params_bfd_shutdown', '(', 'self', ',', '*', '*', 'kwargs', ')', ':', 'config', '=', 'ET', '.', 'Element', '(', '"config"', ')', 'overlay_gateway', '=', 'ET', '.', 'SubElement', '(', 'config', ',', '"overlay-gateway"', ',', 'xmlns', '=', '"urn:brocade.com:mgmt:brocade-tunnels"', ')', 'name_key', '=', 'ET', '.', 'SubElement', '(', 'overlay_gateway', ',', '"name"', ')', 'name_key', '.', 'text', '=', 'kwargs', '.', 'pop', '(', "'name'", ')', 'site', '=', 'ET', '.', 'SubElement', '(', 'overlay_gateway', ',', '"site"', ')', 'name_key', '=', 'ET', '.', 'SubElement', '(', 'site', ',', '"name"', ')', 'name_key', '.', 'text', '=', 'kwargs', '.', 'pop', '(', "'name'", ')', 'bfd', '=', 'ET', '.', 'SubElement', '(', 'site', ',', '"bfd"', ')', 'params', '=', 'ET', '.', 'SubElement', '(', 'bfd', ',', '"params"', ')', 'bfd_shutdown', '=', 'ET', '.', 'SubElement', '(', 'params', ',', '"bfd-shutdown"', ')', 'callback', '=', 'kwargs', '.', 'pop', '(', "'callback'", ',', 'self', '.', '_callback', ')', 'return', 'callback', '(', 'config', ')']
Auto Generated Code
['Auto', 'Generated', 'Code']
train
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_7/ver_7_1_0/yang/brocade_tunnels.py#L429-L444
6,197
dmwm/DBS
Server/Python/src/dbs/web/DBSReaderModel.py
DBSReaderModel.dumpBlock
def dumpBlock(self, block_name): """ API to list all information related to the block_name :param block_name: Name of block to be dumped (Required) :type block_name: str """ try: return self.dbsBlock.dumpBlock(block_name) except HTTPError as he: raise he except dbsException as de: dbsExceptionHandler(de.eCode, de.message, self.logger.exception, de.serverError) except Exception as ex: sError = "DBSReaderModel/dumpBlock. %s\n. Exception trace: \n %s" \ % (ex, traceback.format_exc()) dbsExceptionHandler('dbsException-server-error', ex.message, self.logger.exception, sError)
python
def dumpBlock(self, block_name): """ API to list all information related to the block_name :param block_name: Name of block to be dumped (Required) :type block_name: str """ try: return self.dbsBlock.dumpBlock(block_name) except HTTPError as he: raise he except dbsException as de: dbsExceptionHandler(de.eCode, de.message, self.logger.exception, de.serverError) except Exception as ex: sError = "DBSReaderModel/dumpBlock. %s\n. Exception trace: \n %s" \ % (ex, traceback.format_exc()) dbsExceptionHandler('dbsException-server-error', ex.message, self.logger.exception, sError)
['def', 'dumpBlock', '(', 'self', ',', 'block_name', ')', ':', 'try', ':', 'return', 'self', '.', 'dbsBlock', '.', 'dumpBlock', '(', 'block_name', ')', 'except', 'HTTPError', 'as', 'he', ':', 'raise', 'he', 'except', 'dbsException', 'as', 'de', ':', 'dbsExceptionHandler', '(', 'de', '.', 'eCode', ',', 'de', '.', 'message', ',', 'self', '.', 'logger', '.', 'exception', ',', 'de', '.', 'serverError', ')', 'except', 'Exception', 'as', 'ex', ':', 'sError', '=', '"DBSReaderModel/dumpBlock. %s\\n. Exception trace: \\n %s"', '%', '(', 'ex', ',', 'traceback', '.', 'format_exc', '(', ')', ')', 'dbsExceptionHandler', '(', "'dbsException-server-error'", ',', 'ex', '.', 'message', ',', 'self', '.', 'logger', '.', 'exception', ',', 'sError', ')']
API to list all information related to the block_name :param block_name: Name of block to be dumped (Required) :type block_name: str
['API', 'to', 'list', 'all', 'information', 'related', 'to', 'the', 'block_name']
train
https://github.com/dmwm/DBS/blob/9619bafce3783b3e77f0415f8f9a258e33dd1e6f/Server/Python/src/dbs/web/DBSReaderModel.py#L1507-L1524
6,198
phaethon/kamene
kamene/contrib/gsm_um.py
locationUpdatingAccept
def locationUpdatingAccept(MobileId_presence=0, FollowOnProceed_presence=0, CtsPermission_presence=0): """LOCATION UPDATING ACCEPT Section 9.2.13""" a = TpPd(pd=0x5) b = MessageType(mesType=0x02) # 00000010 c = LocalAreaId() packet = a / b / c if MobileId_presence is 1: d = MobileIdHdr(ieiMI=0x17, eightBitMI=0x0) packet = packet / d if FollowOnProceed_presence is 1: e = FollowOnProceed(ieiFOP=0xA1) packet = packet / e if CtsPermission_presence is 1: f = CtsPermissionHdr(ieiCP=0xA2, eightBitCP=0x0) packet = packet / f return packet
python
def locationUpdatingAccept(MobileId_presence=0, FollowOnProceed_presence=0, CtsPermission_presence=0): """LOCATION UPDATING ACCEPT Section 9.2.13""" a = TpPd(pd=0x5) b = MessageType(mesType=0x02) # 00000010 c = LocalAreaId() packet = a / b / c if MobileId_presence is 1: d = MobileIdHdr(ieiMI=0x17, eightBitMI=0x0) packet = packet / d if FollowOnProceed_presence is 1: e = FollowOnProceed(ieiFOP=0xA1) packet = packet / e if CtsPermission_presence is 1: f = CtsPermissionHdr(ieiCP=0xA2, eightBitCP=0x0) packet = packet / f return packet
['def', 'locationUpdatingAccept', '(', 'MobileId_presence', '=', '0', ',', 'FollowOnProceed_presence', '=', '0', ',', 'CtsPermission_presence', '=', '0', ')', ':', 'a', '=', 'TpPd', '(', 'pd', '=', '0x5', ')', 'b', '=', 'MessageType', '(', 'mesType', '=', '0x02', ')', '# 00000010', 'c', '=', 'LocalAreaId', '(', ')', 'packet', '=', 'a', '/', 'b', '/', 'c', 'if', 'MobileId_presence', 'is', '1', ':', 'd', '=', 'MobileIdHdr', '(', 'ieiMI', '=', '0x17', ',', 'eightBitMI', '=', '0x0', ')', 'packet', '=', 'packet', '/', 'd', 'if', 'FollowOnProceed_presence', 'is', '1', ':', 'e', '=', 'FollowOnProceed', '(', 'ieiFOP', '=', '0xA1', ')', 'packet', '=', 'packet', '/', 'e', 'if', 'CtsPermission_presence', 'is', '1', ':', 'f', '=', 'CtsPermissionHdr', '(', 'ieiCP', '=', '0xA2', ',', 'eightBitCP', '=', '0x0', ')', 'packet', '=', 'packet', '/', 'f', 'return', 'packet']
LOCATION UPDATING ACCEPT Section 9.2.13
['LOCATION', 'UPDATING', 'ACCEPT', 'Section', '9', '.', '2', '.', '13']
train
https://github.com/phaethon/kamene/blob/11d4064844f4f68ac5d7546f5633ac7d02082914/kamene/contrib/gsm_um.py#L1449-L1466
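A sketch of building the message with one optional IE present. Presence flags are literal 0/1 integers, as the `is 1` comparisons in the function require:

# Mandatory part only: TP/PD + message type 0x02 + local area id.
pkt = locationUpdatingAccept()

# With the optional Mobile Identity IE appended (the flag must be
# the int 1, since the function compares with 'is 1').
pkt_with_mi = locationUpdatingAccept(MobileId_presence=1)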
6,199
datastore/datastore
datastore/core/basic.py
LoggingDatastore.contains
def contains(self, key): '''Returns whether the object named by `key` exists. LoggingDatastore logs the access. ''' self.logger.info('%s: contains %s' % (self, key)) return super(LoggingDatastore, self).contains(key)
python
def contains(self, key): '''Returns whether the object named by `key` exists. LoggingDatastore logs the access. ''' self.logger.info('%s: contains %s' % (self, key)) return super(LoggingDatastore, self).contains(key)
['def', 'contains', '(', 'self', ',', 'key', ')', ':', 'self', '.', 'logger', '.', 'info', '(', "'%s: contains %s'", '%', '(', 'self', ',', 'key', ')', ')', 'return', 'super', '(', 'LoggingDatastore', ',', 'self', ')', '.', 'contains', '(', 'key', ')']
Returns whether the object named by `key` exists. LoggingDatastore logs the access.
['Returns', 'whether', 'the', 'object', 'named', 'by', 'key', 'exists', '.', 'LoggingDatastore', 'logs', 'the', 'access', '.']
train
https://github.com/datastore/datastore/blob/7ccf0cd4748001d3dbf5e6dda369b0f63e0269d3/datastore/core/basic.py#L444-L449
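A sketch of wrapping a child datastore so every contains() lookup is logged. The DictDatastore child, import paths, and logging setup are illustrative assumptions:

import logging
from datastore.core import DictDatastore, LoggingDatastore, Key

logging.basicConfig(level=logging.INFO)
ds = LoggingDatastore(DictDatastore())

key = Key('/users/alice')
ds.put(key, {'name': 'alice'})
ds.contains(key)   # logs "...: contains /users/alice", returns True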