Dataset columns (ranges are the min .. max values or string lengths reported by the viewer):

Unnamed: 0                   int64     0 .. 10k
repository_name              string    lengths 7 .. 54
func_path_in_repository      string    lengths 5 .. 223
func_name                    string    lengths 1 .. 134
whole_func_string            string    lengths 100 .. 30.3k
language                     string    1 class ("python")
func_code_string             string    lengths 100 .. 30.3k
func_code_tokens             string    lengths 138 .. 33.2k
func_documentation_string    string    lengths 1 .. 15k
func_documentation_tokens    string    lengths 5 .. 5.14k
split_name                   string    1 value ("train")
func_code_url                string    lengths 91 .. 315
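These are the CodeSearchNet column names (code strings paired with their docstrings plus provenance). A minimal sketch of iterating over such records with the Hugging Face datasets library follows; the dataset id "my-org/python-code-docs" is a placeholder for illustration, not this corpus's real identifier.

# Sketch: load a CodeSearchNet-style corpus and pair each function with its docstring.
# The dataset id below is a placeholder -- substitute the real one.
from datasets import load_dataset

ds = load_dataset("my-org/python-code-docs", split="train")  # hypothetical id

for record in ds.select(range(3)):
    print(record["repository_name"], record["func_name"])
    print(record["func_documentation_string"][:80])  # first 80 characters of the doc
    print(record["func_code_url"])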
3,400
dbcli/athenacli
athenacli/packages/filepaths.py
suggest_path
def suggest_path(root_dir):
    """List all files and subdirectories in a directory.

    If the directory is not specified, suggest root directory,
    user directory, current and parent directory.

    :param root_dir: string: directory to list
    :return: list
    """
    if not root_dir:
        return [os.path.abspath(os.sep), '~', os.curdir, os.pardir]

    if '~' in root_dir:
        root_dir = os.path.expanduser(root_dir)

    if not os.path.exists(root_dir):
        root_dir, _ = os.path.split(root_dir)

    return list_path(root_dir)
python
['def', 'suggest_path', '(', 'root_dir', ')', ':', 'if', 'not', 'root_dir', ':', 'return', '[', 'os', '.', 'path', '.', 'abspath', '(', 'os', '.', 'sep', ')', ',', "'~'", ',', 'os', '.', 'curdir', ',', 'os', '.', 'pardir', ']', 'if', "'~'", 'in', 'root_dir', ':', 'root_dir', '=', 'os', '.', 'path', '.', 'expanduser', '(', 'root_dir', ')', 'if', 'not', 'os', '.', 'path', '.', 'exists', '(', 'root_dir', ')', ':', 'root_dir', ',', '_', '=', 'os', '.', 'path', '.', 'split', '(', 'root_dir', ')', 'return', 'list_path', '(', 'root_dir', ')']
List all files and subdirectories in a directory. If the directory is not specified, suggest root directory, user directory, current and parent directory. :param root_dir: string: directory to list :return: list
['List', 'all', 'files', 'and', 'subdirectories', 'in', 'a', 'directory', '.', 'If', 'the', 'directory', 'is', 'not', 'specified', 'suggest', 'root', 'directory', 'user', 'directory', 'current', 'and', 'parent', 'directory', '.', ':', 'param', 'root_dir', ':', 'string', ':', 'directory', 'to', 'list', ':', 'return', ':', 'list']
train
https://github.com/dbcli/athenacli/blob/bcab59e4953145866430083e902ed4d042d4ebba/athenacli/packages/filepaths.py#L44-L60
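A quick illustration of the empty-input branch above, assuming suggest_path and os are in scope (list_path is an athenacli helper not reproduced here, so only the first branch is exercised):

print(suggest_path(''))  # ['/', '~', '.', '..'] on POSIX systems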
3,401
MillionIntegrals/vel
vel/phase/cycle.py
CycleCallback.set_lr
def set_lr(self, lr):
    """ Set a learning rate for the optimizer """
    if isinstance(lr, list):
        for group_lr, param_group in zip(lr, self.optimizer.param_groups):
            param_group['lr'] = group_lr
    else:
        for param_group in self.optimizer.param_groups:
            param_group['lr'] = lr
python
['def', 'set_lr', '(', 'self', ',', 'lr', ')', ':', 'if', 'isinstance', '(', 'lr', ',', 'list', ')', ':', 'for', 'group_lr', ',', 'param_group', 'in', 'zip', '(', 'lr', ',', 'self', '.', 'optimizer', '.', 'param_groups', ')', ':', 'param_group', '[', "'lr'", ']', '=', 'group_lr', 'else', ':', 'for', 'param_group', 'in', 'self', '.', 'optimizer', '.', 'param_groups', ':', 'param_group', '[', "'lr'", ']', '=', 'lr']
Set a learning rate for the optimizer
['Set', 'a', 'learning', 'rate', 'for', 'the', 'optimizer']
train
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/phase/cycle.py#L75-L82
3,402
Microsoft/knack
knack/introspection.py
extract_full_summary_from_signature
def extract_full_summary_from_signature(operation):
    """ Extract the summary from the docstring of the command. """
    lines = inspect.getdoc(operation)
    regex = r'\s*(:param)\s+(.+?)\s*:(.*)'
    summary = ''
    if lines:
        match = re.search(regex, lines)
        summary = lines[:match.regs[0][0]] if match else lines
        summary = summary.replace('\n', ' ').replace('\r', '')
    return summary
python
['def', 'extract_full_summary_from_signature', '(', 'operation', ')', ':', 'lines', '=', 'inspect', '.', 'getdoc', '(', 'operation', ')', 'regex', '=', "r'\\s*(:param)\\s+(.+?)\\s*:(.*)'", 'summary', '=', "''", 'if', 'lines', ':', 'match', '=', 're', '.', 'search', '(', 'regex', ',', 'lines', ')', 'summary', '=', 'lines', '[', ':', 'match', '.', 'regs', '[', '0', ']', '[', '0', ']', ']', 'if', 'match', 'else', 'lines', 'summary', '=', 'summary', '.', 'replace', '(', "'\\n'", ',', "' '", ')', '.', 'replace', '(', "'\\r'", ',', "''", ')', 'return', 'summary']
Extract the summary from the docstring of the command.
['Extract', 'the', 'summary', 'from', 'the', 'docstring', 'of', 'the', 'command', '.']
train
https://github.com/Microsoft/knack/blob/5f1a480a33f103e2688c46eef59fb2d9eaf2baad/knack/introspection.py#L15-L25
3,403
saltstack/salt
salt/modules/boto_iam.py
list_policies
def list_policies(region=None, key=None, keyid=None, profile=None):
    '''
    List policies.

    CLI Example:

    .. code-block:: bash

        salt myminion boto_iam.list_policies
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    try:
        policies = []
        for ret in __utils__['boto.paged_call'](conn.list_policies):
            policies.append(ret.get('list_policies_response', {}).get('list_policies_result', {}).get('policies'))
        return policies
    except boto.exception.BotoServerError as e:
        log.debug(e)
        msg = 'Failed to list policy versions.'
        log.error(msg)
        return []
python
['def', 'list_policies', '(', 'region', '=', 'None', ',', 'key', '=', 'None', ',', 'keyid', '=', 'None', ',', 'profile', '=', 'None', ')', ':', 'conn', '=', '_get_conn', '(', 'region', '=', 'region', ',', 'key', '=', 'key', ',', 'keyid', '=', 'keyid', ',', 'profile', '=', 'profile', ')', 'try', ':', 'policies', '=', '[', ']', 'for', 'ret', 'in', '__utils__', '[', "'boto.paged_call'", ']', '(', 'conn', '.', 'list_policies', ')', ':', 'policies', '.', 'append', '(', 'ret', '.', 'get', '(', "'list_policies_response'", ',', '{', '}', ')', '.', 'get', '(', "'list_policies_result'", ',', '{', '}', ')', '.', 'get', '(', "'policies'", ')', ')', 'return', 'policies', 'except', 'boto', '.', 'exception', '.', 'BotoServerError', 'as', 'e', ':', 'log', '.', 'debug', '(', 'e', ')', 'msg', '=', "'Failed to list policy versions.'", 'log', '.', 'error', '(', 'msg', ')', 'return', '[', ']']
List policies. CLI Example: .. code-block:: bash salt myminion boto_iam.list_policies
['List', 'policies', '.']
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_iam.py#L1806-L1827
3,404
saltstack/salt
salt/cli/support/__init__.py
_render_profile
def _render_profile(path, caller, runner):
    '''
    Render profile as Jinja2.

    :param path:
    :return:
    '''
    env = jinja2.Environment(loader=jinja2.FileSystemLoader(os.path.dirname(path)), trim_blocks=False)
    return env.get_template(os.path.basename(path)).render(salt=caller, runners=runner).strip()
python
['def', '_render_profile', '(', 'path', ',', 'caller', ',', 'runner', ')', ':', 'env', '=', 'jinja2', '.', 'Environment', '(', 'loader', '=', 'jinja2', '.', 'FileSystemLoader', '(', 'os', '.', 'path', '.', 'dirname', '(', 'path', ')', ')', ',', 'trim_blocks', '=', 'False', ')', 'return', 'env', '.', 'get_template', '(', 'os', '.', 'path', '.', 'basename', '(', 'path', ')', ')', '.', 'render', '(', 'salt', '=', 'caller', ',', 'runners', '=', 'runner', ')', '.', 'strip', '(', ')']
Render profile as Jinja2. :param path: :return:
['Render', 'profile', 'as', 'Jinja2', '.', ':', 'param', 'path', ':', ':', 'return', ':']
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cli/support/__init__.py#L15-L22
3,405
marcotcr/lime
lime/lime_tabular.py
TableDomainMapper.map_exp_ids
def map_exp_ids(self, exp):
    """Maps ids to feature names.

    Args:
        exp: list of tuples [(id, weight), (id,weight)]

    Returns:
        list of tuples (feature_name, weight)
    """
    names = self.exp_feature_names
    if self.discretized_feature_names is not None:
        names = self.discretized_feature_names
    return [(names[x[0]], x[1]) for x in exp]
python
['def', 'map_exp_ids', '(', 'self', ',', 'exp', ')', ':', 'names', '=', 'self', '.', 'exp_feature_names', 'if', 'self', '.', 'discretized_feature_names', 'is', 'not', 'None', ':', 'names', '=', 'self', '.', 'discretized_feature_names', 'return', '[', '(', 'names', '[', 'x', '[', '0', ']', ']', ',', 'x', '[', '1', ']', ')', 'for', 'x', 'in', 'exp', ']']
Maps ids to feature names. Args: exp: list of tuples [(id, weight), (id,weight)] Returns: list of tuples (feature_name, weight)
['Maps', 'ids', 'to', 'feature', 'names', '.']
train
https://github.com/marcotcr/lime/blob/08133d47df00ed918e22005e0c98f6eefd5a1d71/lime/lime_tabular.py#L45-L57
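The mapping above is a plain index lookup; a self-contained sketch with invented feature names and weights shows the same behavior:

# Standalone sketch of the id -> feature-name mapping (names and weights invented).
exp_feature_names = ["age", "income", "height"]
exp = [(2, 0.42), (0, -0.13)]  # (feature id, weight) pairs, as LIME explanations produce

mapped = [(exp_feature_names[fid], weight) for fid, weight in exp]
print(mapped)  # [('height', 0.42), ('age', -0.13)]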
3,406
resync/resync
resync/resource_set.py
ResourceSet.add
def add(self, resource, replace=False):
    """Add just a single resource."""
    uri = resource.uri
    if (uri in self and not replace):
        raise ResourceSetDupeError(
            "Attempt to add resource already in this set")
    self[uri] = resource
python
['def', 'add', '(', 'self', ',', 'resource', ',', 'replace', '=', 'False', ')', ':', 'uri', '=', 'resource', '.', 'uri', 'if', '(', 'uri', 'in', 'self', 'and', 'not', 'replace', ')', ':', 'raise', 'ResourceSetDupeError', '(', '"Attempt to add resource already in this set"', ')', 'self', '[', 'uri', ']', '=', 'resource']
Add just a single resource.
['Add', 'just', 'a', 'single', 'resource', '.']
train
https://github.com/resync/resync/blob/98292c17b2c00f2d6f5191c6ab51fef8c292a018/resync/resource_set.py#L30-L36
3,407
diffeo/rejester
rejester/_registry.py
Registry.popitem_move
def popitem_move(self, from_dict, to_dict,
                 priority_min='-inf', priority_max='+inf'):
    '''Select an item and move it to another dictionary.

    The item comes from `from_dict`, and has the lowest score
    at least `priority_min` and at most `priority_max`.  If some
    item is found, remove it from `from_dict`, add it to `to_dict`,
    and return it.

    This runs as a single atomic operation but still requires
    a session lock.

    :param str from_dict: source dictionary
    :param str to_dict: destination dictionary
    :param float priority_min: lowest score
    :param float priority_max: highest score
    :return: pair of (key, value) if an item was moved, or
      :const:`None`
    '''
    if self._session_lock_identifier is None:
        raise ProgrammerError('must acquire lock first')
    conn = redis.Redis(connection_pool=self.pool)
    script = conn.register_script('''
    if redis.call("get", KEYS[1]) == ARGV[1]
    then
        -- find the next key and priority
        local next_items = redis.call("zrangebyscore", KEYS[3],
            ARGV[2], ARGV[3], "WITHSCORES", "LIMIT", 0, 1)
        local next_key = next_items[1]
        local next_priority = next_items[2]

        if not next_key then
            return {}
        end

        -- remove next item of from_dict
        redis.call("zrem", KEYS[3], next_key)

        local next_val = redis.call("hget", KEYS[2], next_key)
        -- zrem removed it from list, so also remove from hash
        redis.call("hdel", KEYS[2], next_key)

        -- put it in to_dict
        redis.call("hset", KEYS[4], next_key, next_val)
        redis.call("zadd", KEYS[5], next_priority, next_key)

        return {next_key, next_val, next_priority}
    else
        -- ERROR: No longer own the lock
        return -1
    end
    ''')
    key_value = script(keys=[self._lock_name,
                             self._namespace(from_dict),
                             self._namespace(from_dict) + 'keys',
                             self._namespace(to_dict),
                             self._namespace(to_dict) + 'keys'],
                       args=[self._session_lock_identifier,
                             priority_min, priority_max])
    if key_value == []:
        return None
    if None in key_value or key_value == -1:
        raise KeyError(
            'Registry.popitem_move(%r, %r) --> %r'
            % (from_dict, to_dict, key_value))
    return self._decode(key_value[0]), self._decode(key_value[1])
python
['def', 'popitem_move', '(', 'self', ',', 'from_dict', ',', 'to_dict', ',', 'priority_min', '=', "'-inf'", ',', 'priority_max', '=', "'+inf'", ')', ':', 'if', 'self', '.', '_session_lock_identifier', 'is', 'None', ':', 'raise', 'ProgrammerError', '(', "'must acquire lock first'", ')', 'conn', '=', 'redis', '.', 'Redis', '(', 'connection_pool', '=', 'self', '.', 'pool', ')', 'script', '=', 'conn', '.', 'register_script', '(', '\'\'\'\n if redis.call("get", KEYS[1]) == ARGV[1]\n then\n -- find the next key and priority\n local next_items = redis.call("zrangebyscore", KEYS[3],\n ARGV[2], ARGV[3], "WITHSCORES", "LIMIT", 0, 1)\n local next_key = next_items[1]\n local next_priority = next_items[2]\n \n if not next_key then\n return {}\n end\n\n -- remove next item of from_dict\n redis.call("zrem", KEYS[3], next_key)\n\n local next_val = redis.call("hget", KEYS[2], next_key)\n -- zrem removed it from list, so also remove from hash\n redis.call("hdel", KEYS[2], next_key)\n\n -- put it in to_dict\n redis.call("hset", KEYS[4], next_key, next_val)\n redis.call("zadd", KEYS[5], next_priority, next_key)\n\n return {next_key, next_val, next_priority}\n else\n -- ERROR: No longer own the lock\n return -1\n end\n \'\'\'', ')', 'key_value', '=', 'script', '(', 'keys', '=', '[', 'self', '.', '_lock_name', ',', 'self', '.', '_namespace', '(', 'from_dict', ')', ',', 'self', '.', '_namespace', '(', 'from_dict', ')', '+', "'keys'", ',', 'self', '.', '_namespace', '(', 'to_dict', ')', ',', 'self', '.', '_namespace', '(', 'to_dict', ')', '+', "'keys'", ']', ',', 'args', '=', '[', 'self', '.', '_session_lock_identifier', ',', 'priority_min', ',', 'priority_max', ']', ')', 'if', 'key_value', '==', '[', ']', ':', 'return', 'None', 'if', 'None', 'in', 'key_value', 'or', 'key_value', '==', '-', '1', ':', 'raise', 'KeyError', '(', "'Registry.popitem_move(%r, %r) --> %r'", '%', '(', 'from_dict', ',', 'to_dict', ',', 'key_value', ')', ')', 'return', 'self', '.', '_decode', '(', 'key_value', '[', '0', ']', ')', ',', 'self', '.', '_decode', '(', 'key_value', '[', '1', ']', ')']
Select an item and move it to another dictionary. The item comes from `from_dict`, and has the lowest score at least `priority_min` and at most `priority_max`. If some item is found, remove it from `from_dict`, add it to `to_dict`, and return it. This runs as a single atomic operation but still requires a session lock. :param str from_dict: source dictionary :param str to_dict: destination dictionary :param float priority_min: lowest score :param float priority_max: highest score :return: pair of (key, value) if an item was moved, or :const:`None`
['Select', 'an', 'item', 'and', 'move', 'it', 'to', 'another', 'dictionary', '.']
train
https://github.com/diffeo/rejester/blob/5438a4a18be2801d7826c46e2079ba9639d2ecb4/rejester/_registry.py#L581-L650
3,408
mitsei/dlkit
dlkit/handcar/learning/sessions.py
ObjectiveBankAdminSession._record_extension
def _record_extension(self, bank_id, key, value):
    """
    To structure a record extension property bean
    """
    record_bean = {
        'value': value,
        'displayName': self._text_bean(key),
        'description': self._text_bean(key),
        'displayLabel': self._text_bean(key),
        'associatedId': str(bank_id)
    }
    return record_bean
python
['def', '_record_extension', '(', 'self', ',', 'bank_id', ',', 'key', ',', 'value', ')', ':', 'record_bean', '=', '{', "'value'", ':', 'value', ',', "'displayName'", ':', 'self', '.', '_text_bean', '(', 'key', ')', ',', "'description'", ':', 'self', '.', '_text_bean', '(', 'key', ')', ',', "'displayLabel'", ':', 'self', '.', '_text_bean', '(', 'key', ')', ',', "'associatedId'", ':', 'str', '(', 'bank_id', ')', '}', 'return', 'record_bean']
To structure a record extension property bean
['To', 'structure', 'a', 'record', 'extension', 'property', 'bean']
train
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/handcar/learning/sessions.py#L3667-L3678
3,409
dls-controls/pymalcolm
malcolm/core/serializable.py
camel_to_title
def camel_to_title(name):
    """Takes a camelCaseFieldName and returns an Title Case Field Name

    Args:
        name (str): E.g. camelCaseFieldName

    Returns:
        str: Title Case converted name. E.g. Camel Case Field Name
    """
    split = re.findall(r"[A-Z]?[a-z0-9]+|[A-Z]+(?=[A-Z]|$)", name)
    ret = " ".join(split)
    ret = ret[0].upper() + ret[1:]
    return ret
python
['def', 'camel_to_title', '(', 'name', ')', ':', 'split', '=', 're', '.', 'findall', '(', 'r"[A-Z]?[a-z0-9]+|[A-Z]+(?=[A-Z]|$)"', ',', 'name', ')', 'ret', '=', '" "', '.', 'join', '(', 'split', ')', 'ret', '=', 'ret', '[', '0', ']', '.', 'upper', '(', ')', '+', 'ret', '[', '1', ':', ']', 'return', 'ret']
Takes a camelCaseFieldName and returns an Title Case Field Name Args: name (str): E.g. camelCaseFieldName Returns: str: Title Case converted name. E.g. Camel Case Field Name
['Takes', 'a', 'camelCaseFieldName', 'and', 'returns', 'an', 'Title', 'Case', 'Field', 'Name']
train
https://github.com/dls-controls/pymalcolm/blob/80ea667e4da26365a6cebc0249f52fdc744bd983/malcolm/core/serializable.py#L55-L67
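The regex above keeps acronym runs together while splitting lowercase runs; a quick self-contained check, with expected outputs traced by hand from the pattern:

import re

def camel_to_title(name):
    # Same splitting logic as the record above.
    split = re.findall(r"[A-Z]?[a-z0-9]+|[A-Z]+(?=[A-Z]|$)", name)
    ret = " ".join(split)
    return ret[0].upper() + ret[1:]

print(camel_to_title("camelCaseFieldName"))  # Camel Case Field Name
print(camel_to_title("myHTTPServer"))        # My HTTP Server -- the acronym stays grouped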
3,410
QualiSystems/vCenterShell
package/cloudshell/cp/vcenter/commands/command_orchestrator.py
CommandOrchestrator._parse_remote_model
def _parse_remote_model(self, context):
    """
    parse the remote resource model and adds its full name

    :type context: models.QualiDriverModels.ResourceRemoteCommandContext
    """
    if not context.remote_endpoints:
        raise Exception('no remote resources found in context: {0}',
                        jsonpickle.encode(context, unpicklable=False))
    resource = context.remote_endpoints[0]
    dictionary = jsonpickle.decode(resource.app_context.deployed_app_json)
    holder = DeployDataHolder(dictionary)
    app_resource_detail = GenericDeployedAppResourceModel()
    app_resource_detail.vm_uuid = holder.vmdetails.uid
    app_resource_detail.cloud_provider = context.resource.fullname
    app_resource_detail.fullname = resource.fullname
    if hasattr(holder.vmdetails, 'vmCustomParams'):
        app_resource_detail.vm_custom_params = holder.vmdetails.vmCustomParams
    return app_resource_detail
python
['def', '_parse_remote_model', '(', 'self', ',', 'context', ')', ':', 'if', 'not', 'context', '.', 'remote_endpoints', ':', 'raise', 'Exception', '(', "'no remote resources found in context: {0}'", ',', 'jsonpickle', '.', 'encode', '(', 'context', ',', 'unpicklable', '=', 'False', ')', ')', 'resource', '=', 'context', '.', 'remote_endpoints', '[', '0', ']', 'dictionary', '=', 'jsonpickle', '.', 'decode', '(', 'resource', '.', 'app_context', '.', 'deployed_app_json', ')', 'holder', '=', 'DeployDataHolder', '(', 'dictionary', ')', 'app_resource_detail', '=', 'GenericDeployedAppResourceModel', '(', ')', 'app_resource_detail', '.', 'vm_uuid', '=', 'holder', '.', 'vmdetails', '.', 'uid', 'app_resource_detail', '.', 'cloud_provider', '=', 'context', '.', 'resource', '.', 'fullname', 'app_resource_detail', '.', 'fullname', '=', 'resource', '.', 'fullname', 'if', 'hasattr', '(', 'holder', '.', 'vmdetails', ',', "'vmCustomParams'", ')', ':', 'app_resource_detail', '.', 'vm_custom_params', '=', 'holder', '.', 'vmdetails', '.', 'vmCustomParams', 'return', 'app_resource_detail']
parse the remote resource model and adds its full name :type context: models.QualiDriverModels.ResourceRemoteCommandContext
['parse', 'the', 'remote', 'resource', 'model', 'and', 'adds', 'its', 'full', 'name', ':', 'type', 'context', ':', 'models', '.', 'QualiDriverModels', '.', 'ResourceRemoteCommandContext']
train
https://github.com/QualiSystems/vCenterShell/blob/e2e24cd938a92a68f4a8e6a860810d3ef72aae6d/package/cloudshell/cp/vcenter/commands/command_orchestrator.py#L432-L450
3,411
cbrand/vpnchooser
src/vpnchooser/resources/user.py
UserResource.delete
def delete(self, user_name: str):
    """
    Deletes the resource with the given name.
    """
    user = self._get_or_abort(user_name)
    session.delete(user)
    session.commit()
    return '', 204
python
['def', 'delete', '(', 'self', ',', 'user_name', ':', 'str', ')', ':', 'user', '=', 'self', '.', '_get_or_abort', '(', 'user_name', ')', 'session', '.', 'delete', '(', 'user', ')', 'session', '.', 'commit', '(', ')', 'return', "''", ',', '204']
Deletes the resource with the given name.
['Deletes', 'the', 'resource', 'with', 'the', 'given', 'name', '.']
train
https://github.com/cbrand/vpnchooser/blob/d153e3d05555c23cf5e8e15e507eecad86465923/src/vpnchooser/resources/user.py#L111-L118
3,412
nuagenetworks/bambou
bambou/nurest_fetcher.py
NURESTFetcher._send_content
def _send_content(self, content, connection):
    """ Send a content array from the connection """
    if connection:
        # "async" became a reserved word in Python 3.7, so this attribute
        # access only parses on the older interpreters this code targets.
        if connection.async:
            callback = connection.callbacks['remote']
            if callback:
                callback(self, self.parent_object, content)
            self.current_connection.reset()
            self.current_connection = None
        else:
            return (self, self.parent_object, content)
python
['def', '_send_content', '(', 'self', ',', 'content', ',', 'connection', ')', ':', 'if', 'connection', ':', 'if', 'connection', '.', 'async', ':', 'callback', '=', 'connection', '.', 'callbacks', '[', "'remote'", ']', 'if', 'callback', ':', 'callback', '(', 'self', ',', 'self', '.', 'parent_object', ',', 'content', ')', 'self', '.', 'current_connection', '.', 'reset', '(', ')', 'self', '.', 'current_connection', '=', 'None', 'else', ':', 'return', '(', 'self', ',', 'self', '.', 'parent_object', ',', 'content', ')']
Send a content array from the connection
['Send', 'a', 'content', 'array', 'from', 'the', 'connection']
train
https://github.com/nuagenetworks/bambou/blob/d334fea23e384d3df8e552fe1849ad707941c666/bambou/nurest_fetcher.py#L481-L495
3,413
akfullfo/taskforce
taskforce/task.py
legion.task_del
def task_del(self, t):
    """
    Remove a task in this legion.  If the task has active processes, an
    attempt is made to stop them before the task is deleted.
    """
    name = t._name
    if name in self._tasknames:
        del self._tasknames[name]
    self._tasks.discard(t)
    self._tasks_scoped.discard(t)
    try:
        t.stop()
    except Exception as e:  # bind the exception so the log call below can report it
        log = self._params.get('log', self._discard)
        log.error("Failed to stop processes for task %r -- %s", name, e,
                  exc_info=log.isEnabledFor(logging.DEBUG))
    for pid in t.get_pids():
        self.proc_del(pid)
python
['def', 'task_del', '(', 'self', ',', 't', ')', ':', 'name', '=', 't', '.', '_name', 'if', 'name', 'in', 'self', '.', '_tasknames', ':', 'del', 'self', '.', '_tasknames', '[', 'name', ']', 'self', '.', '_tasks', '.', 'discard', '(', 't', ')', 'self', '.', '_tasks_scoped', '.', 'discard', '(', 't', ')', 'try', ':', 't', '.', 'stop', '(', ')', 'except', ':', 'log', '=', 'self', '.', '_params', '.', 'get', '(', "'log'", ',', 'self', '.', '_discard', ')', 'log', '.', 'error', '(', '"Failed to stop processes for task %r -- %s"', ',', 'name', ',', 'e', ',', 'exc_info', '=', 'log', '.', 'isEnabledFor', '(', 'logging', '.', 'DEBUG', ')', ')', 'for', 'pid', 'in', 't', '.', 'get_pids', '(', ')', ':', 'self', '.', 'proc_del', '(', 'pid', ')']
Remove a task in this legion. If the task has active processes, an attempt is made to stop them before the task is deleted.
['Remove', 'a', 'task', 'in', 'this', 'legion', '.', 'If', 'the', 'task', 'has', 'active', 'processes', 'an', 'attempt', 'is', 'made', 'to', 'stop', 'them', 'before', 'the', 'task', 'is', 'deleted', '.']
train
https://github.com/akfullfo/taskforce/blob/bc6dd744bd33546447d085dbd18a350532220193/taskforce/task.py#L1233-L1250
3,414
avelkoski/FRB
fred/clients/releases.py
ReleasesClient.all_releases
def all_releases(self, response_type=None, params=None):
    """
    Function to request all releases of economic data.
    `<https://research.stlouisfed.org/docs/api/fred/releases.html>`_

    :arg str response_type: File extension of response. Options are 'xml', 'json',
        'dict','df','numpy','csv','tab,'pipe'. Required.
    :arg str realtime_start: The start of the real-time period. Format "YYYY-MM-DD"
    :arg str realtime_end: The end of the real-time period. Format "YYYY-MM-DD"
    :arg int limit: The maximum number of results to return. Options 1 to 1000
    :arg int offset: Data offset. Options >=0
    :arg str order_by: Order results by values of the specified attribute.
        Options are 'release_id', 'name', 'press_release', 'realtime_start', 'realtime_end'
    :arg str sort_order: Sort results for attribute values specified by order_by.
        Options are 'asc','desc'
    :arg bool ssl_verify: To verify HTTPs.
    """
    path = '/releases?'
    response_type = response_type if response_type else self.response_type
    if response_type != 'xml':
        params['file_type'] = 'json'
    response = _get_request(self.url_root, self.api_key, path, response_type, params, self.ssl_verify)
    return response
python
['def', 'all_releases', '(', 'self', ',', 'response_type', '=', 'None', ',', 'params', '=', 'None', ')', ':', 'path', '=', "'/releases?'", 'response_type', '=', 'response_type', 'if', 'response_type', 'else', 'self', '.', 'response_type', 'if', 'response_type', '!=', "'xml'", ':', 'params', '[', "'file_type'", ']', '=', "'json'", 'response', '=', '_get_request', '(', 'self', '.', 'url_root', ',', 'self', '.', 'api_key', ',', 'path', ',', 'response_type', ',', 'params', ',', 'self', '.', 'ssl_verify', ')', 'return', 'response']
Function to request all releases of economic data. `<https://research.stlouisfed.org/docs/api/fred/releases.html>`_ :arg str response_type: File extension of response. Options are 'xml', 'json', 'dict','df','numpy','csv','tab,'pipe'. Required. :arg str realtime_start: The start of the real-time period. Format "YYYY-MM-DD" :arg str realtime_end: The end of the real-time period. Format "YYYY-MM-DD" :arg int limit: The maximum number of results to return. Options 1 to 1000 :arg int offset: Data offset. Options >=0 :arg str order_by: Order results by values of the specified attribute. Options are 'release_id', 'name', 'press_release', 'realtime_start', 'realtime_end' :arg str sort_order: Sort results for attribute values specified by order_by. Options are 'asc','desc' :arg bool ssl_verify: To verify HTTPs.
['Function', 'to', 'request', 'all', 'releases', 'of', 'economic', 'data', '.', '<https', ':', '//', 'research', '.', 'stlouisfed', '.', 'org', '/', 'docs', '/', 'api', '/', 'fred', '/', 'releases', '.', 'html', '>', '_']
train
https://github.com/avelkoski/FRB/blob/692bcf576e17bd1a81db2b7644f4f61aeb39e5c7/fred/clients/releases.py#L12-L32
3,415
StanfordVL/robosuite
robosuite/environments/baxter.py
BaxterEnv._get_observation
def _get_observation(self):
    """
    Returns an OrderedDict containing observations [(name_string, np.array), ...].

    Important keys:
        robot-state: contains robot-centric information.
    """
    di = super()._get_observation()

    # proprioceptive features
    di["joint_pos"] = np.array(
        [self.sim.data.qpos[x] for x in self._ref_joint_pos_indexes]
    )
    di["joint_vel"] = np.array(
        [self.sim.data.qvel[x] for x in self._ref_joint_vel_indexes]
    )
    robot_states = [
        np.sin(di["joint_pos"]),
        np.cos(di["joint_pos"]),
        di["joint_vel"],
    ]

    if self.has_gripper_right:
        di["right_gripper_qpos"] = np.array(
            [
                self.sim.data.qpos[x]
                for x in self._ref_gripper_right_joint_pos_indexes
            ]
        )
        di["right_gripper_qvel"] = np.array(
            [
                self.sim.data.qvel[x]
                for x in self._ref_gripper_right_joint_vel_indexes
            ]
        )
        di["right_eef_pos"] = self.sim.data.site_xpos[self.right_eef_site_id]
        di["right_eef_quat"] = T.convert_quat(
            self.sim.data.get_body_xquat("right_hand"), to="xyzw"
        )
        robot_states.extend(
            [di["right_gripper_qpos"], di["right_eef_pos"], di["right_eef_quat"]]
        )

    if self.has_gripper_left:
        di["left_gripper_qpos"] = np.array(
            [
                self.sim.data.qpos[x]
                for x in self._ref_gripper_left_joint_pos_indexes
            ]
        )
        di["left_gripper_qvel"] = np.array(
            [
                self.sim.data.qvel[x]
                for x in self._ref_gripper_left_joint_vel_indexes
            ]
        )
        di["left_eef_pos"] = self.sim.data.site_xpos[self.left_eef_site_id]
        di["left_eef_quat"] = T.convert_quat(
            self.sim.data.get_body_xquat("left_hand"), to="xyzw"
        )
        robot_states.extend(
            [di["left_gripper_qpos"], di["left_eef_pos"], di["left_eef_quat"]]
        )

    di["robot-state"] = np.concatenate(robot_states)
    return di
python
['def', '_get_observation', '(', 'self', ')', ':', 'di', '=', 'super', '(', ')', '.', '_get_observation', '(', ')', '# proprioceptive features', 'di', '[', '"joint_pos"', ']', '=', 'np', '.', 'array', '(', '[', 'self', '.', 'sim', '.', 'data', '.', 'qpos', '[', 'x', ']', 'for', 'x', 'in', 'self', '.', '_ref_joint_pos_indexes', ']', ')', 'di', '[', '"joint_vel"', ']', '=', 'np', '.', 'array', '(', '[', 'self', '.', 'sim', '.', 'data', '.', 'qvel', '[', 'x', ']', 'for', 'x', 'in', 'self', '.', '_ref_joint_vel_indexes', ']', ')', 'robot_states', '=', '[', 'np', '.', 'sin', '(', 'di', '[', '"joint_pos"', ']', ')', ',', 'np', '.', 'cos', '(', 'di', '[', '"joint_pos"', ']', ')', ',', 'di', '[', '"joint_vel"', ']', ',', ']', 'if', 'self', '.', 'has_gripper_right', ':', 'di', '[', '"right_gripper_qpos"', ']', '=', 'np', '.', 'array', '(', '[', 'self', '.', 'sim', '.', 'data', '.', 'qpos', '[', 'x', ']', 'for', 'x', 'in', 'self', '.', '_ref_gripper_right_joint_pos_indexes', ']', ')', 'di', '[', '"right_gripper_qvel"', ']', '=', 'np', '.', 'array', '(', '[', 'self', '.', 'sim', '.', 'data', '.', 'qvel', '[', 'x', ']', 'for', 'x', 'in', 'self', '.', '_ref_gripper_right_joint_vel_indexes', ']', ')', 'di', '[', '"right_eef_pos"', ']', '=', 'self', '.', 'sim', '.', 'data', '.', 'site_xpos', '[', 'self', '.', 'right_eef_site_id', ']', 'di', '[', '"right_eef_quat"', ']', '=', 'T', '.', 'convert_quat', '(', 'self', '.', 'sim', '.', 'data', '.', 'get_body_xquat', '(', '"right_hand"', ')', ',', 'to', '=', '"xyzw"', ')', 'robot_states', '.', 'extend', '(', '[', 'di', '[', '"right_gripper_qpos"', ']', ',', 'di', '[', '"right_eef_pos"', ']', ',', 'di', '[', '"right_eef_quat"', ']', ']', ')', 'if', 'self', '.', 'has_gripper_left', ':', 'di', '[', '"left_gripper_qpos"', ']', '=', 'np', '.', 'array', '(', '[', 'self', '.', 'sim', '.', 'data', '.', 'qpos', '[', 'x', ']', 'for', 'x', 'in', 'self', '.', '_ref_gripper_left_joint_pos_indexes', ']', ')', 'di', '[', '"left_gripper_qvel"', ']', '=', 'np', '.', 'array', '(', '[', 'self', '.', 'sim', '.', 'data', '.', 'qvel', '[', 'x', ']', 'for', 'x', 'in', 'self', '.', '_ref_gripper_left_joint_vel_indexes', ']', ')', 'di', '[', '"left_eef_pos"', ']', '=', 'self', '.', 'sim', '.', 'data', '.', 'site_xpos', '[', 'self', '.', 'left_eef_site_id', ']', 'di', '[', '"left_eef_quat"', ']', '=', 'T', '.', 'convert_quat', '(', 'self', '.', 'sim', '.', 'data', '.', 'get_body_xquat', '(', '"left_hand"', ')', ',', 'to', '=', '"xyzw"', ')', 'robot_states', '.', 'extend', '(', '[', 'di', '[', '"left_gripper_qpos"', ']', ',', 'di', '[', '"left_eef_pos"', ']', ',', 'di', '[', '"left_eef_quat"', ']', ']', ')', 'di', '[', '"robot-state"', ']', '=', 'np', '.', 'concatenate', '(', 'robot_states', ')', 'return', 'di']
Returns an OrderedDict containing observations [(name_string, np.array), ...]. Important keys: robot-state: contains robot-centric information.
['Returns', 'an', 'OrderedDict', 'containing', 'observations', '[', '(', 'name_string', 'np', '.', 'array', ')', '...', ']', '.', 'Important', 'keys', ':', 'robot', '-', 'state', ':', 'contains', 'robot', '-', 'centric', 'information', '.']
train
https://github.com/StanfordVL/robosuite/blob/65cd16810e2ed647e3ec88746af3412065b7f278/robosuite/environments/baxter.py#L248-L312
3,416
fitnr/twitter_bot_utils
twitter_bot_utils/args.py
add_logger
def add_logger(name, level=None, format=None):
    '''
    Set up a stdout logger.

    Args:
        name (str): name of the logger
        level: defaults to logging.INFO
        format (str): format string for logging output.
            defaults to ``%(filename)-11s %(lineno)-3d: %(message)s``.

    Returns:
        The logger object.
    '''
    format = format or '%(filename)-11s %(lineno)-3d: %(message)s'
    log = logging.getLogger(name)

    # Set logging level.
    log.setLevel(level or logging.INFO)

    ch = logging.StreamHandler(sys.stdout)
    ch.setFormatter(logging.Formatter(format))
    log.addHandler(ch)

    return log
python
['def', 'add_logger', '(', 'name', ',', 'level', '=', 'None', ',', 'format', '=', 'None', ')', ':', 'format', '=', 'format', 'or', "'%(filename)-11s %(lineno)-3d: %(message)s'", 'log', '=', 'logging', '.', 'getLogger', '(', 'name', ')', '# Set logging level.', 'log', '.', 'setLevel', '(', 'level', 'or', 'logging', '.', 'INFO', ')', 'ch', '=', 'logging', '.', 'StreamHandler', '(', 'sys', '.', 'stdout', ')', 'ch', '.', 'setFormatter', '(', 'logging', '.', 'Formatter', '(', 'format', ')', ')', 'log', '.', 'addHandler', '(', 'ch', ')', 'return', 'log']
Set up a stdout logger. Args: name (str): name of the logger level: defaults to logging.INFO format (str): format string for logging output. defaults to ``%(filename)-11s %(lineno)-3d: %(message)s``. Returns: The logger object.
['Set', 'up', 'a', 'stdout', 'logger', '.']
train
https://github.com/fitnr/twitter_bot_utils/blob/21f35afa5048cd3efa54db8cb87d405f69a78a62/twitter_bot_utils/args.py#L69-L92
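A usage sketch for the helper above, assuming add_logger is in scope (the logger name is arbitrary):

import logging

log = add_logger("mybot", level=logging.DEBUG)
log.info("starting up")  # printed to stdout in the filename/lineno format shown above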
3,417
saltstack/salt
salt/utils/user.py
get_uid
def get_uid(user=None):
    '''
    Get the uid for a given user name. If no user given, the current euid
    will be returned. If the user does not exist, None will be returned. On
    systems which do not support pwd or os.geteuid, None will be returned.
    '''
    if not HAS_PWD:
        return None
    elif user is None:
        try:
            return os.geteuid()
        except AttributeError:
            return None
    else:
        try:
            return pwd.getpwnam(user).pw_uid
        except KeyError:
            return None
python
['def', 'get_uid', '(', 'user', '=', 'None', ')', ':', 'if', 'not', 'HAS_PWD', ':', 'return', 'None', 'elif', 'user', 'is', 'None', ':', 'try', ':', 'return', 'os', '.', 'geteuid', '(', ')', 'except', 'AttributeError', ':', 'return', 'None', 'else', ':', 'try', ':', 'return', 'pwd', '.', 'getpwnam', '(', 'user', ')', '.', 'pw_uid', 'except', 'KeyError', ':', 'return', 'None']
Get the uid for a given user name. If no user given, the current euid will be returned. If the user does not exist, None will be returned. On systems which do not support pwd or os.geteuid, None will be returned.
['Get', 'the', 'uid', 'for', 'a', 'given', 'user', 'name', '.', 'If', 'no', 'user', 'given', 'the', 'current', 'euid', 'will', 'be', 'returned', '.', 'If', 'the', 'user', 'does', 'not', 'exist', 'None', 'will', 'be', 'returned', '.', 'On', 'systems', 'which', 'do', 'not', 'support', 'pwd', 'or', 'os', '.', 'geteuid', 'None', 'will', 'be', 'returned', '.']
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/user.py#L69-L86
3,418
saltstack/salt
salt/renderers/gpg.py
_get_key_dir
def _get_key_dir():
    '''
    return the location of the GPG key directory
    '''
    gpg_keydir = None
    if 'config.get' in __salt__:
        gpg_keydir = __salt__['config.get']('gpg_keydir')
    if not gpg_keydir:
        gpg_keydir = __opts__.get(
            'gpg_keydir',
            os.path.join(
                __opts__.get(
                    'config_dir',
                    os.path.dirname(__opts__['conf_file']),
                ),
                'gpgkeys'
            ))
    return gpg_keydir
python
['def', '_get_key_dir', '(', ')', ':', 'gpg_keydir', '=', 'None', 'if', "'config.get'", 'in', '__salt__', ':', 'gpg_keydir', '=', '__salt__', '[', "'config.get'", ']', '(', "'gpg_keydir'", ')', 'if', 'not', 'gpg_keydir', ':', 'gpg_keydir', '=', '__opts__', '.', 'get', '(', "'gpg_keydir'", ',', 'os', '.', 'path', '.', 'join', '(', '__opts__', '.', 'get', '(', "'config_dir'", ',', 'os', '.', 'path', '.', 'dirname', '(', '__opts__', '[', "'conf_file'", ']', ')', ',', ')', ',', "'gpgkeys'", ')', ')', 'return', 'gpg_keydir']
return the location of the GPG key directory
['return', 'the', 'location', 'of', 'the', 'GPG', 'key', 'directory']
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/renderers/gpg.py#L307-L326
3,419
shazow/unstdlib.py
unstdlib/standard/string_.py
number_to_string
def number_to_string(n, alphabet):
    """
    Given an non-negative integer ``n``, convert it to a string composed of
    the given ``alphabet`` mapping, where the position of each element in
    ``alphabet`` is its radix value.

    Examples::

        >>> number_to_string(12345678, '01')
        '101111000110000101001110'

        >>> number_to_string(12345678, 'ab')
        'babbbbaaabbaaaababaabbba'

        >>> number_to_string(12345678, string.ascii_letters + string.digits)
        'ZXP0'

        >>> number_to_string(12345, ['zero ', 'one ', 'two ', 'three ', 'four ', 'five ', 'six ', 'seven ', 'eight ', 'nine '])
        'one two three four five '

    """
    result = ''
    base = len(alphabet)
    current = int(n)
    if current < 0:
        raise ValueError("invalid n (must be non-negative): %s", n)
    while current:
        result = alphabet[current % base] + result
        current = current // base
    return result
python
['def', 'number_to_string', '(', 'n', ',', 'alphabet', ')', ':', 'result', '=', "''", 'base', '=', 'len', '(', 'alphabet', ')', 'current', '=', 'int', '(', 'n', ')', 'if', 'current', '<', '0', ':', 'raise', 'ValueError', '(', '"invalid n (must be non-negative): %s"', ',', 'n', ')', 'while', 'current', ':', 'result', '=', 'alphabet', '[', 'current', '%', 'base', ']', '+', 'result', 'current', '=', 'current', '//', 'base', 'return', 'result']
Given an non-negative integer ``n``, convert it to a string composed of the given ``alphabet`` mapping, where the position of each element in ``alphabet`` is its radix value. Examples:: >>> number_to_string(12345678, '01') '101111000110000101001110' >>> number_to_string(12345678, 'ab') 'babbbbaaabbaaaababaabbba' >>> number_to_string(12345678, string.ascii_letters + string.digits) 'ZXP0' >>> number_to_string(12345, ['zero ', 'one ', 'two ', 'three ', 'four ', 'five ', 'six ', 'seven ', 'eight ', 'nine ']) 'one two three four five '
['Given', 'an', 'non', '-', 'negative', 'integer', 'n', 'convert', 'it', 'to', 'a', 'string', 'composed', 'of', 'the', 'given', 'alphabet', 'mapping', 'where', 'the', 'position', 'of', 'each', 'element', 'in', 'alphabet', 'is', 'its', 'radix', 'value', '.']
train
https://github.com/shazow/unstdlib.py/blob/e0632fe165cfbfdb5a7e4bc7b412c9d6f2ebad83/unstdlib/standard/string_.py#L53-L83
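One subtlety in the function above: the while loop never executes for n == 0, so zero encodes as the empty string. A quick check, plus an inverse written here purely for illustration (assuming number_to_string is in scope):

print(number_to_string(0, '01'))  # '' -- zero yields the empty string
print(number_to_string(5, '01'))  # '101'

# Illustrative inverse: recover n from its string form.
def string_to_number(s, alphabet):
    n = 0
    for ch in s:
        n = n * len(alphabet) + alphabet.index(ch)
    return n

print(string_to_number('101', '01'))  # 5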
3,420
Workiva/furious
furious/async.py
Async.context_id
def context_id(self):
    """Return this Async's Context Id if it exists."""
    if not self._context_id:
        self._context_id = self._get_context_id()
        self.update_options(context_id=self._context_id)

    return self._context_id
python
['def', 'context_id', '(', 'self', ')', ':', 'if', 'not', 'self', '.', '_context_id', ':', 'self', '.', '_context_id', '=', 'self', '.', '_get_context_id', '(', ')', 'self', '.', 'update_options', '(', 'context_id', '=', 'self', '.', '_context_id', ')', 'return', 'self', '.', '_context_id']
Return this Async's Context Id if it exists.
['Return', 'this', 'Async', 's', 'Context', 'Id', 'if', 'it', 'exists', '.']
train
https://github.com/Workiva/furious/blob/c29823ec8b98549e7439d7273aa064d1e5830632/furious/async.py#L515-L521
3,421
dpgaspar/Flask-AppBuilder
flask_appbuilder/cli.py
list_users
def list_users():
    """
    List all users on the database
    """
    echo_header("List of users")
    for user in current_app.appbuilder.sm.get_all_users():
        click.echo(
            "username:{0} | email:{1} | role:{2}".format(
                user.username, user.email, user.roles
            )
        )
python
['def', 'list_users', '(', ')', ':', 'echo_header', '(', '"List of users"', ')', 'for', 'user', 'in', 'current_app', '.', 'appbuilder', '.', 'sm', '.', 'get_all_users', '(', ')', ':', 'click', '.', 'echo', '(', '"username:{0} | email:{1} | role:{2}"', '.', 'format', '(', 'user', '.', 'username', ',', 'user', '.', 'email', ',', 'user', '.', 'roles', ')', ')']
List all users on the database
['List', 'all', 'users', 'on', 'the', 'database']
train
https://github.com/dpgaspar/Flask-AppBuilder/blob/c293734c1b86e176a3ba57ee2deab6676d125576/flask_appbuilder/cli.py#L163-L173
3,422
JdeRobot/base
src/drivers/MAVLinkServer/MAVProxy/pymavlink/dialects/v20/ardupilotmega.py
MAVLink.fence_fetch_point_send
def fence_fetch_point_send(self, target_system, target_component, idx, force_mavlink1=False):
    '''
    Request a current fence point from MAV

    target_system     : System ID (uint8_t)
    target_component  : Component ID (uint8_t)
    idx               : point index (first point is 1, 0 is for return point) (uint8_t)

    '''
    return self.send(self.fence_fetch_point_encode(target_system, target_component, idx), force_mavlink1=force_mavlink1)
python
['def', 'fence_fetch_point_send', '(', 'self', ',', 'target_system', ',', 'target_component', ',', 'idx', ',', 'force_mavlink1', '=', 'False', ')', ':', 'return', 'self', '.', 'send', '(', 'self', '.', 'fence_fetch_point_encode', '(', 'target_system', ',', 'target_component', ',', 'idx', ')', ',', 'force_mavlink1', '=', 'force_mavlink1', ')']
Request a current fence point from MAV target_system : System ID (uint8_t) target_component : Component ID (uint8_t) idx : point index (first point is 1, 0 is for return point) (uint8_t)
['Request', 'a', 'current', 'fence', 'point', 'from', 'MAV']
train
https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/pymavlink/dialects/v20/ardupilotmega.py#L10005-L10014
3,423
StackStorm/pybind
pybind/nos/v6_0_2f/interface/port_channel/switchport/private_vlan/__init__.py
private_vlan._set_association
def _set_association(self, v, load=False):
    """
    Setter method for association, mapped from YANG variable /interface/port_channel/switchport/private_vlan/association (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_association is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_association() directly.

    YANG Description: Association
    """
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v, base=association.association, is_container='container', presence=False, yang_name="association", rest_name="association", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'trunk-association', u'sort-priority': u'RUNNCFG_INTERFACE_LEVEL_PVLAN_ASSOCIATION'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """association must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=association.association, is_container='container', presence=False, yang_name="association", rest_name="association", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'trunk-association', u'sort-priority': u'RUNNCFG_INTERFACE_LEVEL_PVLAN_ASSOCIATION'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)""",
        })

    self.__association = t
    if hasattr(self, '_set'):
        self._set()
python
['def', '_set_association', '(', 'self', ',', 'v', ',', 'load', '=', 'False', ')', ':', 'if', 'hasattr', '(', 'v', ',', '"_utype"', ')', ':', 'v', '=', 'v', '.', '_utype', '(', 'v', ')', 'try', ':', 't', '=', 'YANGDynClass', '(', 'v', ',', 'base', '=', 'association', '.', 'association', ',', 'is_container', '=', "'container'", ',', 'presence', '=', 'False', ',', 'yang_name', '=', '"association"', ',', 'rest_name', '=', '"association"', ',', 'parent', '=', 'self', ',', 'path_helper', '=', 'self', '.', '_path_helper', ',', 'extmethods', '=', 'self', '.', '_extmethods', ',', 'register_paths', '=', 'True', ',', 'extensions', '=', '{', "u'tailf-common'", ':', '{', "u'info'", ':', "u'trunk-association'", ',', "u'sort-priority'", ':', "u'RUNNCFG_INTERFACE_LEVEL_PVLAN_ASSOCIATION'", '}', '}', ',', 'namespace', '=', "'urn:brocade.com:mgmt:brocade-interface'", ',', 'defining_module', '=', "'brocade-interface'", ',', 'yang_type', '=', "'container'", ',', 'is_config', '=', 'True', ')', 'except', '(', 'TypeError', ',', 'ValueError', ')', ':', 'raise', 'ValueError', '(', '{', "'error-string'", ':', '"""association must be of a type compatible with container"""', ',', "'defined-type'", ':', '"container"', ',', "'generated-type'", ':', '"""YANGDynClass(base=association.association, is_container=\'container\', presence=False, yang_name="association", rest_name="association", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u\'tailf-common\': {u\'info\': u\'trunk-association\', u\'sort-priority\': u\'RUNNCFG_INTERFACE_LEVEL_PVLAN_ASSOCIATION\'}}, namespace=\'urn:brocade.com:mgmt:brocade-interface\', defining_module=\'brocade-interface\', yang_type=\'container\', is_config=True)"""', ',', '}', ')', 'self', '.', '__association', '=', 't', 'if', 'hasattr', '(', 'self', ',', "'_set'", ')', ':', 'self', '.', '_set', '(', ')']
Setter method for association, mapped from YANG variable /interface/port_channel/switchport/private_vlan/association (container) If this variable is read-only (config: false) in the source YANG file, then _set_association is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_association() directly. YANG Description: Association
['Setter', 'method', 'for', 'association', 'mapped', 'from', 'YANG', 'variable', '/', 'interface', '/', 'port_channel', '/', 'switchport', '/', 'private_vlan', '/', 'association', '(', 'container', ')', 'If', 'this', 'variable', 'is', 'read', '-', 'only', '(', 'config', ':', 'false', ')', 'in', 'the', 'source', 'YANG', 'file', 'then', '_set_association', 'is', 'considered', 'as', 'a', 'private', 'method', '.', 'Backends', 'looking', 'to', 'populate', 'this', 'variable', 'should', 'do', 'so', 'via', 'calling', 'thisObj', '.', '_set_association', '()', 'directly', '.']
train
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/nos/v6_0_2f/interface/port_channel/switchport/private_vlan/__init__.py#L176-L199
3,424
libtcod/python-tcod
tcod/path.py
_get_pathcost_func
def _get_pathcost_func( name: str ) -> Callable[[int, int, int, int, Any], float]: """Return a properly cast PathCostArray callback.""" return ffi.cast( # type: ignore "TCOD_path_func_t", ffi.addressof(lib, name) )
python
def _get_pathcost_func( name: str ) -> Callable[[int, int, int, int, Any], float]: """Return a properly cast PathCostArray callback.""" return ffi.cast( # type: ignore "TCOD_path_func_t", ffi.addressof(lib, name) )
['def', '_get_pathcost_func', '(', 'name', ':', 'str', ')', '->', 'Callable', '[', '[', 'int', ',', 'int', ',', 'int', ',', 'int', ',', 'Any', ']', ',', 'float', ']', ':', 'return', 'ffi', '.', 'cast', '(', '# type: ignore', '"TCOD_path_func_t"', ',', 'ffi', '.', 'addressof', '(', 'lib', ',', 'name', ')', ')']
Return a properly cast PathCostArray callback.
['Return', 'a', 'properly', 'cast', 'PathCostArray', 'callback', '.']
train
https://github.com/libtcod/python-tcod/blob/8ba10c5cfb813eaf3e834de971ba2d6acb7838e4/tcod/path.py#L82-L88
3,425
dmcc/PyStanfordDependencies
StanfordDependencies/CoNLL.py
Token.from_conll
def from_conll(this_class, text): """Construct a Token from a line in CoNLL-X format.""" fields = text.split('\t') fields[0] = int(fields[0]) # index fields[6] = int(fields[6]) # head index if fields[5] != '_': # feats fields[5] = tuple(fields[5].split('|')) fields = [value if value != '_' else None for value in fields] fields.append(None) # for extra return this_class(**dict(zip(FIELD_NAMES_PLUS, fields)))
python
def from_conll(this_class, text): """Construct a Token from a line in CoNLL-X format.""" fields = text.split('\t') fields[0] = int(fields[0]) # index fields[6] = int(fields[6]) # head index if fields[5] != '_': # feats fields[5] = tuple(fields[5].split('|')) fields = [value if value != '_' else None for value in fields] fields.append(None) # for extra return this_class(**dict(zip(FIELD_NAMES_PLUS, fields)))
['def', 'from_conll', '(', 'this_class', ',', 'text', ')', ':', 'fields', '=', 'text', '.', 'split', '(', "'\\t'", ')', 'fields', '[', '0', ']', '=', 'int', '(', 'fields', '[', '0', ']', ')', '# index', 'fields', '[', '6', ']', '=', 'int', '(', 'fields', '[', '6', ']', ')', '# head index', 'if', 'fields', '[', '5', ']', '!=', "'_'", ':', '# feats', 'fields', '[', '5', ']', '=', 'tuple', '(', 'fields', '[', '5', ']', '.', 'split', '(', "'|'", ')', ')', 'fields', '=', '[', 'value', 'if', 'value', '!=', "'_'", 'else', 'None', 'for', 'value', 'in', 'fields', ']', 'fields', '.', 'append', '(', 'None', ')', '# for extra', 'return', 'this_class', '(', '*', '*', 'dict', '(', 'zip', '(', 'FIELD_NAMES_PLUS', ',', 'fields', ')', ')', ')']
Construct a Token from a line in CoNLL-X format.
['Construct', 'a', 'Token', 'from', 'a', 'line', 'in', 'CoNLL', '-', 'X', 'format', '.']
train
https://github.com/dmcc/PyStanfordDependencies/blob/43d8f38a19e40087f273330087918c87df6d4d8f/StanfordDependencies/CoNLL.py#L85-L94
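A hedged usage sketch for the record above: one CoNLL-X line carries ten tab-separated fields, and from_conll maps them onto a Token. The import path follows this repo's layout; the attribute names (.index, .form, .head, .deprel) are assumptions based on CoNLL-X conventions and FIELD_NAMES_PLUS.
```python
# Sketch only: assumes Token.from_conll is bound as a classmethod (as the
# this_class parameter suggests) and that FIELD_NAMES_PLUS uses the usual
# CoNLL-X field names.
from StanfordDependencies.CoNLL import Token

line = '1\tHello\thello\tUH\tUH\t_\t2\tdiscourse\t_\t_'
token = Token.from_conll(line)
# '_' fields become None; the index and head fields are cast to int
print(token.index, token.form, token.head, token.deprel)  # 1 Hello 2 discourse
```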
3,426
bjodah/chempy
chempy/equilibria.py
EqSystem.dissolved
def dissolved(self, concs): """ Return dissolved concentrations """ new_concs = concs.copy() for r in self.rxns: if r.has_precipitates(self.substances): net_stoich = np.asarray(r.net_stoich(self.substances)) s_net, s_stoich, s_idx = r.precipitate_stoich(self.substances) new_concs -= new_concs[s_idx]/s_stoich * net_stoich return new_concs
python
def dissolved(self, concs): """ Return dissolved concentrations """ new_concs = concs.copy() for r in self.rxns: if r.has_precipitates(self.substances): net_stoich = np.asarray(r.net_stoich(self.substances)) s_net, s_stoich, s_idx = r.precipitate_stoich(self.substances) new_concs -= new_concs[s_idx]/s_stoich * net_stoich return new_concs
['def', 'dissolved', '(', 'self', ',', 'concs', ')', ':', 'new_concs', '=', 'concs', '.', 'copy', '(', ')', 'for', 'r', 'in', 'self', '.', 'rxns', ':', 'if', 'r', '.', 'has_precipitates', '(', 'self', '.', 'substances', ')', ':', 'net_stoich', '=', 'np', '.', 'asarray', '(', 'r', '.', 'net_stoich', '(', 'self', '.', 'substances', ')', ')', 's_net', ',', 's_stoich', ',', 's_idx', '=', 'r', '.', 'precipitate_stoich', '(', 'self', '.', 'substances', ')', 'new_concs', '-=', 'new_concs', '[', 's_idx', ']', '/', 's_stoich', '*', 'net_stoich', 'return', 'new_concs']
Return dissolved concentrations
['Return', 'dissolved', 'concentrations']
train
https://github.com/bjodah/chempy/blob/bd62c3e1f7cb797782471203acd3bcf23b21c47e/chempy/equilibria.py#L93-L101
3,427
oasis-open/cti-stix-validator
stix2validator/output.py
print_results
def print_results(results): """Print `results` (the results of validation) to stdout. Args: results: A list of FileValidationResults or ObjectValidationResults instances. """ if not isinstance(results, list): results = [results] for r in results: try: r.log() except AttributeError: raise ValueError('Argument to print_results() must be a list of ' 'FileValidationResults or ObjectValidationResults.')
python
def print_results(results): """Print `results` (the results of validation) to stdout. Args: results: A list of FileValidationResults or ObjectValidationResults instances. """ if not isinstance(results, list): results = [results] for r in results: try: r.log() except AttributeError: raise ValueError('Argument to print_results() must be a list of ' 'FileValidationResults or ObjectValidationResults.')
['def', 'print_results', '(', 'results', ')', ':', 'if', 'not', 'isinstance', '(', 'results', ',', 'list', ')', ':', 'results', '=', '[', 'results', ']', 'for', 'r', 'in', 'results', ':', 'try', ':', 'r', '.', 'log', '(', ')', 'except', 'AttributeError', ':', 'raise', 'ValueError', '(', "'Argument to print_results() must be a list of '", "'FileValidationResults or ObjectValidationResults.'", ')']
Print `results` (the results of validation) to stdout. Args: results: A list of FileValidationResults or ObjectValidationResults instances.
['Print', 'results', '(', 'the', 'results', 'of', 'validation', ')', 'to', 'stdout', '.']
train
https://github.com/oasis-open/cti-stix-validator/blob/a607014e3fa500a7678f8b61b278456ca581f9d0/stix2validator/output.py#L191-L207
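A hedged usage sketch: validate_string is assumed to be the companion entry point in stix2validator, and the bundle JSON below is a minimal illustrative input, not a complete STIX example.
```python
from stix2validator import validate_string, print_results

bundle = """
{
  "type": "bundle",
  "id": "bundle--44af6c39-c09b-49c5-9de2-394224b04982",
  "spec_version": "2.0",
  "objects": []
}
"""
results = validate_string(bundle)
print_results(results)  # a single result object is wrapped into a list internally
```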
3,428
gwpy/gwpy
gwpy/plot/colorbar.py
find_mappable
def find_mappable(*axes): """Find the most recently added mappable layer in the given axes Parameters ---------- *axes : `~matplotlib.axes.Axes` one or more axes to search for a mappable """ for ax in axes: for aset in ('collections', 'images'): try: return getattr(ax, aset)[-1] except (AttributeError, IndexError): continue raise ValueError("Cannot determine mappable layer on any axes " "for this colorbar")
python
def find_mappable(*axes): """Find the most recently added mappable layer in the given axes Parameters ---------- *axes : `~matplotlib.axes.Axes` one or more axes to search for a mappable """ for ax in axes: for aset in ('collections', 'images'): try: return getattr(ax, aset)[-1] except (AttributeError, IndexError): continue raise ValueError("Cannot determine mappable layer on any axes " "for this colorbar")
['def', 'find_mappable', '(', '*', 'axes', ')', ':', 'for', 'ax', 'in', 'axes', ':', 'for', 'aset', 'in', '(', "'collections'", ',', "'images'", ')', ':', 'try', ':', 'return', 'getattr', '(', 'ax', ',', 'aset', ')', '[', '-', '1', ']', 'except', '(', 'AttributeError', ',', 'IndexError', ')', ':', 'continue', 'raise', 'ValueError', '(', '"Cannot determine mappable layer on any axes "', '"for this colorbar"', ')']
Find the most recently added mappable layer in the given axes Parameters ---------- *axes : `~matplotlib.axes.Axes` one or more axes to search for a mappable
['Find', 'the', 'most', 'recently', 'added', 'mappable', 'layer', 'in', 'the', 'given', 'axes']
train
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/plot/colorbar.py#L99-L114
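A minimal sketch of what the helper returns: the most recently added mappable on the axes, here a scatter PathCollection. The import path mirrors this module's location in the repo.
```python
import matplotlib.pyplot as plt
from gwpy.plot.colorbar import find_mappable  # import path assumed

fig, ax = plt.subplots()
scatter = ax.scatter([0, 1, 2], [0, 1, 4], c=[0.1, 0.5, 0.9])
# the last entry of ax.collections is the scatter layer, so it is returned
assert find_mappable(ax) is scatter
```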
3,429
networks-lab/metaknowledge
metaknowledge/graphHelpers.py
getWeight
def getWeight(grph, nd1, nd2, weightString = "weight", returnType = int): """ A way of getting the weight of an edge with or without weight as a parameter returns a the value of the weight parameter converted to returnType if it is given or 1 (also converted) if not """ if not weightString: return returnType(1) else: return returnType(grph.edges[nd1, nd2][weightString])
python
def getWeight(grph, nd1, nd2, weightString = "weight", returnType = int): """ A way of getting the weight of an edge with or without weight as a parameter returns a the value of the weight parameter converted to returnType if it is given or 1 (also converted) if not """ if not weightString: return returnType(1) else: return returnType(grph.edges[nd1, nd2][weightString])
['def', 'getWeight', '(', 'grph', ',', 'nd1', ',', 'nd2', ',', 'weightString', '=', '"weight"', ',', 'returnType', '=', 'int', ')', ':', 'if', 'not', 'weightString', ':', 'return', 'returnType', '(', '1', ')', 'else', ':', 'return', 'returnType', '(', 'grph', '.', 'edges', '[', 'nd1', ',', 'nd2', ']', '[', 'weightString', ']', ')']
A way of getting the weight of an edge with or without weight as a parameter returns a the value of the weight parameter converted to returnType if it is given or 1 (also converted) if not
['A', 'way', 'of', 'getting', 'the', 'weight', 'of', 'an', 'edge', 'with', 'or', 'without', 'weight', 'as', 'a', 'parameter', 'returns', 'a', 'the', 'value', 'of', 'the', 'weight', 'parameter', 'converted', 'to', 'returnType', 'if', 'it', 'is', 'given', 'or', '1', '(', 'also', 'converted', ')', 'if', 'not']
train
https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/graphHelpers.py#L447-L455
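A self-contained sketch with networkx, whose 2.x `grph.edges[nd1, nd2]` indexing matches the helper above; the import path is assumed from this repo's layout.
```python
import networkx as nx
from metaknowledge.graphHelpers import getWeight  # import path assumed

g = nx.Graph()
g.add_edge('a', 'b', weight=2.5)
print(getWeight(g, 'a', 'b'))                     # 2 (cast to int by default)
print(getWeight(g, 'a', 'b', returnType=float))   # 2.5
print(getWeight(g, 'a', 'b', weightString=None))  # 1 (weight ignored)
```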
3,430
restran/mountains
mountains/file/__init__.py
read_dict
def read_dict(file_name, clear_none=False, encoding='utf-8'): """ Read a dictionary (wordlist) file. :param encoding: :param clear_none: :param file_name: :return: """ with open(file_name, 'rb') as f: data = f.read() if encoding is not None: data = data.decode(encoding) line_list = data.splitlines() data = [] i = 0 for line in line_list: i += 1 try: line = force_text(line).strip() data.append(line) except Exception: print('read error line %s' % i) if clear_none: data = [t for t in data if t != ''] data = deque(data) return data
python
def read_dict(file_name, clear_none=False, encoding='utf-8'): """ Read a dictionary (wordlist) file. :param encoding: :param clear_none: :param file_name: :return: """ with open(file_name, 'rb') as f: data = f.read() if encoding is not None: data = data.decode(encoding) line_list = data.splitlines() data = [] i = 0 for line in line_list: i += 1 try: line = force_text(line).strip() data.append(line) except Exception: print('read error line %s' % i) if clear_none: data = [t for t in data if t != ''] data = deque(data) return data
['def', 'read_dict', '(', 'file_name', ',', 'clear_none', '=', 'False', ',', 'encoding', '=', "'utf-8'", ')', ':', 'with', 'open', '(', 'file_name', ',', "'rb'", ')', 'as', 'f', ':', 'data', '=', 'f', '.', 'read', '(', ')', 'if', 'encoding', 'is', 'not', 'None', ':', 'data', '=', 'data', '.', 'decode', '(', 'encoding', ')', 'line_list', '=', 'data', '.', 'splitlines', '(', ')', 'data', '=', '[', ']', 'i', '=', '0', 'for', 'line', 'in', 'line_list', ':', 'i', '+=', '1', 'try', ':', 'line', '=', 'force_text', '(', 'line', ')', '.', 'strip', '(', ')', 'data', '.', 'append', '(', 'line', ')', 'except', 'Exception', ':', 'print', '(', "'read error line %s'", '%', 'i', ')', 'if', 'clear_none', ':', 'data', '=', '[', 't', 'for', 't', 'in', 'data', 'if', 't', '!=', "''", ']', 'data', '=', 'deque', '(', 'data', ')', 'return', 'data']
Read a dictionary (wordlist) file. :param encoding: :param clear_none: :param file_name: :return:
['Read', 'a', 'dictionary', '(', 'wordlist', ')', 'file', '.', ':', 'param', 'encoding', ':', ':', 'param', 'clear_none', ':', ':', 'param', 'file_name', ':', ':', 'return', ':']
train
https://github.com/restran/mountains/blob/a97fee568b112f4e10d878f815d0db3dd0a98d74/mountains/file/__init__.py#L9-L36
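A hedged usage sketch: read_dict yields a deque of stripped lines, optionally dropping empty ones; the import path follows this repo's layout.
```python
from mountains.file import read_dict  # import path assumed

with open('wordlist.txt', 'w', encoding='utf-8') as f:
    f.write('admin\n\nroot\n')

words = read_dict('wordlist.txt', clear_none=True)
print(list(words))  # ['admin', 'root'] -- the blank line is dropped
```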
3,431
google/grr
grr/client/grr_response_client/comms.py
Timer.Wait
def Wait(self): """Wait until the next action is needed.""" time.sleep(self.sleep_time - int(self.sleep_time)) # Split a long sleep interval into 1 second intervals so we can heartbeat. for _ in range(int(self.sleep_time)): time.sleep(1) # Back off slowly at first and fast if no answer. self.sleep_time = min(self.poll_max, max(self.poll_min, self.sleep_time) * self.poll_slew)
python
def Wait(self): """Wait until the next action is needed.""" time.sleep(self.sleep_time - int(self.sleep_time)) # Split a long sleep interval into 1 second intervals so we can heartbeat. for _ in range(int(self.sleep_time)): time.sleep(1) # Back off slowly at first and fast if no answer. self.sleep_time = min(self.poll_max, max(self.poll_min, self.sleep_time) * self.poll_slew)
['def', 'Wait', '(', 'self', ')', ':', 'time', '.', 'sleep', '(', 'self', '.', 'sleep_time', '-', 'int', '(', 'self', '.', 'sleep_time', ')', ')', '# Split a long sleep interval into 1 second intervals so we can heartbeat.', 'for', '_', 'in', 'range', '(', 'int', '(', 'self', '.', 'sleep_time', ')', ')', ':', 'time', '.', 'sleep', '(', '1', ')', '# Back off slowly at first and fast if no answer.', 'self', '.', 'sleep_time', '=', 'min', '(', 'self', '.', 'poll_max', ',', 'max', '(', 'self', '.', 'poll_min', ',', 'self', '.', 'sleep_time', ')', '*', 'self', '.', 'poll_slew', ')']
Wait until the next action is needed.
['Wait', 'until', 'the', 'next', 'action', 'is', 'needed', '.']
train
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/client/grr_response_client/comms.py#L468-L478
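A standalone sketch of just the backoff arithmetic used above: each round multiplies the clamped sleep time by poll_slew, so polling backs off toward poll_max. The constants are illustrative, not GRR's real configuration values.
```python
# Illustrative constants, not GRR's real configuration values.
poll_min, poll_max, poll_slew = 0.2, 600.0, 1.15

sleep_time = poll_min
for _ in range(5):
    # same rule as Timer.Wait: clamp, then slew upwards
    sleep_time = min(poll_max, max(poll_min, sleep_time) * poll_slew)
    print(round(sleep_time, 3))  # roughly 0.23, 0.264, 0.304, 0.35, 0.402
```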
3,432
polysquare/cmake-ast
cmakeast/ast.py
_MultilineStringRecorder.maybe_start_recording
def maybe_start_recording(tokens, index): """Return a new _MultilineStringRecorder when it's time to record.""" if _is_begin_quoted_type(tokens[index].type): string_type = _get_string_type_from_token(tokens[index].type) return _MultilineStringRecorder(index, string_type) return None
python
def maybe_start_recording(tokens, index): """Return a new _MultilineStringRecorder when it's time to record.""" if _is_begin_quoted_type(tokens[index].type): string_type = _get_string_type_from_token(tokens[index].type) return _MultilineStringRecorder(index, string_type) return None
['def', 'maybe_start_recording', '(', 'tokens', ',', 'index', ')', ':', 'if', '_is_begin_quoted_type', '(', 'tokens', '[', 'index', ']', '.', 'type', ')', ':', 'string_type', '=', '_get_string_type_from_token', '(', 'tokens', '[', 'index', ']', '.', 'type', ')', 'return', '_MultilineStringRecorder', '(', 'index', ',', 'string_type', ')', 'return', 'None']
Return a new _MultilineStringRecorder when it's time to record.
['Return', 'a', 'new', '_MultilineStringRecorder', 'when', "it's", 'time', 'to', 'record', '.']
train
https://github.com/polysquare/cmake-ast/blob/431a32d595d76f1f8f993eb6ddcc79effbadff9d/cmakeast/ast.py#L696-L702
3,433
collectiveacuity/labPack
labpack/events/meetup.py
meetupClient.get_member_calendar
def get_member_calendar(self, max_results=0): ''' a method to retrieve the upcoming events for all groups member belongs to :param max_results: [optional] integer with number of events to include :return: dictionary with list of event details inside [json] key event_details = self._reconstruct_event({}) ''' # https://www.meetup.com/meetup_api/docs/self/calendar/#list title = '%s.get_member_calendar' % self.__class__.__name__ # validate inputs input_fields = { 'max_results': max_results } for key, value in input_fields.items(): if value: object_title = '%s(%s=%s)' % (title, key, str(value)) self.fields.validate(value, '.%s' % key, object_title) # construct request fields url = '%s/self/calendar' % self.endpoint params = { 'fields': 'comment_count,event_hosts,rsvp_rules,short_link,survey_questions,rsvpable' } if max_results: params['page'] = str(max_results) # send requests response_details = self._get_request(url, params=params) # construct method output member_calendar = { 'json': [] } for key, value in response_details.items(): if key != 'json': member_calendar[key] = value for event in response_details['json']: member_calendar['json'].append(self._reconstruct_event(event)) return member_calendar
python
def get_member_calendar(self, max_results=0): ''' a method to retrieve the upcoming events for all groups member belongs to :param max_results: [optional] integer with number of events to include :return: dictionary with list of event details inside [json] key event_details = self._reconstruct_event({}) ''' # https://www.meetup.com/meetup_api/docs/self/calendar/#list title = '%s.get_member_calendar' % self.__class__.__name__ # validate inputs input_fields = { 'max_results': max_results } for key, value in input_fields.items(): if value: object_title = '%s(%s=%s)' % (title, key, str(value)) self.fields.validate(value, '.%s' % key, object_title) # construct request fields url = '%s/self/calendar' % self.endpoint params = { 'fields': 'comment_count,event_hosts,rsvp_rules,short_link,survey_questions,rsvpable' } if max_results: params['page'] = str(max_results) # send requests response_details = self._get_request(url, params=params) # construct method output member_calendar = { 'json': [] } for key, value in response_details.items(): if key != 'json': member_calendar[key] = value for event in response_details['json']: member_calendar['json'].append(self._reconstruct_event(event)) return member_calendar
['def', 'get_member_calendar', '(', 'self', ',', 'max_results', '=', '0', ')', ':', '# https://www.meetup.com/meetup_api/docs/self/calendar/#list\r', 'title', '=', "'%s.get_member_calendar'", '%', 'self', '.', '__class__', '.', '__name__', '# validate inputs\r', 'input_fields', '=', '{', "'max_results'", ':', 'max_results', '}', 'for', 'key', ',', 'value', 'in', 'input_fields', '.', 'items', '(', ')', ':', 'if', 'value', ':', 'object_title', '=', "'%s(%s=%s)'", '%', '(', 'title', ',', 'key', ',', 'str', '(', 'value', ')', ')', 'self', '.', 'fields', '.', 'validate', '(', 'value', ',', "'.%s'", '%', 'key', ',', 'object_title', ')', '# construct request fields\r', 'url', '=', "'%s/self/calendar'", '%', 'self', '.', 'endpoint', 'params', '=', '{', "'fields'", ':', "'comment_count,event_hosts,rsvp_rules,short_link,survey_questions,rsvpable'", '}', 'if', 'max_results', ':', 'params', '[', "'page'", ']', '=', 'str', '(', 'max_results', ')', '# send requests\r', 'response_details', '=', 'self', '.', '_get_request', '(', 'url', ',', 'params', '=', 'params', ')', '# construct method output\r', 'member_calendar', '=', '{', "'json'", ':', '[', ']', '}', 'for', 'key', ',', 'value', 'in', 'response_details', '.', 'items', '(', ')', ':', 'if', 'key', '!=', "'json'", ':', 'member_calendar', '[', 'key', ']', '=', 'value', 'for', 'event', 'in', 'response_details', '[', "'json'", ']', ':', 'member_calendar', '[', "'json'", ']', '.', 'append', '(', 'self', '.', '_reconstruct_event', '(', 'event', ')', ')', 'return', 'member_calendar']
a method to retrieve the upcoming events for all groups member belongs to :param max_results: [optional] integer with number of events to include :return: dictionary with list of event details inside [json] key event_details = self._reconstruct_event({})
['a', 'method', 'to', 'retrieve', 'the', 'upcoming', 'events', 'for', 'all', 'groups', 'member', 'belongs', 'to', ':', 'param', 'max_results', ':', '[', 'optional', ']', 'integer', 'with', 'number', 'of', 'events', 'to', 'include', ':', 'return', ':', 'dictionary', 'with', 'list', 'of', 'event', 'details', 'inside', '[', 'json', ']', 'key', 'event_details', '=', 'self', '.', '_reconstruct_event', '(', '{}', ')']
train
https://github.com/collectiveacuity/labPack/blob/52949ece35e72e3cc308f54d9ffa6bfbd96805b8/labpack/events/meetup.py#L1384-L1428
3,434
gabstopper/smc-python
smc/elements/network.py
Network.create
def create(cls, name, ipv4_network=None, ipv6_network=None, comment=None): """ Create the network element :param str name: Name of element :param str ipv4_network: network cidr (optional if ipv6) :param str ipv6_network: network cidr (optional if ipv4) :param str comment: comment (optional) :raises CreateElementFailed: element creation failed with reason :return: instance with meta :rtype: Network .. note:: Either an ipv4_network or ipv6_network must be specified """ ipv4_network = ipv4_network if ipv4_network else None ipv6_network = ipv6_network if ipv6_network else None json = {'name': name, 'ipv4_network': ipv4_network, 'ipv6_network': ipv6_network, 'comment': comment} return ElementCreator(cls, json)
python
def create(cls, name, ipv4_network=None, ipv6_network=None, comment=None): """ Create the network element :param str name: Name of element :param str ipv4_network: network cidr (optional if ipv6) :param str ipv6_network: network cidr (optional if ipv4) :param str comment: comment (optional) :raises CreateElementFailed: element creation failed with reason :return: instance with meta :rtype: Network .. note:: Either an ipv4_network or ipv6_network must be specified """ ipv4_network = ipv4_network if ipv4_network else None ipv6_network = ipv6_network if ipv6_network else None json = {'name': name, 'ipv4_network': ipv4_network, 'ipv6_network': ipv6_network, 'comment': comment} return ElementCreator(cls, json)
['def', 'create', '(', 'cls', ',', 'name', ',', 'ipv4_network', '=', 'None', ',', 'ipv6_network', '=', 'None', ',', 'comment', '=', 'None', ')', ':', 'ipv4_network', '=', 'ipv4_network', 'if', 'ipv4_network', 'else', 'None', 'ipv6_network', '=', 'ipv6_network', 'if', 'ipv6_network', 'else', 'None', 'json', '=', '{', "'name'", ':', 'name', ',', "'ipv4_network'", ':', 'ipv4_network', ',', "'ipv6_network'", ':', 'ipv6_network', ',', "'comment'", ':', 'comment', '}', 'return', 'ElementCreator', '(', 'cls', ',', 'json', ')']
Create the network element :param str name: Name of element :param str ipv4_network: network cidr (optional if ipv6) :param str ipv6_network: network cidr (optional if ipv4) :param str comment: comment (optional) :raises CreateElementFailed: element creation failed with reason :return: instance with meta :rtype: Network .. note:: Either an ipv4_network or ipv6_network must be specified
['Create', 'the', 'network', 'element']
train
https://github.com/gabstopper/smc-python/blob/e027b8a5dcfaf884eada32d113d41c1e56b32457/smc/elements/network.py#L186-L208
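A hedged usage sketch: smc-python elements are created against an authenticated session, so the login URL and API key below are placeholders.
```python
from smc import session
from smc.elements.network import Network

session.login(url='http://smc.example.com:8082', api_key='<api-key>')  # placeholders
net = Network.create(name='lab-net', ipv4_network='10.10.0.0/24',
                     comment='created via smc-python')
session.logout()
```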
3,435
classam/silly
silly/main.py
plural
def plural(random=random, *args, **kwargs): """ Return a plural noun. >>> mock_random.seed(0) >>> plural(random=mock_random) 'onions' >>> plural(random=mock_random, capitalize=True) 'Chimps' >>> plural(random=mock_random, slugify=True) 'blisters' """ return inflectify.plural(random.choice(nouns))
python
def plural(random=random, *args, **kwargs): """ Return a plural noun. >>> mock_random.seed(0) >>> plural(random=mock_random) 'onions' >>> plural(random=mock_random, capitalize=True) 'Chimps' >>> plural(random=mock_random, slugify=True) 'blisters' """ return inflectify.plural(random.choice(nouns))
['def', 'plural', '(', 'random', '=', 'random', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'return', 'inflectify', '.', 'plural', '(', 'random', '.', 'choice', '(', 'nouns', ')', ')']
Return a plural noun. >>> mock_random.seed(0) >>> plural(random=mock_random) 'onions' >>> plural(random=mock_random, capitalize=True) 'Chimps' >>> plural(random=mock_random, slugify=True) 'blisters'
['Return', 'a', 'plural', 'noun', '.']
train
https://github.com/classam/silly/blob/f3202e997d5ebc9e4f98370b08665fd1178a9556/silly/main.py#L830-L842
3,436
chezou/tabula-py
tabula/file_util.py
localize_file
def localize_file(path_or_buffer): '''Ensure the target file is local. If the target file is remote, this function fetches it into local storage. Args: path_or_buffer (str): File path or file like object or URL of target file. Returns: filename (str): file name in local storage temporary_file_flag (bool): temporary file flag ''' path_or_buffer = _stringify_path(path_or_buffer) if _is_url(path_or_buffer): req = urlopen(path_or_buffer) filename = os.path.basename(req.geturl()) if os.path.splitext(filename)[-1] != ".pdf": pid = os.getpid() filename = "{0}.pdf".format(pid) with open(filename, 'wb') as f: shutil.copyfileobj(req, f) return filename, True elif is_file_like(path_or_buffer): pid = os.getpid() filename = "{0}.pdf".format(pid) with open(filename, 'wb') as f: shutil.copyfileobj(path_or_buffer, f) return filename, True # File path case else: return os.path.expanduser(path_or_buffer), False
python
def localize_file(path_or_buffer): '''Ensure the target file is local. If the target file is remote, this function fetches it into local storage. Args: path_or_buffer (str): File path or file like object or URL of target file. Returns: filename (str): file name in local storage temporary_file_flag (bool): temporary file flag ''' path_or_buffer = _stringify_path(path_or_buffer) if _is_url(path_or_buffer): req = urlopen(path_or_buffer) filename = os.path.basename(req.geturl()) if os.path.splitext(filename)[-1] != ".pdf": pid = os.getpid() filename = "{0}.pdf".format(pid) with open(filename, 'wb') as f: shutil.copyfileobj(req, f) return filename, True elif is_file_like(path_or_buffer): pid = os.getpid() filename = "{0}.pdf".format(pid) with open(filename, 'wb') as f: shutil.copyfileobj(path_or_buffer, f) return filename, True # File path case else: return os.path.expanduser(path_or_buffer), False
['def', 'localize_file', '(', 'path_or_buffer', ')', ':', 'path_or_buffer', '=', '_stringify_path', '(', 'path_or_buffer', ')', 'if', '_is_url', '(', 'path_or_buffer', ')', ':', 'req', '=', 'urlopen', '(', 'path_or_buffer', ')', 'filename', '=', 'os', '.', 'path', '.', 'basename', '(', 'req', '.', 'geturl', '(', ')', ')', 'if', 'os', '.', 'path', '.', 'splitext', '(', 'filename', ')', '[', '-', '1', ']', '!=', '".pdf"', ':', 'pid', '=', 'os', '.', 'getpid', '(', ')', 'filename', '=', '"{0}.pdf"', '.', 'format', '(', 'pid', ')', 'with', 'open', '(', 'filename', ',', "'wb'", ')', 'as', 'f', ':', 'shutil', '.', 'copyfileobj', '(', 'req', ',', 'f', ')', 'return', 'filename', ',', 'True', 'elif', 'is_file_like', '(', 'path_or_buffer', ')', ':', 'pid', '=', 'os', '.', 'getpid', '(', ')', 'filename', '=', '"{0}.pdf"', '.', 'format', '(', 'pid', ')', 'with', 'open', '(', 'filename', ',', "'wb'", ')', 'as', 'f', ':', 'shutil', '.', 'copyfileobj', '(', 'path_or_buffer', ',', 'f', ')', 'return', 'filename', ',', 'True', '# File path case', 'else', ':', 'return', 'os', '.', 'path', '.', 'expanduser', '(', 'path_or_buffer', ')', ',', 'False']
Ensure the target file is local. If the target file is remote, this function fetches it into local storage. Args: path_or_buffer (str): File path or file like object or URL of target file. Returns: filename (str): file name in local storage temporary_file_flag (bool): temporary file flag
['Ensure', 'the', 'target', 'file', 'is', 'local', '.']
train
https://github.com/chezou/tabula-py/blob/e61d46ee3c93bb40396e48dac5a9493e898f561a/tabula/file_util.py#L24-L63
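A hedged sketch of the local-path branch, the only one runnable offline; URLs and file-like objects instead get copied to '<pid>.pdf' with the temporary flag set to True. The import path mirrors this module's location in tabula-py.
```python
from tabula.file_util import localize_file  # import path assumed

filename, is_temp = localize_file('~/reports/table.pdf')
print(filename, is_temp)  # user-expanded local path, False (no copy made)
```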
3,437
sk-/git-lint
gitlint/__init__.py
get_vcs_root
def get_vcs_root(): """Returns the vcs module and the root of the repo. Returns: A tuple containing the vcs module to use (git, hg) and the root of the repository. If no repository exists then (None, None) is returned. """ for vcs in (git, hg): repo_root = vcs.repository_root() if repo_root: return vcs, repo_root return (None, None)
python
def get_vcs_root(): """Returns the vcs module and the root of the repo. Returns: A tuple containing the vcs module to use (git, hg) and the root of the repository. If no repository exists then (None, None) is returned. """ for vcs in (git, hg): repo_root = vcs.repository_root() if repo_root: return vcs, repo_root return (None, None)
['def', 'get_vcs_root', '(', ')', ':', 'for', 'vcs', 'in', '(', 'git', ',', 'hg', ')', ':', 'repo_root', '=', 'vcs', '.', 'repository_root', '(', ')', 'if', 'repo_root', ':', 'return', 'vcs', ',', 'repo_root', 'return', '(', 'None', ',', 'None', ')']
Returns the vcs module and the root of the repo. Returns: A tuple containing the vcs module to use (git, hg) and the root of the repository. If no repository exists then (None, None) is returned.
['Returns', 'the', 'vcs', 'module', 'and', 'the', 'root', 'of', 'the', 'repo', '.']
train
https://github.com/sk-/git-lint/blob/4f19ec88bfa1b6670ff37ccbfc53c6b67251b027/gitlint/__init__.py#L153-L165
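A hedged usage sketch, run from inside a git or hg checkout; get_vcs_root lives in the gitlint package root per this record's path.
```python
import gitlint

vcs, root = gitlint.get_vcs_root()
if vcs is None:
    print('not inside a git/hg repository')
else:
    print(vcs.__name__, root)  # e.g. "gitlint.git /home/me/project"
```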
3,438
sepandhaghighi/pycm
pycm/pycm_overall_func.py
cross_entropy_calc
def cross_entropy_calc(TOP, P, POP): """ Calculate cross entropy. :param TOP: test outcome positive :type TOP : dict :param P: condition positive :type P : dict :param POP: population :type POP : dict :return: cross entropy as float """ try: result = 0 for i in TOP.keys(): reference_likelihood = P[i] / POP[i] response_likelihood = TOP[i] / POP[i] if response_likelihood != 0 and reference_likelihood != 0: result += reference_likelihood * \ math.log(response_likelihood, 2) return -result except Exception: return "None"
python
def cross_entropy_calc(TOP, P, POP): """ Calculate cross entropy. :param TOP: test outcome positive :type TOP : dict :param P: condition positive :type P : dict :param POP: population :type POP : dict :return: cross entropy as float """ try: result = 0 for i in TOP.keys(): reference_likelihood = P[i] / POP[i] response_likelihood = TOP[i] / POP[i] if response_likelihood != 0 and reference_likelihood != 0: result += reference_likelihood * \ math.log(response_likelihood, 2) return -result except Exception: return "None"
['def', 'cross_entropy_calc', '(', 'TOP', ',', 'P', ',', 'POP', ')', ':', 'try', ':', 'result', '=', '0', 'for', 'i', 'in', 'TOP', '.', 'keys', '(', ')', ':', 'reference_likelihood', '=', 'P', '[', 'i', ']', '/', 'POP', '[', 'i', ']', 'response_likelihood', '=', 'TOP', '[', 'i', ']', '/', 'POP', '[', 'i', ']', 'if', 'response_likelihood', '!=', '0', 'and', 'reference_likelihood', '!=', '0', ':', 'result', '+=', 'reference_likelihood', '*', 'math', '.', 'log', '(', 'response_likelihood', ',', '2', ')', 'return', '-', 'result', 'except', 'Exception', ':', 'return', '"None"']
Calculate cross entropy. :param TOP: test outcome positive :type TOP : dict :param P: condition positive :type P : dict :param POP: population :type POP : dict :return: cross entropy as float
['Calculate', 'cross', 'entropy', '.']
train
https://github.com/sepandhaghighi/pycm/blob/cb03258afd6a821d10acba73c965aaac174bedcd/pycm/pycm_overall_func.py#L327-L349
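A worked two-class example: the function computes -sum_i (P_i/POP_i) * log2(TOP_i/POP_i), skipping zero likelihoods; the import path is assumed from this record.
```python
from pycm.pycm_overall_func import cross_entropy_calc  # import path assumed

TOP = {'cat': 6, 'dog': 4}    # predicted (test outcome positive) per class
P = {'cat': 5, 'dog': 5}      # actual (condition positive) per class
POP = {'cat': 10, 'dog': 10}  # population per class

print(cross_entropy_calc(TOP, P, POP))
# -(0.5*log2(0.6) + 0.5*log2(0.4)) ~= 1.029
```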
3,439
androguard/androguard
androguard/core/bytecodes/apk.py
APK.get_requested_aosp_permissions
def get_requested_aosp_permissions(self): """ Returns requested permissions declared within AOSP project. This includes several other permissions as well, which are in the platform apps. :rtype: list of str """ aosp_permissions = [] all_permissions = self.get_permissions() for perm in all_permissions: if perm in list(self.permission_module.keys()): aosp_permissions.append(perm) return aosp_permissions
python
def get_requested_aosp_permissions(self): """ Returns requested permissions declared within AOSP project. This includes several other permissions as well, which are in the platform apps. :rtype: list of str """ aosp_permissions = [] all_permissions = self.get_permissions() for perm in all_permissions: if perm in list(self.permission_module.keys()): aosp_permissions.append(perm) return aosp_permissions
['def', 'get_requested_aosp_permissions', '(', 'self', ')', ':', 'aosp_permissions', '=', '[', ']', 'all_permissions', '=', 'self', '.', 'get_permissions', '(', ')', 'for', 'perm', 'in', 'all_permissions', ':', 'if', 'perm', 'in', 'list', '(', 'self', '.', 'permission_module', '.', 'keys', '(', ')', ')', ':', 'aosp_permissions', '.', 'append', '(', 'perm', ')', 'return', 'aosp_permissions']
Returns requested permissions declared within AOSP project. This includes several other permissions as well, which are in the platform apps. :rtype: list of str
['Returns', 'requested', 'permissions', 'declared', 'within', 'AOSP', 'project', '.']
train
https://github.com/androguard/androguard/blob/984c0d981be2950cf0451e484f7b0d4d53bc4911/androguard/core/bytecodes/apk.py#L1258-L1271
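A hedged usage sketch with androguard's APK loader; 'app.apk' is a placeholder path.
```python
from androguard.core.bytecodes.apk import APK

a = APK('app.apk')  # placeholder path to an APK on disk
for perm in a.get_requested_aosp_permissions():
    print(perm)  # e.g. android.permission.INTERNET
```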
3,440
PyCQA/astroid
astroid/rebuilder.py
TreeRebuilder.visit_break
def visit_break(self, node, parent): """visit a Break node by returning a fresh instance of it""" return nodes.Break( getattr(node, "lineno", None), getattr(node, "col_offset", None), parent )
python
def visit_break(self, node, parent): """visit a Break node by returning a fresh instance of it""" return nodes.Break( getattr(node, "lineno", None), getattr(node, "col_offset", None), parent )
['def', 'visit_break', '(', 'self', ',', 'node', ',', 'parent', ')', ':', 'return', 'nodes', '.', 'Break', '(', 'getattr', '(', 'node', ',', '"lineno"', ',', 'None', ')', ',', 'getattr', '(', 'node', ',', '"col_offset"', ',', 'None', ')', ',', 'parent', ')']
visit a Break node by returning a fresh instance of it
['visit', 'a', 'Break', 'node', 'by', 'returning', 'a', 'fresh', 'instance', 'of', 'it']
train
https://github.com/PyCQA/astroid/blob/e0a298df55b15abcb77c2a93253f5ab7be52d0fb/astroid/rebuilder.py#L347-L351
3,441
3ll3d00d/vibe
backend/src/recorder/common/mpu6050.py
mpu6050.initialiseDevice
def initialiseDevice(self): """ performs initialisation of the device :return: """ logger.debug("Initialising device") self.getInterruptStatus() self.setAccelerometerSensitivity(self._accelerationFactor * 32768.0) self.setGyroSensitivity(self._gyroFactor * 32768.0) self.setSampleRate(self.fs) for loop in self.ZeroRegister: self.i2c_io.write(self.MPU6050_ADDRESS, loop, 0) # Sets clock source to gyro reference w/ PLL self.i2c_io.write(self.MPU6050_ADDRESS, self.MPU6050_RA_PWR_MGMT_1, 0b00000010) # Controls frequency of wakeups in accel low power mode plus the sensor standby modes self.i2c_io.write(self.MPU6050_ADDRESS, self.MPU6050_RA_PWR_MGMT_2, 0x00) # Enables any I2C master interrupt source to generate an interrupt self.i2c_io.write(self.MPU6050_ADDRESS, self.MPU6050_RA_INT_ENABLE, 0x01) # enable the FIFO self.enableFifo() logger.debug("Initialised device")
python
def initialiseDevice(self): """ performs initialisation of the device :return: """ logger.debug("Initialising device") self.getInterruptStatus() self.setAccelerometerSensitivity(self._accelerationFactor * 32768.0) self.setGyroSensitivity(self._gyroFactor * 32768.0) self.setSampleRate(self.fs) for loop in self.ZeroRegister: self.i2c_io.write(self.MPU6050_ADDRESS, loop, 0) # Sets clock source to gyro reference w/ PLL self.i2c_io.write(self.MPU6050_ADDRESS, self.MPU6050_RA_PWR_MGMT_1, 0b00000010) # Controls frequency of wakeups in accel low power mode plus the sensor standby modes self.i2c_io.write(self.MPU6050_ADDRESS, self.MPU6050_RA_PWR_MGMT_2, 0x00) # Enables any I2C master interrupt source to generate an interrupt self.i2c_io.write(self.MPU6050_ADDRESS, self.MPU6050_RA_INT_ENABLE, 0x01) # enable the FIFO self.enableFifo() logger.debug("Initialised device")
['def', 'initialiseDevice', '(', 'self', ')', ':', 'logger', '.', 'debug', '(', '"Initialising device"', ')', 'self', '.', 'getInterruptStatus', '(', ')', 'self', '.', 'setAccelerometerSensitivity', '(', 'self', '.', '_accelerationFactor', '*', '32768.0', ')', 'self', '.', 'setGyroSensitivity', '(', 'self', '.', '_gyroFactor', '*', '32768.0', ')', 'self', '.', 'setSampleRate', '(', 'self', '.', 'fs', ')', 'for', 'loop', 'in', 'self', '.', 'ZeroRegister', ':', 'self', '.', 'i2c_io', '.', 'write', '(', 'self', '.', 'MPU6050_ADDRESS', ',', 'loop', ',', '0', ')', '# Sets clock source to gyro reference w/ PLL', 'self', '.', 'i2c_io', '.', 'write', '(', 'self', '.', 'MPU6050_ADDRESS', ',', 'self', '.', 'MPU6050_RA_PWR_MGMT_1', ',', '0b00000010', ')', '# Controls frequency of wakeups in accel low power mode plus the sensor standby modes', 'self', '.', 'i2c_io', '.', 'write', '(', 'self', '.', 'MPU6050_ADDRESS', ',', 'self', '.', 'MPU6050_RA_PWR_MGMT_2', ',', '0x00', ')', '# Enables any I2C master interrupt source to generate an interrupt', 'self', '.', 'i2c_io', '.', 'write', '(', 'self', '.', 'MPU6050_ADDRESS', ',', 'self', '.', 'MPU6050_RA_INT_ENABLE', ',', '0x01', ')', '# enable the FIFO', 'self', '.', 'enableFifo', '(', ')', 'logger', '.', 'debug', '(', '"Initialised device"', ')']
performs initialisation of the device :return:
['performs', 'initialisation', 'of', 'the', 'device', ':', 'return', ':']
train
https://github.com/3ll3d00d/vibe/blob/124b029f13ac746723e92cb47e9cb56edd2e54b5/backend/src/recorder/common/mpu6050.py#L287-L308
3,442
kwikteam/phy
phy/plot/base.py
BaseCanvas.on_draw
def on_draw(self, e): """Draw all visuals.""" gloo.clear() for visual in self.visuals: logger.log(5, "Draw visual `%s`.", visual) visual.on_draw()
python
def on_draw(self, e): """Draw all visuals.""" gloo.clear() for visual in self.visuals: logger.log(5, "Draw visual `%s`.", visual) visual.on_draw()
['def', 'on_draw', '(', 'self', ',', 'e', ')', ':', 'gloo', '.', 'clear', '(', ')', 'for', 'visual', 'in', 'self', '.', 'visuals', ':', 'logger', '.', 'log', '(', '5', ',', '"Draw visual `%s`."', ',', 'visual', ')', 'visual', '.', 'on_draw', '(', ')']
Draw all visuals.
['Draw', 'all', 'visuals', '.']
train
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/plot/base.py#L295-L300
3,443
gwastro/pycbc
pycbc/filter/resample.py
notch_fir
def notch_fir(timeseries, f1, f2, order, beta=5.0): """ notch filter the time series using an FIR filter generated from the ideal response passed through a time-domain kaiser window (beta = 5.0) The suppression of the notch filter is related to the bandwidth and the number of samples in the filter length. For a few Hz bandwidth, a length corresponding to a few seconds is typically required to create significant suppression in the notched band. To achieve frequency resolution df at sampling frequency fs, order should be at least fs/df. Parameters ---------- Time Series: TimeSeries The time series to be notched. f1: float The start of the frequency suppression. f2: float The end of the frequency suppression. order: int Number of corrupted samples on each side of the time series (Extent of the filter on either side of zero) beta: float Beta parameter of the kaiser window that sets the side lobe attenuation. """ k1 = f1 / float((int(1.0 / timeseries.delta_t) / 2)) k2 = f2 / float((int(1.0 / timeseries.delta_t) / 2)) coeff = scipy.signal.firwin(order * 2 + 1, [k1, k2], window=('kaiser', beta)) data = fir_zero_filter(coeff, timeseries) return TimeSeries(data, epoch=timeseries.start_time, delta_t=timeseries.delta_t)
python
def notch_fir(timeseries, f1, f2, order, beta=5.0): """ notch filter the time series using an FIR filter generated from the ideal response passed through a time-domain kaiser window (beta = 5.0) The suppression of the notch filter is related to the bandwidth and the number of samples in the filter length. For a few Hz bandwidth, a length corresponding to a few seconds is typically required to create significant suppression in the notched band. To achieve frequency resolution df at sampling frequency fs, order should be at least fs/df. Parameters ---------- Time Series: TimeSeries The time series to be notched. f1: float The start of the frequency suppression. f2: float The end of the frequency suppression. order: int Number of corrupted samples on each side of the time series (Extent of the filter on either side of zero) beta: float Beta parameter of the kaiser window that sets the side lobe attenuation. """ k1 = f1 / float((int(1.0 / timeseries.delta_t) / 2)) k2 = f2 / float((int(1.0 / timeseries.delta_t) / 2)) coeff = scipy.signal.firwin(order * 2 + 1, [k1, k2], window=('kaiser', beta)) data = fir_zero_filter(coeff, timeseries) return TimeSeries(data, epoch=timeseries.start_time, delta_t=timeseries.delta_t)
['def', 'notch_fir', '(', 'timeseries', ',', 'f1', ',', 'f2', ',', 'order', ',', 'beta', '=', '5.0', ')', ':', 'k1', '=', 'f1', '/', 'float', '(', '(', 'int', '(', '1.0', '/', 'timeseries', '.', 'delta_t', ')', '/', '2', ')', ')', 'k2', '=', 'f2', '/', 'float', '(', '(', 'int', '(', '1.0', '/', 'timeseries', '.', 'delta_t', ')', '/', '2', ')', ')', 'coeff', '=', 'scipy', '.', 'signal', '.', 'firwin', '(', 'order', '*', '2', '+', '1', ',', '[', 'k1', ',', 'k2', ']', ',', 'window', '=', '(', "'kaiser'", ',', 'beta', ')', ')', 'data', '=', 'fir_zero_filter', '(', 'coeff', ',', 'timeseries', ')', 'return', 'TimeSeries', '(', 'data', ',', 'epoch', '=', 'timeseries', '.', 'start_time', ',', 'delta_t', '=', 'timeseries', '.', 'delta_t', ')']
notch filter the time series using an FIR filter generated from the ideal response passed through a time-domain kaiser window (beta = 5.0) The suppression of the notch filter is related to the bandwidth and the number of samples in the filter length. For a few Hz bandwidth, a length corresponding to a few seconds is typically required to create significant suppression in the notched band. To achieve frequency resolution df at sampling frequency fs, order should be at least fs/df. Parameters ---------- Time Series: TimeSeries The time series to be notched. f1: float The start of the frequency suppression. f2: float The end of the frequency suppression. order: int Number of corrupted samples on each side of the time series (Extent of the filter on either side of zero) beta: float Beta parameter of the kaiser window that sets the side lobe attenuation.
['notch', 'filter', 'the', 'time', 'series', 'using', 'an', 'FIR', 'filter', 'generated', 'from', 'the', 'ideal', 'response', 'passed', 'through', 'a', 'time', '-', 'domain', 'kaiser', 'window', '(', 'beta', '=', '5', '.', '0', ')']
train
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/filter/resample.py#L181-L210
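A hedged sketch notching a 60 Hz line out of a synthetic 256 Hz series; the pycbc.filter re-export of notch_fir is an assumption, and order is sized per the docstring rule order >= fs/df.
```python
import numpy
from pycbc.types import TimeSeries
from pycbc.filter import notch_fir  # re-export assumed

fs = 256
t = numpy.arange(fs * 16) / fs  # 16 seconds of data
data = numpy.sin(2 * numpy.pi * 60 * t) + 0.1 * numpy.random.normal(size=t.size)
ts = TimeSeries(data, delta_t=1.0 / fs)

# order = 2*fs gives ~0.5 Hz resolution and corrupts ~2 s at each edge
clean = notch_fir(ts, 55, 65, order=2 * fs)
```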
3,444
projectshift/shift-boiler
boiler/cli/boiler.py
version
def version(): """ Version Imports and displays current boiler version. :return: """ echo(green('\nshift-boiler:')) echo(green('-' * 40)) echo(yellow('Version: ') + '{}'.format(boiler_version)) echo(yellow('GitHub: ') + 'https://github.com/projectshift/shift-boiler') echo(yellow('PyPi: ') + 'https://pypi.org/project/shiftboiler/') echo()
python
def version(): """ Version Imports and displays current boiler version. :return: """ echo(green('\nshift-boiler:')) echo(green('-' * 40)) echo(yellow('Version: ') + '{}'.format(boiler_version)) echo(yellow('GitHub: ') + 'https://github.com/projectshift/shift-boiler') echo(yellow('PyPi: ') + 'https://pypi.org/project/shiftboiler/') echo()
['def', 'version', '(', ')', ':', 'echo', '(', 'green', '(', "'\\nshift-boiler:'", ')', ')', 'echo', '(', 'green', '(', "'-'", '*', '40', ')', ')', 'echo', '(', 'yellow', '(', "'Version: '", ')', '+', "'{}'", '.', 'format', '(', 'boiler_version', ')', ')', 'echo', '(', 'yellow', '(', "'GitHub: '", ')', '+', "'https://github.com/projectshift/shift-boiler'", ')', 'echo', '(', 'yellow', '(', "'PyPi: '", ')', '+', "'https://pypi.org/project/shiftboiler/'", ')', 'echo', '(', ')']
Version Imports and displays current boiler version. :return:
['Version', 'Imports', 'and', 'displays', 'current', 'boiler', 'version', '.', ':', 'return', ':']
train
https://github.com/projectshift/shift-boiler/blob/8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b/boiler/cli/boiler.py#L21-L32
3,445
RJT1990/pyflux
pyflux/garch/segarchm.py
SEGARCHM.add_leverage
def add_leverage(self): """ Adds leverage term to the model Returns ---------- None (changes instance attributes) """ if self.leverage is True: pass else: self.leverage = True self.z_no += 1 self.latent_variables.z_list.pop() self.latent_variables.z_list.pop() self.latent_variables.z_list.pop() self.latent_variables.z_list.pop() self.latent_variables.add_z('Leverage Term', fam.Flat(transform=None), fam.Normal(0, 3)) self.latent_variables.add_z('Skewness', fam.Flat(transform='exp'), fam.Normal(0, 3)) self.latent_variables.add_z('v', fam.Flat(transform='exp'), fam.Normal(0, 3)) self.latent_variables.add_z('Returns Constant', fam.Normal(0,3,transform=None), fam.Normal(0, 3)) self.latent_variables.add_z('GARCH-M', fam.Normal(0,3,transform=None), fam.Normal(0, 3)) self.latent_variables.z_list[-3].start = 2.0
python
def add_leverage(self): """ Adds leverage term to the model Returns ---------- None (changes instance attributes) """ if self.leverage is True: pass else: self.leverage = True self.z_no += 1 self.latent_variables.z_list.pop() self.latent_variables.z_list.pop() self.latent_variables.z_list.pop() self.latent_variables.z_list.pop() self.latent_variables.add_z('Leverage Term', fam.Flat(transform=None), fam.Normal(0, 3)) self.latent_variables.add_z('Skewness', fam.Flat(transform='exp'), fam.Normal(0, 3)) self.latent_variables.add_z('v', fam.Flat(transform='exp'), fam.Normal(0, 3)) self.latent_variables.add_z('Returns Constant', fam.Normal(0,3,transform=None), fam.Normal(0, 3)) self.latent_variables.add_z('GARCH-M', fam.Normal(0,3,transform=None), fam.Normal(0, 3)) self.latent_variables.z_list[-3].start = 2.0
['def', 'add_leverage', '(', 'self', ')', ':', 'if', 'self', '.', 'leverage', 'is', 'True', ':', 'pass', 'else', ':', 'self', '.', 'leverage', '=', 'True', 'self', '.', 'z_no', '+=', '1', 'self', '.', 'latent_variables', '.', 'z_list', '.', 'pop', '(', ')', 'self', '.', 'latent_variables', '.', 'z_list', '.', 'pop', '(', ')', 'self', '.', 'latent_variables', '.', 'z_list', '.', 'pop', '(', ')', 'self', '.', 'latent_variables', '.', 'z_list', '.', 'pop', '(', ')', 'self', '.', 'latent_variables', '.', 'add_z', '(', "'Leverage Term'", ',', 'fam', '.', 'Flat', '(', 'transform', '=', 'None', ')', ',', 'fam', '.', 'Normal', '(', '0', ',', '3', ')', ')', 'self', '.', 'latent_variables', '.', 'add_z', '(', "'Skewness'", ',', 'fam', '.', 'Flat', '(', 'transform', '=', "'exp'", ')', ',', 'fam', '.', 'Normal', '(', '0', ',', '3', ')', ')', 'self', '.', 'latent_variables', '.', 'add_z', '(', "'v'", ',', 'fam', '.', 'Flat', '(', 'transform', '=', "'exp'", ')', ',', 'fam', '.', 'Normal', '(', '0', ',', '3', ')', ')', 'self', '.', 'latent_variables', '.', 'add_z', '(', "'Returns Constant'", ',', 'fam', '.', 'Normal', '(', '0', ',', '3', ',', 'transform', '=', 'None', ')', ',', 'fam', '.', 'Normal', '(', '0', ',', '3', ')', ')', 'self', '.', 'latent_variables', '.', 'add_z', '(', "'GARCH-M'", ',', 'fam', '.', 'Normal', '(', '0', ',', '3', ',', 'transform', '=', 'None', ')', ',', 'fam', '.', 'Normal', '(', '0', ',', '3', ')', ')', 'self', '.', 'latent_variables', '.', 'z_list', '[', '-', '3', ']', '.', 'start', '=', '2.0']
Adds leverage term to the model Returns ---------- None (changes instance attributes)
['Adds', 'leverage', 'term', 'to', 'the', 'model']
train
https://github.com/RJT1990/pyflux/blob/297f2afc2095acd97c12e827dd500e8ea5da0c0f/pyflux/garch/segarchm.py#L508-L530
3,446
mlavin/argyle
argyle/system.py
service_command
def service_command(name, command): """Run an init.d/upstart command.""" service_command_template = getattr(env, 'ARGYLE_SERVICE_COMMAND_TEMPLATE', u'/etc/init.d/%(name)s %(command)s') sudo(service_command_template % {'name': name, 'command': command}, pty=False)
python
def service_command(name, command): """Run an init.d/upstart command.""" service_command_template = getattr(env, 'ARGYLE_SERVICE_COMMAND_TEMPLATE', u'/etc/init.d/%(name)s %(command)s') sudo(service_command_template % {'name': name, 'command': command}, pty=False)
['def', 'service_command', '(', 'name', ',', 'command', ')', ':', 'service_command_template', '=', 'getattr', '(', 'env', ',', "'ARGYLE_SERVICE_COMMAND_TEMPLATE'", ',', "u'/etc/init.d/%(name)s %(command)s'", ')', 'sudo', '(', 'service_command_template', '%', '{', "'name'", ':', 'name', ',', "'command'", ':', 'command', '}', ',', 'pty', '=', 'False', ')']
Run an init.d/upstart command.
['Run', 'an', 'init', '.', 'd', '/', 'upstart', 'command', '.']
train
https://github.com/mlavin/argyle/blob/92cc6e1dd9b8e7cb41c5098a79d05e14b8243d72/argyle/system.py#L129-L135
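A hedged sketch of use from a fabfile; argyle builds on Fabric 1.x, so the env/hosts and task pattern below follow that API, and the host is a placeholder.
```python
from fabric.api import env
from argyle.system import service_command

env.hosts = ['deploy@web1.example.com']  # placeholder host

def restart_nginx():
    service_command('nginx', 'restart')  # -> sudo /etc/init.d/nginx restart
```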
3,447
xapple/fasta
fasta/aligned.py
AlignedFASTA.build_tree_fast
def build_tree_fast(self, new_path=None, seq_type='nucl' or 'prot'): """Make a tree with FastTree. Names will be truncated however.""" # Check output # if new_path is None: new_path = self.prefix_path + '.tree' # Command # command_args = [] if seq_type == 'nucl': command_args += ['-nt'] command_args += ['-gamma'] command_args += ['-out', new_path] command_args += [self.path] # Run it # sh.FastTree(*command_args) # Return # return FilePath(new_path)
python
def build_tree_fast(self, new_path=None, seq_type='nucl' or 'prot'): """Make a tree with FastTree. Names will be truncated however.""" # Check output # if new_path is None: new_path = self.prefix_path + '.tree' # Command # command_args = [] if seq_type == 'nucl': command_args += ['-nt'] command_args += ['-gamma'] command_args += ['-out', new_path] command_args += [self.path] # Run it # sh.FastTree(*command_args) # Return # return FilePath(new_path)
['def', 'build_tree_fast', '(', 'self', ',', 'new_path', '=', 'None', ',', 'seq_type', '=', "'nucl'", 'or', "'prot'", ')', ':', '# Check output #', 'if', 'new_path', 'is', 'None', ':', 'new_path', '=', 'self', '.', 'prefix_path', '+', "'.tree'", '# Command #', 'command_args', '=', '[', ']', 'if', 'seq_type', '==', "'nucl'", ':', 'command_args', '+=', '[', "'-nt'", ']', 'command_args', '+=', '[', "'-gamma'", ']', 'command_args', '+=', '[', "'-out'", ',', 'new_path', ']', 'command_args', '+=', '[', 'self', '.', 'path', ']', '# Run it #', 'sh', '.', 'FastTree', '(', '*', 'command_args', ')', '# Return #', 'return', 'FilePath', '(', 'new_path', ')']
Make a tree with FastTree. Names will be truncated however.
['Make', 'a', 'tree', 'with', 'FastTree', '.', 'Names', 'will', 'be', 'truncated', 'however', '.']
train
https://github.com/xapple/fasta/blob/a827c3138812d555203be45187ffae1277dd0d76/fasta/aligned.py#L111-L124
3,448
Addvilz/hemp
hemp/gitutils.py
clone
def clone(url, directory, single_branch=None): # type: (str, str, str) -> Repo """ Clone a repository, optionally using shallow clone :rtype: Repo :param url: URL of the repository :param directory: Directory to clone to :param single_branch: branch to clone if shallow clone is preferred :return: GitPython repository object of the newly cloned repository """ print_info('Cloning {0} to {1} {2}'.format( url, directory, '[full clone]' if single_branch is None else '[{0}]'.format(single_branch) )) args = { 'url': url, 'to_path': directory, 'progress': SimpleProgressPrinter(), 'recursive': True } if single_branch is not None: args['depth'] = 1 args['branch'] = single_branch args['single_branch'] = True return Repo.clone_from(**args)
python
def clone(url, directory, single_branch=None): # type: (str, str, str) -> Repo """ Clone a repository, optionally using shallow clone :rtype: Repo :param url: URL of the repository :param directory: Directory to clone to :param single_branch: branch to clone if shallow clone is preferred :return: GitPython repository object of the newly cloned repository """ print_info('Cloning {0} to {1} {2}'.format( url, directory, '[full clone]' if single_branch is None else '[{0}]'.format(single_branch) )) args = { 'url': url, 'to_path': directory, 'progress': SimpleProgressPrinter(), 'recursive': True } if single_branch is not None: args['depth'] = 1 args['branch'] = single_branch args['single_branch'] = True return Repo.clone_from(**args)
['def', 'clone', '(', 'url', ',', 'directory', ',', 'single_branch', '=', 'None', ')', ':', '# type: (str, str, str) -> Repo', 'print_info', '(', "'Cloning {0} to {1} {2}'", '.', 'format', '(', 'url', ',', 'directory', ',', "'[full clone]'", 'if', 'single_branch', 'is', 'None', 'else', "'[{0}]'", '.', 'format', '(', 'single_branch', ')', ')', ')', 'args', '=', '{', "'url'", ':', 'url', ',', "'to_path'", ':', 'directory', ',', "'progress'", ':', 'SimpleProgressPrinter', '(', ')', ',', "'recursive'", ':', 'True', '}', 'if', 'single_branch', 'is', 'not', 'None', ':', 'args', '[', "'depth'", ']', '=', '1', 'args', '[', "'branch'", ']', '=', 'single_branch', 'args', '[', "'single_branch'", ']', '=', 'True', 'return', 'Repo', '.', 'clone_from', '(', '*', '*', 'args', ')']
Clone a repository, optionally using shallow clone :rtype: Repo :param url: URL of the repository :param directory: Directory to clone to :param single_branch: branch to clone if shallow clone is preferred :return: GitPython repository object of the newly cloned repository
['Clone', 'a', 'repository', 'optionally', 'using', 'shallow', 'clone', ':', 'rtype', ':', 'Repo', ':', 'param', 'url', ':', 'URL', 'of', 'the', 'repository', ':', 'param', 'directory', ':', 'Directory', 'to', 'clone', 'to', ':', 'param', 'single_branch', ':', 'branch', 'to', 'clone', 'if', 'shallow', 'clone', 'is', 'preferred', ':', 'return', ':', 'GitPython', 'repository', 'object', 'of', 'the', 'newly', 'cloned', 'repository']
train
https://github.com/Addvilz/hemp/blob/80d189f15ba20068a61efc2591070c80549c9d06/hemp/gitutils.py#L36-L63
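A hedged usage sketch: a full clone versus a depth-1 single-branch clone; both return a GitPython Repo, and the URL, paths, and branch name are placeholders.
```python
from hemp.gitutils import clone

repo = clone('https://github.com/Addvilz/hemp.git', '/tmp/hemp-full')
shallow = clone('https://github.com/Addvilz/hemp.git', '/tmp/hemp-master',
                single_branch='master')
print(shallow.active_branch.name)  # master
```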
3,449
tomplus/kubernetes_asyncio
kubernetes_asyncio/client/api/extensions_v1beta1_api.py
ExtensionsV1beta1Api.list_replica_set_for_all_namespaces
def list_replica_set_for_all_namespaces(self, **kwargs): # noqa: E501 """list_replica_set_for_all_namespaces # noqa: E501 list or watch objects of kind ReplicaSet # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.list_replica_set_for_all_namespaces(async_req=True) >>> result = thread.get() :param async_req bool :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str pretty: If 'true', then the output is pretty printed. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1beta1ReplicaSetList If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.list_replica_set_for_all_namespaces_with_http_info(**kwargs) # noqa: E501 else: (data) = self.list_replica_set_for_all_namespaces_with_http_info(**kwargs) # noqa: E501 return data
python
def list_replica_set_for_all_namespaces(self, **kwargs): # noqa: E501 """list_replica_set_for_all_namespaces # noqa: E501 list or watch objects of kind ReplicaSet # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.list_replica_set_for_all_namespaces(async_req=True) >>> result = thread.get() :param async_req bool :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str pretty: If 'true', then the output is pretty printed. 
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1beta1ReplicaSetList If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.list_replica_set_for_all_namespaces_with_http_info(**kwargs) # noqa: E501 else: (data) = self.list_replica_set_for_all_namespaces_with_http_info(**kwargs) # noqa: E501 return data
['def', 'list_replica_set_for_all_namespaces', '(', 'self', ',', '*', '*', 'kwargs', ')', ':', '# noqa: E501', 'kwargs', '[', "'_return_http_data_only'", ']', '=', 'True', 'if', 'kwargs', '.', 'get', '(', "'async_req'", ')', ':', 'return', 'self', '.', 'list_replica_set_for_all_namespaces_with_http_info', '(', '*', '*', 'kwargs', ')', '# noqa: E501', 'else', ':', '(', 'data', ')', '=', 'self', '.', 'list_replica_set_for_all_namespaces_with_http_info', '(', '*', '*', 'kwargs', ')', '# noqa: E501', 'return', 'data']
list_replica_set_for_all_namespaces # noqa: E501 list or watch objects of kind ReplicaSet # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.list_replica_set_for_all_namespaces(async_req=True) >>> result = thread.get() :param async_req bool :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str pretty: If 'true', then the output is pretty printed. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. 
Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1beta1ReplicaSetList If the method is called asynchronously, returns the request thread.
['list_replica_set_for_all_namespaces', '#', 'noqa', ':', 'E501']
train
https://github.com/tomplus/kubernetes_asyncio/blob/f9ab15317ec921409714c7afef11aeb0f579985d/kubernetes_asyncio/client/api/extensions_v1beta1_api.py#L3759-L3787
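A minimal usage sketch for the record above, assuming a reachable cluster and an older kubernetes_asyncio release that still ships ExtensionsV1beta1Api; the kubeconfig setup shown is the library's standard pattern, not part of this record.

import asyncio
from kubernetes_asyncio import client, config

async def main():
    # load_kube_config() is a coroutine in kubernetes_asyncio
    await config.load_kube_config()
    api = client.ExtensionsV1beta1Api()
    # list at most 10 ReplicaSets across all namespaces
    rs_list = await api.list_replica_set_for_all_namespaces(limit=10)
    print(len(rs_list.items))

asyncio.get_event_loop().run_until_complete(main())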
3,450
ensime/ensime-vim
ensime_shared/editor.py
Editor.selection_pos
def selection_pos(self): """Return start and end positions of the visual selection respectively.""" buff = self._vim.current.buffer beg = buff.mark('<') end = buff.mark('>') return beg, end
python
def selection_pos(self): """Return start and end positions of the visual selection respectively.""" buff = self._vim.current.buffer beg = buff.mark('<') end = buff.mark('>') return beg, end
['def', 'selection_pos', '(', 'self', ')', ':', 'buff', '=', 'self', '.', '_vim', '.', 'current', '.', 'buffer', 'beg', '=', 'buff', '.', 'mark', '(', "'<'", ')', 'end', '=', 'buff', '.', 'mark', '(', "'>'", ')', 'return', 'beg', ',', 'end']
Return start and end positions of the visual selection respectively.
['Return', 'start', 'and', 'end', 'positions', 'of', 'the', 'visual', 'selection', 'respectively', '.']
train
https://github.com/ensime/ensime-vim/blob/caa734e84f002b25446c615706283a74edd4ecfe/ensime_shared/editor.py#L246-L251
3,451
trevisanj/a99
a99/textinterface.py
format_h4
def format_h4(s, format="text", indents=0): """ Encloses string in level-4 heading format Args, Returns: see format_h1() """ _CHAR = "^" if format.startswith("text"): return format_underline(s, _CHAR, indents) elif format.startswith("markdown"): return ["#### {}".format(s)] elif format.startswith("rest"): return format_underline(s, _CHAR, 0)
python
def format_h4(s, format="text", indents=0): """ Encloses string in level-4 heading format Args, Returns: see format_h1() """ _CHAR = "^" if format.startswith("text"): return format_underline(s, _CHAR, indents) elif format.startswith("markdown"): return ["#### {}".format(s)] elif format.startswith("rest"): return format_underline(s, _CHAR, 0)
['def', 'format_h4', '(', 's', ',', 'format', '=', '"text"', ',', 'indents', '=', '0', ')', ':', '_CHAR', '=', '"^"', 'if', 'format', '.', 'startswith', '(', '"text"', ')', ':', 'return', 'format_underline', '(', 's', ',', '_CHAR', ',', 'indents', ')', 'elif', 'format', '.', 'startswith', '(', '"markdown"', ')', ':', 'return', '[', '"#### {}"', '.', 'format', '(', 's', ')', ']', 'elif', 'format', '.', 'startswith', '(', '"rest"', ')', ':', 'return', 'format_underline', '(', 's', ',', '_CHAR', ',', '0', ')']
Encloses string in level-4 heading format Args, Returns: see format_h1()
['Encloses', 'string', 'in', 'level-4', 'heading', 'format', 'Args', 'Returns', ':', 'see', 'format_h1', '()']
train
https://github.com/trevisanj/a99/blob/193e6e3c9b3e4f4a0ba7eb3eece846fe7045c539/a99/textinterface.py#L104-L117
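A quick, hedged usage sketch; the import path follows the record's file location, and the markdown return value follows directly from the code above.

from a99.textinterface import format_h4

print(format_h4("Results", format="markdown"))  # ['#### Results']
# "text" and "rest" delegate to format_underline() with the '^' character
print(format_h4("Results", format="text"))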
3,452
spacetelescope/stsci.tools
lib/stsci/tools/readgeis.py
stsci
def stsci(hdulist): """For STScI GEIS files, need to do extra steps.""" instrument = hdulist[0].header.get('INSTRUME', '') # Update extension header keywords if instrument in ("WFPC2", "FOC"): rootname = hdulist[0].header.get('ROOTNAME', '') filetype = hdulist[0].header.get('FILETYPE', '') for i in range(1, len(hdulist)): # Add name and extver attributes to match PyFITS data structure hdulist[i].name = filetype hdulist[i]._extver = i # Add extension keywords for this chip to extension hdulist[i].header['EXPNAME'] = (rootname, "9 character exposure identifier") hdulist[i].header['EXTVER'] = (i, "extension version number") hdulist[i].header['EXTNAME'] = (filetype, "extension name") hdulist[i].header['INHERIT'] = (True, "inherit the primary header") hdulist[i].header['ROOTNAME'] = (rootname, "rootname of the observation set")
python
def stsci(hdulist): """For STScI GEIS files, need to do extra steps.""" instrument = hdulist[0].header.get('INSTRUME', '') # Update extension header keywords if instrument in ("WFPC2", "FOC"): rootname = hdulist[0].header.get('ROOTNAME', '') filetype = hdulist[0].header.get('FILETYPE', '') for i in range(1, len(hdulist)): # Add name and extver attributes to match PyFITS data structure hdulist[i].name = filetype hdulist[i]._extver = i # Add extension keywords for this chip to extension hdulist[i].header['EXPNAME'] = (rootname, "9 character exposure identifier") hdulist[i].header['EXTVER'] = (i, "extension version number") hdulist[i].header['EXTNAME'] = (filetype, "extension name") hdulist[i].header['INHERIT'] = (True, "inherit the primary header") hdulist[i].header['ROOTNAME'] = (rootname, "rootname of the observation set")
['def', 'stsci', '(', 'hdulist', ')', ':', 'instrument', '=', 'hdulist', '[', '0', ']', '.', 'header', '.', 'get', '(', "'INSTRUME'", ',', "''", ')', '# Update extension header keywords', 'if', 'instrument', 'in', '(', '"WFPC2"', ',', '"FOC"', ')', ':', 'rootname', '=', 'hdulist', '[', '0', ']', '.', 'header', '.', 'get', '(', "'ROOTNAME'", ',', "''", ')', 'filetype', '=', 'hdulist', '[', '0', ']', '.', 'header', '.', 'get', '(', "'FILETYPE'", ',', "''", ')', 'for', 'i', 'in', 'range', '(', '1', ',', 'len', '(', 'hdulist', ')', ')', ':', '# Add name and extver attributes to match PyFITS data structure', 'hdulist', '[', 'i', ']', '.', 'name', '=', 'filetype', 'hdulist', '[', 'i', ']', '.', '_extver', '=', 'i', '# Add extension keywords for this chip to extension', 'hdulist', '[', 'i', ']', '.', 'header', '[', "'EXPNAME'", ']', '=', '(', 'rootname', ',', '"9 character exposure identifier"', ')', 'hdulist', '[', 'i', ']', '.', 'header', '[', "'EXTVER'", ']', '=', '(', 'i', ',', '"extension version number"', ')', 'hdulist', '[', 'i', ']', '.', 'header', '[', "'EXTNAME'", ']', '=', '(', 'filetype', ',', '"extension name"', ')', 'hdulist', '[', 'i', ']', '.', 'header', '[', "'INHERIT'", ']', '=', '(', 'True', ',', '"inherit the primary header"', ')', 'hdulist', '[', 'i', ']', '.', 'header', '[', "'ROOTNAME'", ']', '=', '(', 'rootname', ',', '"rootname of the observation set"', ')']
For STScI GEIS files, need to do extra steps.
['For', 'STScI', 'GEIS', 'files', 'need', 'to', 'do', 'extra', 'steps', '.']
train
https://github.com/spacetelescope/stsci.tools/blob/9a022503ad24ca54ce83331482dfa3ff6de9f403/lib/stsci/tools/readgeis.py#L79-L97
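A hedged sketch of where stsci() matters in practice: after reading a WFPC2 GEIS file, each extension should carry the added keywords. The filename is hypothetical, and readgeis() is assumed to call stsci() internally as part of its post-processing.

from stsci.tools import readgeis

hdulist = readgeis.readgeis('u2fi0101t.c0h')  # hypothetical WFPC2 GEIS header file
print(hdulist[1].header['EXPNAME'], hdulist[1].header['EXTVER'])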
3,453
spyder-ide/spyder
spyder/plugins/editor/plugin.py
Editor.set_or_clear_breakpoint
def set_or_clear_breakpoint(self): """Set/Clear breakpoint""" editorstack = self.get_current_editorstack() if editorstack is not None: self.switch_to_plugin() editorstack.set_or_clear_breakpoint()
python
def set_or_clear_breakpoint(self): """Set/Clear breakpoint""" editorstack = self.get_current_editorstack() if editorstack is not None: self.switch_to_plugin() editorstack.set_or_clear_breakpoint()
['def', 'set_or_clear_breakpoint', '(', 'self', ')', ':', 'editorstack', '=', 'self', '.', 'get_current_editorstack', '(', ')', 'if', 'editorstack', 'is', 'not', 'None', ':', 'self', '.', 'switch_to_plugin', '(', ')', 'editorstack', '.', 'set_or_clear_breakpoint', '(', ')']
Set/Clear breakpoint
['Set', '/', 'Clear', 'breakpoint']
train
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/editor/plugin.py#L2189-L2194
3,454
LCAV/pylocus
pylocus/lateration.py
get_lateration_parameters
def get_lateration_parameters(all_points, indices, index, edm, W=None): """ Get parameters relevant for lateration from full all_points, edm and W. """ if W is None: W = np.ones(edm.shape) # delete points that are not considered anchors anchors = np.delete(all_points, indices, axis=0) r2 = np.delete(edm[index, :], indices) w = np.delete(W[index, :], indices) # set w to zero where measurements are invalid if np.isnan(r2).any(): nan_measurements = np.where(np.isnan(r2))[0] r2[nan_measurements] = 0.0 w[nan_measurements] = 0.0 if np.isnan(w).any(): nan_measurements = np.where(np.isnan(w))[0] r2[nan_measurements] = 0.0 w[nan_measurements] = 0.0 # delete anchors where weight is zero to avoid ill-conditioning missing_anchors = np.where(w == 0.0)[0] w = np.asarray(np.delete(w, missing_anchors)) r2 = np.asarray(np.delete(r2, missing_anchors)) w.resize(edm.shape[0] - len(indices) - len(missing_anchors), 1) r2.resize(edm.shape[0] - len(indices) - len(missing_anchors), 1) anchors = np.delete(anchors, missing_anchors, axis=0) assert w.shape[0] == anchors.shape[0] assert np.isnan(w).any() == False assert np.isnan(r2).any() == False return anchors, w, r2
python
def get_lateration_parameters(all_points, indices, index, edm, W=None): """ Get parameters relevant for lateration from full all_points, edm and W. """ if W is None: W = np.ones(edm.shape) # delete points that are not considered anchors anchors = np.delete(all_points, indices, axis=0) r2 = np.delete(edm[index, :], indices) w = np.delete(W[index, :], indices) # set w to zero where measurements are invalid if np.isnan(r2).any(): nan_measurements = np.where(np.isnan(r2))[0] r2[nan_measurements] = 0.0 w[nan_measurements] = 0.0 if np.isnan(w).any(): nan_measurements = np.where(np.isnan(w))[0] r2[nan_measurements] = 0.0 w[nan_measurements] = 0.0 # delete anchors where weight is zero to avoid ill-conditioning missing_anchors = np.where(w == 0.0)[0] w = np.asarray(np.delete(w, missing_anchors)) r2 = np.asarray(np.delete(r2, missing_anchors)) w.resize(edm.shape[0] - len(indices) - len(missing_anchors), 1) r2.resize(edm.shape[0] - len(indices) - len(missing_anchors), 1) anchors = np.delete(anchors, missing_anchors, axis=0) assert w.shape[0] == anchors.shape[0] assert np.isnan(w).any() == False assert np.isnan(r2).any() == False return anchors, w, r2
['def', 'get_lateration_parameters', '(', 'all_points', ',', 'indices', ',', 'index', ',', 'edm', ',', 'W', '=', 'None', ')', ':', 'if', 'W', 'is', 'None', ':', 'W', '=', 'np', '.', 'ones', '(', 'edm', '.', 'shape', ')', '# delete points that are not considered anchors', 'anchors', '=', 'np', '.', 'delete', '(', 'all_points', ',', 'indices', ',', 'axis', '=', '0', ')', 'r2', '=', 'np', '.', 'delete', '(', 'edm', '[', 'index', ',', ':', ']', ',', 'indices', ')', 'w', '=', 'np', '.', 'delete', '(', 'W', '[', 'index', ',', ':', ']', ',', 'indices', ')', '# set w to zero where measurements are invalid', 'if', 'np', '.', 'isnan', '(', 'r2', ')', '.', 'any', '(', ')', ':', 'nan_measurements', '=', 'np', '.', 'where', '(', 'np', '.', 'isnan', '(', 'r2', ')', ')', '[', '0', ']', 'r2', '[', 'nan_measurements', ']', '=', '0.0', 'w', '[', 'nan_measurements', ']', '=', '0.0', 'if', 'np', '.', 'isnan', '(', 'w', ')', '.', 'any', '(', ')', ':', 'nan_measurements', '=', 'np', '.', 'where', '(', 'np', '.', 'isnan', '(', 'w', ')', ')', '[', '0', ']', 'r2', '[', 'nan_measurements', ']', '=', '0.0', 'w', '[', 'nan_measurements', ']', '=', '0.0', '# delete anchors where weight is zero to avoid ill-conditioning', 'missing_anchors', '=', 'np', '.', 'where', '(', 'w', '==', '0.0', ')', '[', '0', ']', 'w', '=', 'np', '.', 'asarray', '(', 'np', '.', 'delete', '(', 'w', ',', 'missing_anchors', ')', ')', 'r2', '=', 'np', '.', 'asarray', '(', 'np', '.', 'delete', '(', 'r2', ',', 'missing_anchors', ')', ')', 'w', '.', 'resize', '(', 'edm', '.', 'shape', '[', '0', ']', '-', 'len', '(', 'indices', ')', '-', 'len', '(', 'missing_anchors', ')', ',', '1', ')', 'r2', '.', 'resize', '(', 'edm', '.', 'shape', '[', '0', ']', '-', 'len', '(', 'indices', ')', '-', 'len', '(', 'missing_anchors', ')', ',', '1', ')', 'anchors', '=', 'np', '.', 'delete', '(', 'anchors', ',', 'missing_anchors', ',', 'axis', '=', '0', ')', 'assert', 'w', '.', 'shape', '[', '0', ']', '==', 'anchors', '.', 'shape', '[', '0', ']', 'assert', 'np', '.', 'isnan', '(', 'w', ')', '.', 'any', '(', ')', '==', 'False', 'assert', 'np', '.', 'isnan', '(', 'r2', ')', '.', 'any', '(', ')', '==', 'False', 'return', 'anchors', ',', 'w', ',', 'r2']
Get parameters relevant for lateration from full all_points, edm and W.
['Get', 'parameters', 'relevant', 'for', 'lateration', 'from', 'full', 'all_points', 'edm', 'and', 'W', '.']
train
https://github.com/LCAV/pylocus/blob/c56a38c251d8a435caf4641a8ae6027ecba2c8c6/pylocus/lateration.py#L11-L42
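A minimal, self-checking sketch of the function above: three anchors plus one unknown point (index 3), with a squared Euclidean distance matrix and default weights, so no entries are pruned.

import numpy as np
from pylocus.lateration import get_lateration_parameters

all_points = np.array([[0., 0.], [1., 0.], [0., 1.], [0.5, 0.5]])
# squared EDM from pairwise distances
edm = np.square(np.linalg.norm(all_points[:, None] - all_points[None, :], axis=2))
anchors, w, r2 = get_lateration_parameters(all_points, indices=[3], index=3, edm=edm)
print(anchors.shape, w.shape, r2.shape)  # (3, 2) (3, 1) (3, 1)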
3,455
angr/angr
angr/analyses/variable_recovery/variable_recovery.py
VariableRecoveryState._addr_to_stack_offset
def _addr_to_stack_offset(self, addr): """ Convert an address to a stack offset. :param claripy.ast.Base addr: The address to convert from. :return: A stack offset if the addr comes from the stack pointer, or None if the address does not come from the stack pointer. """ def _parse(addr): if addr.op == '__add__': # __add__ might have multiple arguments parsed = [ _parse(arg) for arg in addr.args ] annotated = [ True for annotated, _ in parsed if annotated is True ] if len(annotated) != 1: # either nothing is annotated, or more than one element is annotated raise ValueError() return True, sum([ offset for _, offset in parsed ]) elif addr.op == '__sub__': # __sub__ might have multiple arguments parsed = [ _parse(arg) for arg in addr.args ] first_annotated, first_offset = parsed[0] if first_annotated is False: # the first argument is not annotated. we don't support it. raise ValueError() if any([ annotated for annotated, _ in parsed[1:] ]): # more than one argument is annotated. we don't support it. raise ValueError() return True, first_offset - sum([ offset for _, offset in parsed[1:] ]) else: anno = next(iter(anno for anno in addr.annotations if isinstance(anno, StackLocationAnnotation)), None) if anno is None: if addr.op == 'BVV': return False, addr._model_concrete.value raise ValueError() return True, anno.offset # find the annotated AST try: annotated, offset = _parse(addr) except ValueError: return None if not annotated: return None return self._to_signed(offset)
python
def _addr_to_stack_offset(self, addr): """ Convert an address to a stack offset. :param claripy.ast.Base addr: The address to convert from. :return: A stack offset if the addr comes from the stack pointer, or None if the address does not come from the stack pointer. """ def _parse(addr): if addr.op == '__add__': # __add__ might have multiple arguments parsed = [ _parse(arg) for arg in addr.args ] annotated = [ True for annotated, _ in parsed if annotated is True ] if len(annotated) != 1: # either nothing is annotated, or more than one element is annotated raise ValueError() return True, sum([ offset for _, offset in parsed ]) elif addr.op == '__sub__': # __sub__ might have multiple arguments parsed = [ _parse(arg) for arg in addr.args ] first_annotated, first_offset = parsed[0] if first_annotated is False: # the first argument is not annotated. we don't support it. raise ValueError() if any([ annotated for annotated, _ in parsed[1:] ]): # more than one argument is annotated. we don't support it. raise ValueError() return True, first_offset - sum([ offset for _, offset in parsed[1:] ]) else: anno = next(iter(anno for anno in addr.annotations if isinstance(anno, StackLocationAnnotation)), None) if anno is None: if addr.op == 'BVV': return False, addr._model_concrete.value raise ValueError() return True, anno.offset # find the annotated AST try: annotated, offset = _parse(addr) except ValueError: return None if not annotated: return None return self._to_signed(offset)
['def', '_addr_to_stack_offset', '(', 'self', ',', 'addr', ')', ':', 'def', '_parse', '(', 'addr', ')', ':', 'if', 'addr', '.', 'op', '==', "'__add__'", ':', '# __add__ might have multiple arguments', 'parsed', '=', '[', '_parse', '(', 'arg', ')', 'for', 'arg', 'in', 'addr', '.', 'args', ']', 'annotated', '=', '[', 'True', 'for', 'annotated', ',', '_', 'in', 'parsed', 'if', 'annotated', 'is', 'True', ']', 'if', 'len', '(', 'annotated', ')', '!=', '1', ':', '# either nothing is annotated, or more than one element is annotated', 'raise', 'ValueError', '(', ')', 'return', 'True', ',', 'sum', '(', '[', 'offset', 'for', '_', ',', 'offset', 'in', 'parsed', ']', ')', 'elif', 'addr', '.', 'op', '==', "'__sub__'", ':', '# __sub__ might have multiple arguments', 'parsed', '=', '[', '_parse', '(', 'arg', ')', 'for', 'arg', 'in', 'addr', '.', 'args', ']', 'first_annotated', ',', 'first_offset', '=', 'parsed', '[', '0', ']', 'if', 'first_annotated', 'is', 'False', ':', "# the first argument is not annotated. we don't support it.", 'raise', 'ValueError', '(', ')', 'if', 'any', '(', '[', 'annotated', 'for', 'annotated', ',', '_', 'in', 'parsed', '[', '1', ':', ']', ']', ')', ':', "# more than one argument is annotated. we don't support it.", 'raise', 'ValueError', '(', ')', 'return', 'True', ',', 'first_offset', '-', 'sum', '(', '[', 'offset', 'for', '_', ',', 'offset', 'in', 'parsed', '[', '1', ':', ']', ']', ')', 'else', ':', 'anno', '=', 'next', '(', 'iter', '(', 'anno', 'for', 'anno', 'in', 'addr', '.', 'annotations', 'if', 'isinstance', '(', 'anno', ',', 'StackLocationAnnotation', ')', ')', ',', 'None', ')', 'if', 'anno', 'is', 'None', ':', 'if', 'addr', '.', 'op', '==', "'BVV'", ':', 'return', 'False', ',', 'addr', '.', '_model_concrete', '.', 'value', 'raise', 'ValueError', '(', ')', 'return', 'True', ',', 'anno', '.', 'offset', '# find the annotated AST', 'try', ':', 'annotated', ',', 'offset', '=', '_parse', '(', 'addr', ')', 'except', 'ValueError', ':', 'return', 'None', 'if', 'not', 'annotated', ':', 'return', 'None', 'return', 'self', '.', '_to_signed', '(', 'offset', ')']
Convert an address to a stack offset. :param claripy.ast.Base addr: The address to convert from. :return: A stack offset if the addr comes from the stack pointer, or None if the address does not come from the stack pointer.
['Convert', 'an', 'address', 'to', 'a', 'stack', 'offset', '.']
train
https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/analyses/variable_recovery/variable_recovery.py#L325-L372
3,456
UCSBarchlab/PyRTL
pyrtl/rtllib/multipliers.py
generalized_fma
def generalized_fma(mult_pairs, add_wires, signed=False, reducer=adders.wallace_reducer, adder_func=adders.kogge_stone): """Generates an optimized fused multiply adder. A generalized FMA unit that multiplies each pair of numbers in mult_pairs, then adds the resulting numbers and the values of the add wires all together to form an answer. This is faster than separate adders and multipliers because you avoid unnecessary adder structures for intermediate representations. :param mult_pairs: Either None (if there are no pairs to multiply) or a list of pairs of wires to multiply: [(mult1_1, mult1_2), ...] :param add_wires: Either None (if there are no individual items to add other than the mult_pairs), or a list of wires for adding on top of the result of the pair multiplication. :param Bool signed: Currently not supported (will be added in the future) The default will likely be changed to True, so if you want the smallest set of wires in the future, specify this as False :param reducer: (advanced) The tree reducer to use :param adder_func: (advanced) The adder to use to add the two results at the end :return WireVector: The result WireVector """ # first need to figure out the max length if mult_pairs: # Need to deal with the case when it is empty mult_max = max(len(m[0]) + len(m[1]) - 1 for m in mult_pairs) else: mult_max = 0 if add_wires: add_max = max(len(x) for x in add_wires) else: add_max = 0 longest_wire_len = max(add_max, mult_max) bits = [[] for i in range(longest_wire_len)] for mult_a, mult_b in mult_pairs: for i, a in enumerate(mult_a): for j, b in enumerate(mult_b): bits[i + j].append(a & b) for wire in add_wires: for bit_loc, bit in enumerate(wire): bits[bit_loc].append(bit) import math result_bitwidth = (longest_wire_len + int(math.ceil(math.log(len(add_wires) + len(mult_pairs), 2)))) return reducer(bits, result_bitwidth, adder_func)
python
def generalized_fma(mult_pairs, add_wires, signed=False, reducer=adders.wallace_reducer, adder_func=adders.kogge_stone): """Generates an optimized fused multiply adder. A generalized FMA unit that multiplies each pair of numbers in mult_pairs, then adds the resulting numbers and the values of the add wires all together to form an answer. This is faster than separate adders and multipliers because you avoid unnecessary adder structures for intermediate representations. :param mult_pairs: Either None (if there are no pairs to multiply) or a list of pairs of wires to multiply: [(mult1_1, mult1_2), ...] :param add_wires: Either None (if there are no individual items to add other than the mult_pairs), or a list of wires for adding on top of the result of the pair multiplication. :param Bool signed: Currently not supported (will be added in the future) The default will likely be changed to True, so if you want the smallest set of wires in the future, specify this as False :param reducer: (advanced) The tree reducer to use :param adder_func: (advanced) The adder to use to add the two results at the end :return WireVector: The result WireVector """ # first need to figure out the max length if mult_pairs: # Need to deal with the case when it is empty mult_max = max(len(m[0]) + len(m[1]) - 1 for m in mult_pairs) else: mult_max = 0 if add_wires: add_max = max(len(x) for x in add_wires) else: add_max = 0 longest_wire_len = max(add_max, mult_max) bits = [[] for i in range(longest_wire_len)] for mult_a, mult_b in mult_pairs: for i, a in enumerate(mult_a): for j, b in enumerate(mult_b): bits[i + j].append(a & b) for wire in add_wires: for bit_loc, bit in enumerate(wire): bits[bit_loc].append(bit) import math result_bitwidth = (longest_wire_len + int(math.ceil(math.log(len(add_wires) + len(mult_pairs), 2)))) return reducer(bits, result_bitwidth, adder_func)
['def', 'generalized_fma', '(', 'mult_pairs', ',', 'add_wires', ',', 'signed', '=', 'False', ',', 'reducer', '=', 'adders', '.', 'wallace_reducer', ',', 'adder_func', '=', 'adders', '.', 'kogge_stone', ')', ':', '# first need to figure out the max length', 'if', 'mult_pairs', ':', '# Need to deal with the case when it is empty', 'mult_max', '=', 'max', '(', 'len', '(', 'm', '[', '0', ']', ')', '+', 'len', '(', 'm', '[', '1', ']', ')', '-', '1', 'for', 'm', 'in', 'mult_pairs', ')', 'else', ':', 'mult_max', '=', '0', 'if', 'add_wires', ':', 'add_max', '=', 'max', '(', 'len', '(', 'x', ')', 'for', 'x', 'in', 'add_wires', ')', 'else', ':', 'add_max', '=', '0', 'longest_wire_len', '=', 'max', '(', 'add_max', ',', 'mult_max', ')', 'bits', '=', '[', '[', ']', 'for', 'i', 'in', 'range', '(', 'longest_wire_len', ')', ']', 'for', 'mult_a', ',', 'mult_b', 'in', 'mult_pairs', ':', 'for', 'i', ',', 'a', 'in', 'enumerate', '(', 'mult_a', ')', ':', 'for', 'j', ',', 'b', 'in', 'enumerate', '(', 'mult_b', ')', ':', 'bits', '[', 'i', '+', 'j', ']', '.', 'append', '(', 'a', '&', 'b', ')', 'for', 'wire', 'in', 'add_wires', ':', 'for', 'bit_loc', ',', 'bit', 'in', 'enumerate', '(', 'wire', ')', ':', 'bits', '[', 'bit_loc', ']', '.', 'append', '(', 'bit', ')', 'import', 'math', 'result_bitwidth', '=', '(', 'longest_wire_len', '+', 'int', '(', 'math', '.', 'ceil', '(', 'math', '.', 'log', '(', 'len', '(', 'add_wires', ')', '+', 'len', '(', 'mult_pairs', ')', ',', '2', ')', ')', ')', ')', 'return', 'reducer', '(', 'bits', ',', 'result_bitwidth', ',', 'adder_func', ')']
Generates an optimized fused multiply adder. A generalized FMA unit that multiplies each pair of numbers in mult_pairs, then adds the resulting numbers and the values of the add wires all together to form an answer. This is faster than separate adders and multipliers because you avoid unnecessary adder structures for intermediate representations. :param mult_pairs: Either None (if there are no pairs to multiply) or a list of pairs of wires to multiply: [(mult1_1, mult1_2), ...] :param add_wires: Either None (if there are no individual items to add other than the mult_pairs), or a list of wires for adding on top of the result of the pair multiplication. :param Bool signed: Currently not supported (will be added in the future) The default will likely be changed to True, so if you want the smallest set of wires in the future, specify this as False :param reducer: (advanced) The tree reducer to use :param adder_func: (advanced) The adder to use to add the two results at the end :return WireVector: The result WireVector
['Generates', 'an', 'optimized', 'fused', 'multiply', 'adder', '.']
train
https://github.com/UCSBarchlab/PyRTL/blob/0988e5c9c10ededd5e1f58d5306603f9edf4b3e2/pyrtl/rtllib/multipliers.py#L208-L258
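A small hardware sketch built on the record above (assumes PyRTL is installed): one multiply pair and one extra addend, i.e. out = a*b + c.

import pyrtl
from pyrtl.rtllib.multipliers import generalized_fma

a, b, c = pyrtl.Input(4, 'a'), pyrtl.Input(4, 'b'), pyrtl.Input(4, 'c')
result = generalized_fma(mult_pairs=[(a, b)], add_wires=[c])
out = pyrtl.Output(len(result), 'out')  # bitwidth is chosen by the reducer
out <<= result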
3,457
fake-name/ChromeController
ChromeController/Generator/Generated.py
ChromeRemoteDebugInterface.Storage_trackCacheStorageForOrigin
def Storage_trackCacheStorageForOrigin(self, origin): """ Function path: Storage.trackCacheStorageForOrigin Domain: Storage Method name: trackCacheStorageForOrigin Parameters: Required arguments: 'origin' (type: string) -> Security origin. No return value. Description: Registers origin to be notified when an update occurs to its cache storage list. """ assert isinstance(origin, (str,) ), "Argument 'origin' must be of type '['str']'. Received type: '%s'" % type( origin) subdom_funcs = self.synchronous_command('Storage.trackCacheStorageForOrigin', origin=origin) return subdom_funcs
python
def Storage_trackCacheStorageForOrigin(self, origin): """ Function path: Storage.trackCacheStorageForOrigin Domain: Storage Method name: trackCacheStorageForOrigin Parameters: Required arguments: 'origin' (type: string) -> Security origin. No return value. Description: Registers origin to be notified when an update occurs to its cache storage list. """ assert isinstance(origin, (str,) ), "Argument 'origin' must be of type '['str']'. Received type: '%s'" % type( origin) subdom_funcs = self.synchronous_command('Storage.trackCacheStorageForOrigin', origin=origin) return subdom_funcs
['def', 'Storage_trackCacheStorageForOrigin', '(', 'self', ',', 'origin', ')', ':', 'assert', 'isinstance', '(', 'origin', ',', '(', 'str', ',', ')', ')', ',', '"Argument \'origin\' must be of type \'[\'str\']\'. Received type: \'%s\'"', '%', 'type', '(', 'origin', ')', 'subdom_funcs', '=', 'self', '.', 'synchronous_command', '(', "'Storage.trackCacheStorageForOrigin'", ',', 'origin', '=', 'origin', ')', 'return', 'subdom_funcs']
Function path: Storage.trackCacheStorageForOrigin Domain: Storage Method name: trackCacheStorageForOrigin Parameters: Required arguments: 'origin' (type: string) -> Security origin. No return value. Description: Registers origin to be notified when an update occurs to its cache storage list.
['Function', 'path', ':', 'Storage', '.', 'trackCacheStorageForOrigin', 'Domain', ':', 'Storage', 'Method', 'name', ':', 'trackCacheStorageForOrigin', 'Parameters', ':', 'Required', 'arguments', ':', 'origin', '(', 'type', ':', 'string', ')', '-', '>', 'Security', 'origin', '.', 'No', 'return', 'value', '.', 'Description', ':', 'Registers', 'origin', 'to', 'be', 'notified', 'when', 'an', 'update', 'occurs', 'to', 'its', 'cache', 'storage', 'list', '.']
train
https://github.com/fake-name/ChromeController/blob/914dd136184e8f1165c7aa6ef30418aaf10c61f0/ChromeController/Generator/Generated.py#L6262-L6280
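A hedged usage sketch; the constructor call below is hypothetical and the real setup may need browser binary or debug-port arguments.

from ChromeController import ChromeRemoteDebugInterface

cr = ChromeRemoteDebugInterface()  # hypothetical: real setup may take binary/port arguments
cr.Storage_trackCacheStorageForOrigin('https://example.com')  # non-str origin raises AssertionError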
3,458
quantopian/metautils
metautils/compat.py
compose
def compose(*fs): """ Compose functions together in order: compose(f, g, h) = lambda n: f(g(h(n))) """ # Pull the iterator out into a tuple so we can call `composed` # more than once. rs = tuple(reversed(fs)) def composed(n): return reduce(lambda a, b: b(a), rs, n) # Attempt to make the function look pretty with # a fresh docstring and name. try: composed.__doc__ = 'lambda n: ' + _composed_doc(fs) except AttributeError: # One of our callables does not have a `__name__`, whatever. pass else: # We already know that for all `f` in `fs`, there exists `f.__name__` composed.__name__ = '_of_'.join(f.__name__ for f in fs) return composed
python
def compose(*fs): """ Compose functions together in order: compose(f, g, h) = lambda n: f(g(h(n))) """ # Pull the iterator out into a tuple so we can call `composed` # more than once. rs = tuple(reversed(fs)) def composed(n): return reduce(lambda a, b: b(a), rs, n) # Attempt to make the function look pretty with # a fresh docstring and name. try: composed.__doc__ = 'lambda n: ' + _composed_doc(fs) except AttributeError: # One of our callables does not have a `__name__`, whatever. pass else: # We already know that for all `f` in `fs`, there exists `f.__name__` composed.__name__ = '_of_'.join(f.__name__ for f in fs) return composed
['def', 'compose', '(', '*', 'fs', ')', ':', '# Pull the iterator out into a tuple so we can call `composed`', '# more than once.', 'rs', '=', 'tuple', '(', 'reversed', '(', 'fs', ')', ')', 'def', 'composed', '(', 'n', ')', ':', 'return', 'reduce', '(', 'lambda', 'a', ',', 'b', ':', 'b', '(', 'a', ')', ',', 'rs', ',', 'n', ')', '# Attempt to make the function look pretty with', '# a fresh docstring and name.', 'try', ':', 'composed', '.', '__doc__', '=', "'lambda n: '", '+', '_composed_doc', '(', 'fs', ')', 'except', 'AttributeError', ':', '# One of our callables does not have a `__name__`, whatever.', 'pass', 'else', ':', '# We already know that for all `f` in `fs`, there exists `f.__name__`', 'composed', '.', '__name__', '=', "'_of_'", '.', 'join', '(', 'f', '.', '__name__', 'for', 'f', 'in', 'fs', ')', 'return', 'composed']
Compose functions together in order: compose(f, g, h) = lambda n: f(g(h(n)))
['Compose', 'functions', 'together', 'in', 'order', ':']
train
https://github.com/quantopian/metautils/blob/10e11c5bd8bd7ded52b97261f61c3186607bd617/metautils/compat.py#L72-L96
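A self-checking usage sketch of the contract stated in the docstring; named functions are used so that the pretty __name__ branch succeeds.

from metautils.compat import compose

def inc(n): return n + 1
def double(n): return n * 2

f = compose(double, inc)   # double(inc(n))
assert f(3) == 8
print(f.__name__)          # 'double_of_inc'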
3,459
janpipek/physt
physt/plotting/vega.py
_create_axes
def _create_axes(hist: HistogramBase, vega: dict, kwargs: dict): """Create axes in the figure.""" xlabel = kwargs.pop("xlabel", hist.axis_names[0]) ylabel = kwargs.pop("ylabel", hist.axis_names[1] if len(hist.axis_names) >= 2 else None) vega["axes"] = [ {"orient": "bottom", "scale": "xscale", "title": xlabel}, {"orient": "left", "scale": "yscale", "title": ylabel} ]
python
def _create_axes(hist: HistogramBase, vega: dict, kwargs: dict): """Create axes in the figure.""" xlabel = kwargs.pop("xlabel", hist.axis_names[0]) ylabel = kwargs.pop("ylabel", hist.axis_names[1] if len(hist.axis_names) >= 2 else None) vega["axes"] = [ {"orient": "bottom", "scale": "xscale", "title": xlabel}, {"orient": "left", "scale": "yscale", "title": ylabel} ]
['def', '_create_axes', '(', 'hist', ':', 'HistogramBase', ',', 'vega', ':', 'dict', ',', 'kwargs', ':', 'dict', ')', ':', 'xlabel', '=', 'kwargs', '.', 'pop', '(', '"xlabel"', ',', 'hist', '.', 'axis_names', '[', '0', ']', ')', 'ylabel', '=', 'kwargs', '.', 'pop', '(', '"ylabel"', ',', 'hist', '.', 'axis_names', '[', '1', ']', 'if', 'len', '(', 'hist', '.', 'axis_names', ')', '>=', '2', 'else', 'None', ')', 'vega', '[', '"axes"', ']', '=', '[', '{', '"orient"', ':', '"bottom"', ',', '"scale"', ':', '"xscale"', ',', '"title"', ':', 'xlabel', '}', ',', '{', '"orient"', ':', '"left"', ',', '"scale"', ':', '"yscale"', ',', '"title"', ':', 'ylabel', '}', ']']
Create axes in the figure.
['Create', 'axes', 'in', 'the', 'figure', '.']
train
https://github.com/janpipek/physt/blob/6dd441b073514e7728235f50b2352d56aacf38d4/physt/plotting/vega.py#L673-L680
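A hedged sketch of the axes entry this private helper produces, using a minimal stand-in for HistogramBase rather than physt's public API.

from physt.plotting.vega import _create_axes

class _FakeHist:           # minimal stand-in: only axis_names is needed here
    axis_names = ("x", "y")

vega = {}
_create_axes(_FakeHist(), vega, {})
print(vega["axes"])
# [{'orient': 'bottom', 'scale': 'xscale', 'title': 'x'},
#  {'orient': 'left', 'scale': 'yscale', 'title': 'y'}]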
3,460
caffeinehit/django-oauth2-provider
provider/oauth2/models.py
AccessToken.get_expire_delta
def get_expire_delta(self, reference=None): """ Return the number of seconds until this token expires. """ if reference is None: reference = now() expiration = self.expires if timezone: if timezone.is_aware(reference) and timezone.is_naive(expiration): # MySQL doesn't support timezone for datetime fields # so we assume that the date was stored in the UTC timezone expiration = timezone.make_aware(expiration, timezone.utc) elif timezone.is_naive(reference) and timezone.is_aware(expiration): reference = timezone.make_aware(reference, timezone.utc) timedelta = expiration - reference return timedelta.days*86400 + timedelta.seconds
python
def get_expire_delta(self, reference=None): """ Return the number of seconds until this token expires. """ if reference is None: reference = now() expiration = self.expires if timezone: if timezone.is_aware(reference) and timezone.is_naive(expiration): # MySQL doesn't support timezone for datetime fields # so we assume that the date was stored in the UTC timezone expiration = timezone.make_aware(expiration, timezone.utc) elif timezone.is_naive(reference) and timezone.is_aware(expiration): reference = timezone.make_aware(reference, timezone.utc) timedelta = expiration - reference return timedelta.days*86400 + timedelta.seconds
['def', 'get_expire_delta', '(', 'self', ',', 'reference', '=', 'None', ')', ':', 'if', 'reference', 'is', 'None', ':', 'reference', '=', 'now', '(', ')', 'expiration', '=', 'self', '.', 'expires', 'if', 'timezone', ':', 'if', 'timezone', '.', 'is_aware', '(', 'reference', ')', 'and', 'timezone', '.', 'is_naive', '(', 'expiration', ')', ':', "# MySQL doesn't support timezone for datetime fields", '# so we assume that the date was stored in the UTC timezone', 'expiration', '=', 'timezone', '.', 'make_aware', '(', 'expiration', ',', 'timezone', '.', 'utc', ')', 'elif', 'timezone', '.', 'is_naive', '(', 'reference', ')', 'and', 'timezone', '.', 'is_aware', '(', 'expiration', ')', ':', 'reference', '=', 'timezone', '.', 'make_aware', '(', 'reference', ',', 'timezone', '.', 'utc', ')', 'timedelta', '=', 'expiration', '-', 'reference', 'return', 'timedelta', '.', 'days', '*', '86400', '+', 'timedelta', '.', 'seconds']
Return the number of seconds until this token expires.
['Return', 'the', 'number', 'of', 'seconds', 'until', 'this', 'token', 'expires', '.']
train
https://github.com/caffeinehit/django-oauth2-provider/blob/6b5bc0d3ad706d2aaa47fa476f38406cddd01236/provider/oauth2/models.py#L149-L166
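A standalone check of the final arithmetic above: Python's timedelta exposes days and seconds separately, hence days*86400 + seconds.

from datetime import datetime

expiration = datetime(2024, 1, 2, 0, 0, 30)
reference = datetime(2024, 1, 1)
delta = expiration - reference
assert delta.days * 86400 + delta.seconds == 86430  # one day plus 30 seconds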
3,461
NASA-AMMOS/AIT-Core
ait/core/api.py
GeventDeque.extend
def extend(self, iterable): """Extend the right side of this GeventDeque by appending elements from the iterable argument. """ self._deque.extend(iterable) if len(self._deque) > 0: self.notEmpty.set()
python
def extend(self, iterable): """Extend the right side of this GeventDeque by appending elements from the iterable argument. """ self._deque.extend(iterable) if len(self._deque) > 0: self.notEmpty.set()
['def', 'extend', '(', 'self', ',', 'iterable', ')', ':', 'self', '.', '_deque', '.', 'extend', '(', 'iterable', ')', 'if', 'len', '(', 'self', '.', '_deque', ')', '>', '0', ':', 'self', '.', 'notEmpty', '.', 'set', '(', ')']
Extend the right side of this GeventDeque by appending elements from the iterable argument.
['Extend', 'the', 'right', 'side', 'of', 'this', 'GeventDeque', 'by', 'appending', 'elements', 'from', 'the', 'iterable', 'argument', '.']
train
https://github.com/NASA-AMMOS/AIT-Core/blob/9d85bd9c738e7a6a6fbdff672bea708238b02a3a/ait/core/api.py#L299-L305
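A hedged usage sketch; the zero-argument constructor is an assumption, while the notEmpty event flag comes straight from the record.

from ait.core.api import GeventDeque

d = GeventDeque()               # assumed default constructor
d.extend([1, 2, 3])
assert d.notEmpty.is_set()      # a waiting consumer can now proceed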
3,462
ratcave/ratcave
ratcave/mesh.py
Mesh._fill_vao
def _fill_vao(self): """Put array location in VAO for shader in same order as arrays given to Mesh.""" with self.vao: self.vbos = [] for loc, verts in enumerate(self.arrays): vbo = VBO(verts) self.vbos.append(vbo) self.vao.assign_vertex_attrib_location(vbo, loc)
python
def _fill_vao(self): """Put array location in VAO for shader in same order as arrays given to Mesh.""" with self.vao: self.vbos = [] for loc, verts in enumerate(self.arrays): vbo = VBO(verts) self.vbos.append(vbo) self.vao.assign_vertex_attrib_location(vbo, loc)
['def', '_fill_vao', '(', 'self', ')', ':', 'with', 'self', '.', 'vao', ':', 'self', '.', 'vbos', '=', '[', ']', 'for', 'loc', ',', 'verts', 'in', 'enumerate', '(', 'self', '.', 'arrays', ')', ':', 'vbo', '=', 'VBO', '(', 'verts', ')', 'self', '.', 'vbos', '.', 'append', '(', 'vbo', ')', 'self', '.', 'vao', '.', 'assign_vertex_attrib_location', '(', 'vbo', ',', 'loc', ')']
Put array location in VAO for shader in same order as arrays given to Mesh.
['Put', 'array', 'location', 'in', 'VAO', 'for', 'shader', 'in', 'same', 'order', 'as', 'arrays', 'given', 'to', 'Mesh', '.']
train
https://github.com/ratcave/ratcave/blob/e3862cdaba100ac2c6c78c08c4b09638e0c88fd4/ratcave/mesh.py#L192-L199
3,463
zeroSteiner/smoke-zephyr
smoke_zephyr/configuration.py
Configuration.get_missing
def get_missing(self, verify_file): """ Use a verification configuration which has a list of required options and their respective types. This information is used to identify missing and incompatible options in the loaded configuration. :param str verify_file: The file to load for verification data. :return: A dictionary of missing and incompatible settings. :rtype: dict """ vconf = Configuration(verify_file) missing = {} for setting, setting_type in vconf.get('settings').items(): if not self.has_option(setting): missing['missing'] = missing.get('missing', []) missing['missing'].append(setting) elif not type(self.get(setting)).__name__ == setting_type: missing['incompatible'] = missing.get('incompatible', []) missing['incompatible'].append((setting, setting_type)) return missing
python
def get_missing(self, verify_file): """ Use a verification configuration which has a list of required options and their respective types. This information is used to identify missing and incompatible options in the loaded configuration. :param str verify_file: The file to load for verification data. :return: A dictionary of missing and incompatible settings. :rtype: dict """ vconf = Configuration(verify_file) missing = {} for setting, setting_type in vconf.get('settings').items(): if not self.has_option(setting): missing['missing'] = missing.get('missing', []) missing['missing'].append(setting) elif not type(self.get(setting)).__name__ == setting_type: missing['incompatible'] = missing.get('incompatible', []) missing['incompatible'].append((setting, setting_type)) return missing
['def', 'get_missing', '(', 'self', ',', 'verify_file', ')', ':', 'vconf', '=', 'Configuration', '(', 'verify_file', ')', 'missing', '=', '{', '}', 'for', 'setting', ',', 'setting_type', 'in', 'vconf', '.', 'get', '(', "'settings'", ')', '.', 'items', '(', ')', ':', 'if', 'not', 'self', '.', 'has_option', '(', 'setting', ')', ':', 'missing', '[', "'missing'", ']', '=', 'missing', '.', 'get', '(', "'missing'", ',', '[', ']', ')', 'missing', '[', "'missing'", ']', '.', 'append', '(', 'setting', ')', 'elif', 'not', 'type', '(', 'self', '.', 'get', '(', 'setting', ')', ')', '.', '__name__', '==', 'setting_type', ':', 'missing', '[', "'incompatible'", ']', '=', 'missing', '.', 'get', '(', "'incompatible'", ',', '[', ']', ')', 'missing', '[', "'incompatible'", ']', '.', 'append', '(', '(', 'setting', ',', 'setting_type', ')', ')', 'return', 'missing']
Use a verification configuration which has a list of required options and their respective types. This information is used to identify missing and incompatible options in the loaded configuration. :param str verify_file: The file to load for verification data. :return: A dictionary of missing and incompatible settings. :rtype: dict
['Use', 'a', 'verification', 'configuration', 'which', 'has', 'a', 'list', 'of', 'required', 'options', 'and', 'their', 'respective', 'types', '.', 'This', 'information', 'is', 'used', 'to', 'identify', 'missing', 'and', 'incompatible', 'options', 'in', 'the', 'loaded', 'configuration', '.']
train
https://github.com/zeroSteiner/smoke-zephyr/blob/a6d2498aeacc72ee52e7806f783a4d83d537ffb2/smoke_zephyr/configuration.py#L197-L216
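A hedged usage sketch; both file names are hypothetical, and the verification file is assumed to map option names to type names as the loop above expects.

from smoke_zephyr.configuration import Configuration

# verify.yml (hypothetical): settings: {server.hostname: str, server.port: int}
conf = Configuration('app_config.yml')       # hypothetical application config
problems = conf.get_missing('verify.yml')
print(problems.get('missing', []), problems.get('incompatible', []))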
3,464
juju/charm-helpers
charmhelpers/core/hookenv.py
expected_peer_units
def expected_peer_units(): """Get a generator for units we expect to join peer relation based on goal-state. The local unit is excluded from the result to make it easy to gauge completion of all peers joining the relation with existing hook tools. Example usage: log('peer {} of {} joined peer relation' .format(len(related_units()), len(list(expected_peer_units())))) This function will raise NotImplementedError if used with juju versions without goal-state support. :returns: iterator :rtype: types.GeneratorType :raises: NotImplementedError """ if not has_juju_version("2.4.0"): # goal-state first appeared in 2.4.0. raise NotImplementedError("goal-state") _goal_state = goal_state() return (key for key in _goal_state['units'] if '/' in key and key != local_unit())
python
def expected_peer_units(): """Get a generator for units we expect to join peer relation based on goal-state. The local unit is excluded from the result to make it easy to gauge completion of all peers joining the relation with existing hook tools. Example usage: log('peer {} of {} joined peer relation' .format(len(related_units()), len(list(expected_peer_units())))) This function will raise NotImplementedError if used with juju versions without goal-state support. :returns: iterator :rtype: types.GeneratorType :raises: NotImplementedError """ if not has_juju_version("2.4.0"): # goal-state first appeared in 2.4.0. raise NotImplementedError("goal-state") _goal_state = goal_state() return (key for key in _goal_state['units'] if '/' in key and key != local_unit())
['def', 'expected_peer_units', '(', ')', ':', 'if', 'not', 'has_juju_version', '(', '"2.4.0"', ')', ':', '# goal-state first appeared in 2.4.0.', 'raise', 'NotImplementedError', '(', '"goal-state"', ')', '_goal_state', '=', 'goal_state', '(', ')', 'return', '(', 'key', 'for', 'key', 'in', '_goal_state', '[', "'units'", ']', 'if', "'/'", 'in', 'key', 'and', 'key', '!=', 'local_unit', '(', ')', ')']
Get a generator for units we expect to join peer relation based on goal-state. The local unit is excluded from the result to make it easy to gauge completion of all peers joining the relation with existing hook tools. Example usage: log('peer {} of {} joined peer relation' .format(len(related_units()), len(list(expected_peer_units())))) This function will raise NotImplementedError if used with juju versions without goal-state support. :returns: iterator :rtype: types.GeneratorType :raises: NotImplementedError
['Get', 'a', 'generator', 'for', 'units', 'we', 'expect', 'to', 'join', 'peer', 'relation', 'based', 'on', 'goal', '-', 'state', '.']
train
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/core/hookenv.py#L518-L542
3,465
Azure/blobxfer
blobxfer/operations/azure/__init__.py
StorageAccount._credential_allows_container_list
def _credential_allows_container_list(self): # type: (StorageAccount) -> bool """Check if container list is allowed :param StorageAccount self: this :rtype: bool :return: if container list is allowed """ if self.is_sas: sasparts = self.key.split('&') caccess = self.can_create_containers # search for container signed resource for service level sas if not caccess: for part in sasparts: tmp = part.split('=') if tmp[0] == 'sr': caccess = 'c' in tmp[1] or 's' in tmp[1] break elif tmp[0] == 'si': # assume sas policies allow container list return True # search for list permission if caccess: for part in sasparts: tmp = part.split('=') if tmp[0] == 'sp': return 'l' in tmp[1] # sas doesn't allow container level list return False else: # storage account key always allows container list return True
python
def _credential_allows_container_list(self): # type: (StorageAccount) -> bool """Check if container list is allowed :param StorageAccount self: this :rtype: bool :return: if container list is allowed """ if self.is_sas: sasparts = self.key.split('&') caccess = self.can_create_containers # search for container signed resource for service level sas if not caccess: for part in sasparts: tmp = part.split('=') if tmp[0] == 'sr': caccess = 'c' in tmp[1] or 's' in tmp[1] break elif tmp[0] == 'si': # assume sas policies allow container list return True # search for list permission if caccess: for part in sasparts: tmp = part.split('=') if tmp[0] == 'sp': return 'l' in tmp[1] # sas doesn't allow container level list return False else: # storage account key always allows container list return True
['def', '_credential_allows_container_list', '(', 'self', ')', ':', '# type: (StorageAccount) -> bool', 'if', 'self', '.', 'is_sas', ':', 'sasparts', '=', 'self', '.', 'key', '.', 'split', '(', "'&'", ')', 'caccess', '=', 'self', '.', 'can_create_containers', '# search for container signed resource for service level sas', 'if', 'not', 'caccess', ':', 'for', 'part', 'in', 'sasparts', ':', 'tmp', '=', 'part', '.', 'split', '(', "'='", ')', 'if', 'tmp', '[', '0', ']', '==', "'sr'", ':', 'caccess', '=', "'c'", 'in', 'tmp', '[', '1', ']', 'or', "'s'", 'in', 'tmp', '[', '1', ']', 'break', 'elif', 'tmp', '[', '0', ']', '==', "'si'", ':', '# assume sas policies allow container list', 'return', 'True', '# search for list permission', 'if', 'caccess', ':', 'for', 'part', 'in', 'sasparts', ':', 'tmp', '=', 'part', '.', 'split', '(', "'='", ')', 'if', 'tmp', '[', '0', ']', '==', "'sp'", ':', 'return', "'l'", 'in', 'tmp', '[', '1', ']', "# sas doesn't allow container level list", 'return', 'False', 'else', ':', '# storage account key always allows container list', 'return', 'True']
Check if container list is allowed :param StorageAccount self: this :rtype: bool :return: if container list is allowed
['Check', 'if', 'container', 'list', 'is', 'allowed', ':', 'param', 'StorageAccount', 'self', ':', 'this', ':', 'rtype', ':', 'bool', ':', 'return', ':', 'if', 'container', 'list', 'is', 'allowed']
train
https://github.com/Azure/blobxfer/blob/3eccbe7530cc6a20ab2d30f9e034b6f021817f34/blobxfer/operations/azure/__init__.py#L252-L282
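A standalone mimic of the service-SAS branch above, outside blobxfer: a container-scoped token (sr=c) whose permissions include list (l).

sas = 'sv=2020-08-04&sr=c&sp=rl&sig=REDACTED'
parts = dict(p.split('=', 1) for p in sas.split('&'))
container_scoped = 'c' in parts.get('sr', '') or 's' in parts.get('sr', '')
allows_list = container_scoped and 'l' in parts.get('sp', '')
assert allows_list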
3,466
linuxsoftware/ls.joyous
ls/joyous/models/events.py
PostponementPage.postponed_from_when
def postponed_from_when(self): """ A string describing when the event was postponed from (in the local time zone). """ what = self.what if what: return _("{what} from {when}").format(what=what, when=self.cancellationpage.when)
python
def postponed_from_when(self): """ A string describing when the event was postponed from (in the local time zone). """ what = self.what if what: return _("{what} from {when}").format(what=what, when=self.cancellationpage.when)
['def', 'postponed_from_when', '(', 'self', ')', ':', 'what', '=', 'self', '.', 'what', 'if', 'what', ':', 'return', '_', '(', '"{what} from {when}"', ')', '.', 'format', '(', 'what', '=', 'what', ',', 'when', '=', 'self', '.', 'cancellationpage', '.', 'when', ')']
A string describing when the event was postponed from (in the local time zone).
['A', 'string', 'describing', 'when', 'the', 'event', 'was', 'postponed', 'from', '(', 'in', 'the', 'local', 'time', 'zone', ')', '.']
train
https://github.com/linuxsoftware/ls.joyous/blob/316283140ca5171a68ad3170a5964fdc89be0b56/ls/joyous/models/events.py#L1740-L1747
3,467
quintusdias/glymur
glymur/codestream.py
Codestream._parse_ppt_segment
def _parse_ppt_segment(self, fptr): """Parse the PPT segment. The packet headers are not parsed, i.e. they remain "uninterpreted" raw data buffers. Parameters ---------- fptr : file object The file to parse. Returns ------- PPTSegment The current PPT segment. """ offset = fptr.tell() - 2 read_buffer = fptr.read(3) length, zppt = struct.unpack('>HB', read_buffer) numbytes = length - 3 ippt = fptr.read(numbytes) return PPTsegment(zppt, ippt, length, offset)
python
def _parse_ppt_segment(self, fptr): """Parse the PPT segment. The packet headers are not parsed, i.e. they remain "uninterpreted" raw data buffers. Parameters ---------- fptr : file object The file to parse. Returns ------- PPTSegment The current PPT segment. """ offset = fptr.tell() - 2 read_buffer = fptr.read(3) length, zppt = struct.unpack('>HB', read_buffer) numbytes = length - 3 ippt = fptr.read(numbytes) return PPTsegment(zppt, ippt, length, offset)
['def', '_parse_ppt_segment', '(', 'self', ',', 'fptr', ')', ':', 'offset', '=', 'fptr', '.', 'tell', '(', ')', '-', '2', 'read_buffer', '=', 'fptr', '.', 'read', '(', '3', ')', 'length', ',', 'zppt', '=', 'struct', '.', 'unpack', '(', "'>HB'", ',', 'read_buffer', ')', 'numbytes', '=', 'length', '-', '3', 'ippt', '=', 'fptr', '.', 'read', '(', 'numbytes', ')', 'return', 'PPTsegment', '(', 'zppt', ',', 'ippt', ',', 'length', ',', 'offset', ')']
Parse the PPT segment. The packet headers are not parsed, i.e. they remain "uninterpreted" raw data buffers. Parameters ---------- fptr : file object The file to parse. Returns ------- PPTSegment The current PPT segment.
['Parse', 'the', 'PPT', 'segment', '.']
train
https://github.com/quintusdias/glymur/blob/8b8fb091130fff00f1028dc82219e69e3f9baf6d/glymur/codestream.py#L541-L567
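The length/Zppt decoding above can be exercised without glymur's file plumbing. A minimal sketch, assuming an in-memory bytes buffer; parse_ppt_header and the toy segment bytes are invented for illustration, and the PPTsegment container is omitted:

import struct

def parse_ppt_header(buf, offset=0):
    # A PPT marker segment body starts with a 2-byte big-endian length
    # followed by a 1-byte Zppt index; the remaining length - 3 bytes are
    # kept as the raw, uninterpreted packed packet headers (Ippt).
    length, zppt = struct.unpack_from('>HB', buf, offset)
    ippt = buf[offset + 3:offset + length]
    return length, zppt, ippt

# Toy segment: length=6, zppt=0, three payload bytes.
print(parse_ppt_header(b'\x00\x06\x00abc'))  # (6, 0, b'abc')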
3,468
mkoura/dump2polarion
dump2polarion/dumper_cli.py
process_args
def process_args(args): """Processes passed arguments.""" passed_args = args if isinstance(args, argparse.Namespace): passed_args = vars(passed_args) elif hasattr(args, "to_dict"): passed_args = passed_args.to_dict() return Box(passed_args, frozen_box=True, default_box=True)
python
def process_args(args): """Processes passed arguments.""" passed_args = args if isinstance(args, argparse.Namespace): passed_args = vars(passed_args) elif hasattr(args, "to_dict"): passed_args = passed_args.to_dict() return Box(passed_args, frozen_box=True, default_box=True)
['def', 'process_args', '(', 'args', ')', ':', 'passed_args', '=', 'args', 'if', 'isinstance', '(', 'args', ',', 'argparse', '.', 'Namespace', ')', ':', 'passed_args', '=', 'vars', '(', 'passed_args', ')', 'elif', 'hasattr', '(', 'args', ',', '"to_dict"', ')', ':', 'passed_args', '=', 'passed_args', '.', 'to_dict', '(', ')', 'return', 'Box', '(', 'passed_args', ',', 'frozen_box', '=', 'True', ',', 'default_box', '=', 'True', ')']
Processes passed arguments.
['Processes', 'passed', 'arguments', '.']
train
https://github.com/mkoura/dump2polarion/blob/f4bd24e9d5070e282aad15f1e8bb514c0525cd37/dump2polarion/dumper_cli.py#L79-L87
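A quick usage sketch of the Namespace-to-Box conversion performed above, assuming the third-party python-box package is installed; the --dry-run flag is made up for illustration:

import argparse
from box import Box  # pip install python-box

parser = argparse.ArgumentParser()
parser.add_argument('--dry-run', action='store_true')
args = parser.parse_args(['--dry-run'])

# vars() turns the Namespace into a plain dict; frozen_box makes the
# result read-only and default_box yields empty Boxes for missing keys
# instead of raising, which is handy for optional settings.
conf = Box(vars(args), frozen_box=True, default_box=True)
print(conf.dry_run)        # True
print(bool(conf.missing))  # False: default_box absorbs absent keys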
3,469
phoebe-project/phoebe2
phoebe/backend/universe.py
System.populate_observables
def populate_observables(self, time, kinds, datasets, ignore_effects=False): """ TODO: add documentation ignore_effects: whether to ignore reflection and features (useful for computing luminosities) """ if self.irrad_method != 'none' and not ignore_effects: # TODO: only for kinds that require intensities (i.e. not orbit or # dynamical RVs, etc) self.handle_reflection() for kind, dataset in zip(kinds, datasets): for starref, body in self.items(): body.populate_observable(time, kind, dataset)
python
def populate_observables(self, time, kinds, datasets, ignore_effects=False): """ TODO: add documentation ignore_effects: whether to ignore reflection and features (useful for computing luminosities) """ if self.irrad_method != 'none' and not ignore_effects: # TODO: only for kinds that require intensities (i.e. not orbit or # dynamical RVs, etc) self.handle_reflection() for kind, dataset in zip(kinds, datasets): for starref, body in self.items(): body.populate_observable(time, kind, dataset)
['def', 'populate_observables', '(', 'self', ',', 'time', ',', 'kinds', ',', 'datasets', ',', 'ignore_effects', '=', 'False', ')', ':', 'if', 'self', '.', 'irrad_method', '!=', "'none'", 'and', 'not', 'ignore_effects', ':', '# TODO: only for kinds that require intensities (i.e. not orbit or', '# dynamical RVs, etc)', 'self', '.', 'handle_reflection', '(', ')', 'for', 'kind', ',', 'dataset', 'in', 'zip', '(', 'kinds', ',', 'datasets', ')', ':', 'for', 'starref', ',', 'body', 'in', 'self', '.', 'items', '(', ')', ':', 'body', '.', 'populate_observable', '(', 'time', ',', 'kind', ',', 'dataset', ')']
TODO: add documentation ignore_effects: whether to ignore reflection and features (useful for computing luminosities)
['TODO', ':', 'add', 'documentation']
train
https://github.com/phoebe-project/phoebe2/blob/e64b8be683977064e2d55dd1b3ac400f64c3e379/phoebe/backend/universe.py#L301-L316
3,470
ev3dev/ev3dev-lang-python
ev3dev2/sound.py
Sound.play_song
def play_song(self, song, tempo=120, delay=0.05): """ Plays a song provided as a list of tuples containing the note name and its value using music conventional notation instead of numerical values for frequency and duration. It supports symbolic notes (e.g. ``A4``, ``D#3``, ``Gb5``) and durations (e.g. ``q``, ``h``). For an exhaustive list of accepted note symbols and values, have a look at the ``_NOTE_FREQUENCIES`` and ``_NOTE_VALUES`` private dictionaries in the source code. The value can be suffixed by modifiers: - a *divider* introduced by a ``/`` to obtain triplets for instance (e.g. ``q/3`` for a triplet of eighth notes) - a *multiplier* introduced by ``*`` (e.g. ``*1.5`` is a dotted note). Shortcuts exist for common modifiers: - ``3`` produces a triplet member note. For instance `e3` gives a triplet of eighth notes, i.e. 3 eighth notes in the duration of a single quarter. You must ensure that 3 triplet notes are defined in sequence to match the count, otherwise the result will not be the expected one. - ``.`` produces a dotted note, i.e. whose duration is one and a half times the base one. Double dots are not currently supported. Example:: >>> # A long time ago in a galaxy far, >>> # far away... >>> Sound.play_song(( >>> ('D4', 'e3'), # intro anacrouse >>> ('D4', 'e3'), >>> ('D4', 'e3'), >>> ('G4', 'h'), # meas 1 >>> ('D5', 'h'), >>> ('C5', 'e3'), # meas 2 >>> ('B4', 'e3'), >>> ('A4', 'e3'), >>> ('G5', 'h'), >>> ('D5', 'q'), >>> ('C5', 'e3'), # meas 3 >>> ('B4', 'e3'), >>> ('A4', 'e3'), >>> ('G5', 'h'), >>> ('D5', 'q'), >>> ('C5', 'e3'), # meas 4 >>> ('B4', 'e3'), >>> ('C5', 'e3'), >>> ('A4', 'h.'), >>> )) .. important:: Only 4/4 signature songs are supported with respect to note durations. :param iterable[tuple(string, string)] song: the song :param int tempo: the song tempo, given in quarters per minute :param float delay: delay between notes (in seconds) :return: the spawn subprocess from ``subprocess.Popen`` :raises ValueError: if invalid note in song or invalid play parameters """ if tempo <= 0: raise ValueError('invalid tempo (%s)' % tempo) if delay < 0: raise ValueError('invalid delay (%s)' % delay) delay_ms = int(delay * 1000) meas_duration_ms = 60000 / tempo * 4 # we only support 4/4 bars, hence "* 4" def beep_args(note, value): """ Builds the arguments string for producing a beep matching the requested note and value. Args: note (str): the note note and octave value (str): the note value expression Returns: str: the arguments to be passed to the beep command """ freq = self._NOTE_FREQUENCIES.get(note.upper(), self._NOTE_FREQUENCIES[note]) if '/' in value: base, factor = value.split('/') duration_ms = meas_duration_ms * self._NOTE_VALUES[base] / float(factor) elif '*' in value: base, factor = value.split('*') duration_ms = meas_duration_ms * self._NOTE_VALUES[base] * float(factor) elif value.endswith('.'): base = value[:-1] duration_ms = meas_duration_ms * self._NOTE_VALUES[base] * 1.5 elif value.endswith('3'): base = value[:-1] duration_ms = meas_duration_ms * self._NOTE_VALUES[base] * 2 / 3 else: duration_ms = meas_duration_ms * self._NOTE_VALUES[value] return '-f %d -l %d -D %d' % (freq, duration_ms, delay_ms) try: return self.beep(' -n '.join( [beep_args(note, value) for (note, value) in song] )) except KeyError as e: raise ValueError('invalid note (%s)' % e)
python
def play_song(self, song, tempo=120, delay=0.05): """ Plays a song provided as a list of tuples containing the note name and its value using music conventional notation instead of numerical values for frequency and duration. It supports symbolic notes (e.g. ``A4``, ``D#3``, ``Gb5``) and durations (e.g. ``q``, ``h``). For an exhaustive list of accepted note symbols and values, have a look at the ``_NOTE_FREQUENCIES`` and ``_NOTE_VALUES`` private dictionaries in the source code. The value can be suffixed by modifiers: - a *divider* introduced by a ``/`` to obtain triplets for instance (e.g. ``q/3`` for a triplet of eighth notes) - a *multiplier* introduced by ``*`` (e.g. ``*1.5`` is a dotted note). Shortcuts exist for common modifiers: - ``3`` produces a triplet member note. For instance `e3` gives a triplet of eighth notes, i.e. 3 eighth notes in the duration of a single quarter. You must ensure that 3 triplet notes are defined in sequence to match the count, otherwise the result will not be the expected one. - ``.`` produces a dotted note, i.e. whose duration is one and a half times the base one. Double dots are not currently supported. Example:: >>> # A long time ago in a galaxy far, >>> # far away... >>> Sound.play_song(( >>> ('D4', 'e3'), # intro anacrouse >>> ('D4', 'e3'), >>> ('D4', 'e3'), >>> ('G4', 'h'), # meas 1 >>> ('D5', 'h'), >>> ('C5', 'e3'), # meas 2 >>> ('B4', 'e3'), >>> ('A4', 'e3'), >>> ('G5', 'h'), >>> ('D5', 'q'), >>> ('C5', 'e3'), # meas 3 >>> ('B4', 'e3'), >>> ('A4', 'e3'), >>> ('G5', 'h'), >>> ('D5', 'q'), >>> ('C5', 'e3'), # meas 4 >>> ('B4', 'e3'), >>> ('C5', 'e3'), >>> ('A4', 'h.'), >>> )) .. important:: Only 4/4 signature songs are supported with respect to note durations. :param iterable[tuple(string, string)] song: the song :param int tempo: the song tempo, given in quarters per minute :param float delay: delay between notes (in seconds) :return: the spawn subprocess from ``subprocess.Popen`` :raises ValueError: if invalid note in song or invalid play parameters """ if tempo <= 0: raise ValueError('invalid tempo (%s)' % tempo) if delay < 0: raise ValueError('invalid delay (%s)' % delay) delay_ms = int(delay * 1000) meas_duration_ms = 60000 / tempo * 4 # we only support 4/4 bars, hence "* 4" def beep_args(note, value): """ Builds the arguments string for producing a beep matching the requested note and value. Args: note (str): the note note and octave value (str): the note value expression Returns: str: the arguments to be passed to the beep command """ freq = self._NOTE_FREQUENCIES.get(note.upper(), self._NOTE_FREQUENCIES[note]) if '/' in value: base, factor = value.split('/') duration_ms = meas_duration_ms * self._NOTE_VALUES[base] / float(factor) elif '*' in value: base, factor = value.split('*') duration_ms = meas_duration_ms * self._NOTE_VALUES[base] * float(factor) elif value.endswith('.'): base = value[:-1] duration_ms = meas_duration_ms * self._NOTE_VALUES[base] * 1.5 elif value.endswith('3'): base = value[:-1] duration_ms = meas_duration_ms * self._NOTE_VALUES[base] * 2 / 3 else: duration_ms = meas_duration_ms * self._NOTE_VALUES[value] return '-f %d -l %d -D %d' % (freq, duration_ms, delay_ms) try: return self.beep(' -n '.join( [beep_args(note, value) for (note, value) in song] )) except KeyError as e: raise ValueError('invalid note (%s)' % e)
['def', 'play_song', '(', 'self', ',', 'song', ',', 'tempo', '=', '120', ',', 'delay', '=', '0.05', ')', ':', 'if', 'tempo', '<=', '0', ':', 'raise', 'ValueError', '(', "'invalid tempo (%s)'", '%', 'tempo', ')', 'if', 'delay', '<', '0', ':', 'raise', 'ValueError', '(', "'invalid delay (%s)'", '%', 'delay', ')', 'delay_ms', '=', 'int', '(', 'delay', '*', '1000', ')', 'meas_duration_ms', '=', '60000', '/', 'tempo', '*', '4', '# we only support 4/4 bars, hence "* 4"', 'def', 'beep_args', '(', 'note', ',', 'value', ')', ':', '""" Builds the arguments string for producing a beep matching\n the requested note and value.\n\n Args:\n note (str): the note note and octave\n value (str): the note value expression\n Returns:\n str: the arguments to be passed to the beep command\n """', 'freq', '=', 'self', '.', '_NOTE_FREQUENCIES', '.', 'get', '(', 'note', '.', 'upper', '(', ')', ',', 'self', '.', '_NOTE_FREQUENCIES', '[', 'note', ']', ')', 'if', "'/'", 'in', 'value', ':', 'base', ',', 'factor', '=', 'value', '.', 'split', '(', "'/'", ')', 'duration_ms', '=', 'meas_duration_ms', '*', 'self', '.', '_NOTE_VALUES', '[', 'base', ']', '/', 'float', '(', 'factor', ')', 'elif', "'*'", 'in', 'value', ':', 'base', ',', 'factor', '=', 'value', '.', 'split', '(', "'*'", ')', 'duration_ms', '=', 'meas_duration_ms', '*', 'self', '.', '_NOTE_VALUES', '[', 'base', ']', '*', 'float', '(', 'factor', ')', 'elif', 'value', '.', 'endswith', '(', "'.'", ')', ':', 'base', '=', 'value', '[', ':', '-', '1', ']', 'duration_ms', '=', 'meas_duration_ms', '*', 'self', '.', '_NOTE_VALUES', '[', 'base', ']', '*', '1.5', 'elif', 'value', '.', 'endswith', '(', "'3'", ')', ':', 'base', '=', 'value', '[', ':', '-', '1', ']', 'duration_ms', '=', 'meas_duration_ms', '*', 'self', '.', '_NOTE_VALUES', '[', 'base', ']', '*', '2', '/', '3', 'else', ':', 'duration_ms', '=', 'meas_duration_ms', '*', 'self', '.', '_NOTE_VALUES', '[', 'value', ']', 'return', "'-f %d -l %d -D %d'", '%', '(', 'freq', ',', 'duration_ms', ',', 'delay_ms', ')', 'try', ':', 'return', 'self', '.', 'beep', '(', "' -n '", '.', 'join', '(', '[', 'beep_args', '(', 'note', ',', 'value', ')', 'for', '(', 'note', ',', 'value', ')', 'in', 'song', ']', ')', ')', 'except', 'KeyError', 'as', 'e', ':', 'raise', 'ValueError', '(', "'invalid note (%s)'", '%', 'e', ')']
Plays a song provided as a list of tuples containing the note name and its value using music conventional notation instead of numerical values for frequency and duration. It supports symbolic notes (e.g. ``A4``, ``D#3``, ``Gb5``) and durations (e.g. ``q``, ``h``). For an exhaustive list of accepted note symbols and values, have a look at the ``_NOTE_FREQUENCIES`` and ``_NOTE_VALUES`` private dictionaries in the source code. The value can be suffixed by modifiers: - a *divider* introduced by a ``/`` to obtain triplets for instance (e.g. ``q/3`` for a triplet of eighth notes) - a *multiplier* introduced by ``*`` (e.g. ``*1.5`` is a dotted note). Shortcuts exist for common modifiers: - ``3`` produces a triplet member note. For instance `e3` gives a triplet of eighth notes, i.e. 3 eighth notes in the duration of a single quarter. You must ensure that 3 triplet notes are defined in sequence to match the count, otherwise the result will not be the expected one. - ``.`` produces a dotted note, i.e. whose duration is one and a half times the base one. Double dots are not currently supported. Example:: >>> # A long time ago in a galaxy far, >>> # far away... >>> Sound.play_song(( >>> ('D4', 'e3'), # intro anacrouse >>> ('D4', 'e3'), >>> ('D4', 'e3'), >>> ('G4', 'h'), # meas 1 >>> ('D5', 'h'), >>> ('C5', 'e3'), # meas 2 >>> ('B4', 'e3'), >>> ('A4', 'e3'), >>> ('G5', 'h'), >>> ('D5', 'q'), >>> ('C5', 'e3'), # meas 3 >>> ('B4', 'e3'), >>> ('A4', 'e3'), >>> ('G5', 'h'), >>> ('D5', 'q'), >>> ('C5', 'e3'), # meas 4 >>> ('B4', 'e3'), >>> ('C5', 'e3'), >>> ('A4', 'h.'), >>> )) .. important:: Only 4/4 signature songs are supported with respect to note durations. :param iterable[tuple(string, string)] song: the song :param int tempo: the song tempo, given in quarters per minute :param float delay: delay between notes (in seconds) :return: the spawn subprocess from ``subprocess.Popen`` :raises ValueError: if invalid note in song or invalid play parameters
['Plays', 'a', 'song', 'provided', 'as', 'a', 'list', 'of', 'tuples', 'containing', 'the', 'note', 'name', 'and', 'its', 'value', 'using', 'music', 'conventional', 'notation', 'instead', 'of', 'numerical', 'values', 'for', 'frequency', 'and', 'duration', '.']
train
https://github.com/ev3dev/ev3dev-lang-python/blob/afc98d35004b533dc161a01f7c966e78607d7c1e/ev3dev2/sound.py#L380-L485
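The note-value arithmetic in beep_args can be exercised on its own. A sketch with a hypothetical four-entry subset of the _NOTE_VALUES table (values are fractions of one 4/4 measure):

NOTE_VALUES = {'w': 1.0, 'h': 0.5, 'q': 0.25, 'e': 0.125}

def duration_ms(value, tempo=120):
    meas_ms = 60000 / tempo * 4           # one 4/4 measure in milliseconds
    if '/' in value:                      # explicit divider, e.g. 'q/3'
        base, factor = value.split('/')
        return meas_ms * NOTE_VALUES[base] / float(factor)
    if '*' in value:                      # explicit multiplier, e.g. 'h*1.5'
        base, factor = value.split('*')
        return meas_ms * NOTE_VALUES[base] * float(factor)
    if value.endswith('.'):               # dotted note: 1.5x the base value
        return meas_ms * NOTE_VALUES[value[:-1]] * 1.5
    if value.endswith('3'):               # triplet shortcut: 2/3 of the base
        return meas_ms * NOTE_VALUES[value[:-1]] * 2 / 3
    return meas_ms * NOTE_VALUES[value]

print(duration_ms('q'))   # 500.0 ms at 120 bpm
print(duration_ms('e3'))  # ~166.7 ms; three of them fill one quarter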
3,471
apache/incubator-mxnet
example/sparse/wide_deep/data.py
preprocess_uci_adult
def preprocess_uci_adult(data_name): """Some tricks of feature engineering are adapted from tensorflow's wide and deep tutorial. """ csv_columns = [ "age", "workclass", "fnlwgt", "education", "education_num", "marital_status", "occupation", "relationship", "race", "gender", "capital_gain", "capital_loss", "hours_per_week", "native_country", "income_bracket" ] vocabulary_dict = { "gender": [ "Female", "Male" ], "education": [ "Bachelors", "HS-grad", "11th", "Masters", "9th", "Some-college", "Assoc-acdm", "Assoc-voc", "7th-8th", "Doctorate", "Prof-school", "5th-6th", "10th", "1st-4th", "Preschool", "12th" ], "marital_status": [ "Married-civ-spouse", "Divorced", "Married-spouse-absent", "Never-married", "Separated", "Married-AF-spouse", "Widowed" ], "relationship": [ "Husband", "Not-in-family", "Wife", "Own-child", "Unmarried", "Other-relative" ], "workclass": [ "Self-emp-not-inc", "Private", "State-gov", "Federal-gov", "Local-gov", "?", "Self-emp-inc", "Without-pay", "Never-worked" ] } # wide columns crossed_columns = [ ["education", "occupation"], ["native_country", "occupation"], ["age_buckets", "education", "occupation"], ] age_boundaries = [18, 25, 30, 35, 40, 45, 50, 55, 60, 65] # deep columns indicator_columns = ['workclass', 'education', 'gender', 'relationship'] embedding_columns = ['native_country', 'occupation'] continuous_columns = ['age', 'education_num', 'capital_gain', 'capital_loss', 'hours_per_week'] # income_bracket column is the label labels = ["<", ">"] hash_bucket_size = 1000 csr_ncols = len(crossed_columns) * hash_bucket_size dns_ncols = len(continuous_columns) + len(embedding_columns) for col in indicator_columns: dns_ncols += len(vocabulary_dict[col]) label_list = [] csr_list = [] dns_list = [] with open(data_name) as f: for row in DictReader(f, fieldnames=csv_columns): label_list.append(labels.index(row['income_bracket'].strip()[0])) for i, cols in enumerate(crossed_columns): if cols[0] == "age_buckets": age_bucket = np.digitize(float(row["age"]), age_boundaries) s = '_'.join([row[col].strip() for col in cols[1:]]) s += '_' + str(age_bucket) csr_list.append((i * hash_bucket_size + hash(s) % hash_bucket_size, 1.0)) else: s = '_'.join([row[col].strip() for col in cols]) csr_list.append((i * hash_bucket_size + hash(s) % hash_bucket_size, 1.0)) dns_row = [0] * dns_ncols dns_dim = 0 for col in embedding_columns: dns_row[dns_dim] = hash(row[col].strip()) % hash_bucket_size dns_dim += 1 for col in indicator_columns: dns_row[dns_dim + vocabulary_dict[col].index(row[col].strip())] = 1.0 dns_dim += len(vocabulary_dict[col]) for col in continuous_columns: dns_row[dns_dim] = float(row[col].strip()) dns_dim += 1 dns_list.append(dns_row) data_list = [item[1] for item in csr_list] indices_list = [item[0] for item in csr_list] indptr_list = range(0, len(indices_list) + 1, len(crossed_columns)) # convert to ndarrays csr = mx.nd.sparse.csr_matrix((data_list, indices_list, indptr_list), shape=(len(label_list), hash_bucket_size * len(crossed_columns))) dns = np.array(dns_list) label = np.array(label_list) return csr, dns, label
python
def preprocess_uci_adult(data_name): """Some tricks of feature engineering are adapted from tensorflow's wide and deep tutorial. """ csv_columns = [ "age", "workclass", "fnlwgt", "education", "education_num", "marital_status", "occupation", "relationship", "race", "gender", "capital_gain", "capital_loss", "hours_per_week", "native_country", "income_bracket" ] vocabulary_dict = { "gender": [ "Female", "Male" ], "education": [ "Bachelors", "HS-grad", "11th", "Masters", "9th", "Some-college", "Assoc-acdm", "Assoc-voc", "7th-8th", "Doctorate", "Prof-school", "5th-6th", "10th", "1st-4th", "Preschool", "12th" ], "marital_status": [ "Married-civ-spouse", "Divorced", "Married-spouse-absent", "Never-married", "Separated", "Married-AF-spouse", "Widowed" ], "relationship": [ "Husband", "Not-in-family", "Wife", "Own-child", "Unmarried", "Other-relative" ], "workclass": [ "Self-emp-not-inc", "Private", "State-gov", "Federal-gov", "Local-gov", "?", "Self-emp-inc", "Without-pay", "Never-worked" ] } # wide columns crossed_columns = [ ["education", "occupation"], ["native_country", "occupation"], ["age_buckets", "education", "occupation"], ] age_boundaries = [18, 25, 30, 35, 40, 45, 50, 55, 60, 65] # deep columns indicator_columns = ['workclass', 'education', 'gender', 'relationship'] embedding_columns = ['native_country', 'occupation'] continuous_columns = ['age', 'education_num', 'capital_gain', 'capital_loss', 'hours_per_week'] # income_bracket column is the label labels = ["<", ">"] hash_bucket_size = 1000 csr_ncols = len(crossed_columns) * hash_bucket_size dns_ncols = len(continuous_columns) + len(embedding_columns) for col in indicator_columns: dns_ncols += len(vocabulary_dict[col]) label_list = [] csr_list = [] dns_list = [] with open(data_name) as f: for row in DictReader(f, fieldnames=csv_columns): label_list.append(labels.index(row['income_bracket'].strip()[0])) for i, cols in enumerate(crossed_columns): if cols[0] == "age_buckets": age_bucket = np.digitize(float(row["age"]), age_boundaries) s = '_'.join([row[col].strip() for col in cols[1:]]) s += '_' + str(age_bucket) csr_list.append((i * hash_bucket_size + hash(s) % hash_bucket_size, 1.0)) else: s = '_'.join([row[col].strip() for col in cols]) csr_list.append((i * hash_bucket_size + hash(s) % hash_bucket_size, 1.0)) dns_row = [0] * dns_ncols dns_dim = 0 for col in embedding_columns: dns_row[dns_dim] = hash(row[col].strip()) % hash_bucket_size dns_dim += 1 for col in indicator_columns: dns_row[dns_dim + vocabulary_dict[col].index(row[col].strip())] = 1.0 dns_dim += len(vocabulary_dict[col]) for col in continuous_columns: dns_row[dns_dim] = float(row[col].strip()) dns_dim += 1 dns_list.append(dns_row) data_list = [item[1] for item in csr_list] indices_list = [item[0] for item in csr_list] indptr_list = range(0, len(indices_list) + 1, len(crossed_columns)) # convert to ndarrays csr = mx.nd.sparse.csr_matrix((data_list, indices_list, indptr_list), shape=(len(label_list), hash_bucket_size * len(crossed_columns))) dns = np.array(dns_list) label = np.array(label_list) return csr, dns, label
['def', 'preprocess_uci_adult', '(', 'data_name', ')', ':', 'csv_columns', '=', '[', '"age"', ',', '"workclass"', ',', '"fnlwgt"', ',', '"education"', ',', '"education_num"', ',', '"marital_status"', ',', '"occupation"', ',', '"relationship"', ',', '"race"', ',', '"gender"', ',', '"capital_gain"', ',', '"capital_loss"', ',', '"hours_per_week"', ',', '"native_country"', ',', '"income_bracket"', ']', 'vocabulary_dict', '=', '{', '"gender"', ':', '[', '"Female"', ',', '"Male"', ']', ',', '"education"', ':', '[', '"Bachelors"', ',', '"HS-grad"', ',', '"11th"', ',', '"Masters"', ',', '"9th"', ',', '"Some-college"', ',', '"Assoc-acdm"', ',', '"Assoc-voc"', ',', '"7th-8th"', ',', '"Doctorate"', ',', '"Prof-school"', ',', '"5th-6th"', ',', '"10th"', ',', '"1st-4th"', ',', '"Preschool"', ',', '"12th"', ']', ',', '"marital_status"', ':', '[', '"Married-civ-spouse"', ',', '"Divorced"', ',', '"Married-spouse-absent"', ',', '"Never-married"', ',', '"Separated"', ',', '"Married-AF-spouse"', ',', '"Widowed"', ']', ',', '"relationship"', ':', '[', '"Husband"', ',', '"Not-in-family"', ',', '"Wife"', ',', '"Own-child"', ',', '"Unmarried"', ',', '"Other-relative"', ']', ',', '"workclass"', ':', '[', '"Self-emp-not-inc"', ',', '"Private"', ',', '"State-gov"', ',', '"Federal-gov"', ',', '"Local-gov"', ',', '"?"', ',', '"Self-emp-inc"', ',', '"Without-pay"', ',', '"Never-worked"', ']', '}', '# wide columns', 'crossed_columns', '=', '[', '[', '"education"', ',', '"occupation"', ']', ',', '[', '"native_country"', ',', '"occupation"', ']', ',', '[', '"age_buckets"', ',', '"education"', ',', '"occupation"', ']', ',', ']', 'age_boundaries', '=', '[', '18', ',', '25', ',', '30', ',', '35', ',', '40', ',', '45', ',', '50', ',', '55', ',', '60', ',', '65', ']', '# deep columns', 'indicator_columns', '=', '[', "'workclass'", ',', "'education'", ',', "'gender'", ',', "'relationship'", ']', 'embedding_columns', '=', '[', "'native_country'", ',', "'occupation'", ']', 'continuous_columns', '=', '[', "'age'", ',', "'education_num'", ',', "'capital_gain'", ',', "'capital_loss'", ',', "'hours_per_week'", ']', '# income_bracket column is the label', 'labels', '=', '[', '"<"', ',', '">"', ']', 'hash_bucket_size', '=', '1000', 'csr_ncols', '=', 'len', '(', 'crossed_columns', ')', '*', 'hash_bucket_size', 'dns_ncols', '=', 'len', '(', 'continuous_columns', ')', '+', 'len', '(', 'embedding_columns', ')', 'for', 'col', 'in', 'indicator_columns', ':', 'dns_ncols', '+=', 'len', '(', 'vocabulary_dict', '[', 'col', ']', ')', 'label_list', '=', '[', ']', 'csr_list', '=', '[', ']', 'dns_list', '=', '[', ']', 'with', 'open', '(', 'data_name', ')', 'as', 'f', ':', 'for', 'row', 'in', 'DictReader', '(', 'f', ',', 'fieldnames', '=', 'csv_columns', ')', ':', 'label_list', '.', 'append', '(', 'labels', '.', 'index', '(', 'row', '[', "'income_bracket'", ']', '.', 'strip', '(', ')', '[', '0', ']', ')', ')', 'for', 'i', ',', 'cols', 'in', 'enumerate', '(', 'crossed_columns', ')', ':', 'if', 'cols', '[', '0', ']', '==', '"age_buckets"', ':', 'age_bucket', '=', 'np', '.', 'digitize', '(', 'float', '(', 'row', '[', '"age"', ']', ')', ',', 'age_boundaries', ')', 's', '=', "'_'", '.', 'join', '(', '[', 'row', '[', 'col', ']', '.', 'strip', '(', ')', 'for', 'col', 'in', 'cols', '[', '1', ':', ']', ']', ')', 's', '+=', "'_'", '+', 'str', '(', 'age_bucket', ')', 'csr_list', '.', 'append', '(', '(', 'i', '*', 'hash_bucket_size', '+', 'hash', '(', 's', ')', '%', 'hash_bucket_size', ',', '1.0', ')', ')', 'else', ':', 's', '=', "'_'", '.', 'join', '(', '[', 'row', '[', 'col', ']', '.', 'strip', '(', ')', 'for', 'col', 'in', 'cols', ']', ')', 'csr_list', '.', 'append', '(', '(', 'i', '*', 'hash_bucket_size', '+', 'hash', '(', 's', ')', '%', 'hash_bucket_size', ',', '1.0', ')', ')', 'dns_row', '=', '[', '0', ']', '*', 'dns_ncols', 'dns_dim', '=', '0', 'for', 'col', 'in', 'embedding_columns', ':', 'dns_row', '[', 'dns_dim', ']', '=', 'hash', '(', 'row', '[', 'col', ']', '.', 'strip', '(', ')', ')', '%', 'hash_bucket_size', 'dns_dim', '+=', '1', 'for', 'col', 'in', 'indicator_columns', ':', 'dns_row', '[', 'dns_dim', '+', 'vocabulary_dict', '[', 'col', ']', '.', 'index', '(', 'row', '[', 'col', ']', '.', 'strip', '(', ')', ')', ']', '=', '1.0', 'dns_dim', '+=', 'len', '(', 'vocabulary_dict', '[', 'col', ']', ')', 'for', 'col', 'in', 'continuous_columns', ':', 'dns_row', '[', 'dns_dim', ']', '=', 'float', '(', 'row', '[', 'col', ']', '.', 'strip', '(', ')', ')', 'dns_dim', '+=', '1', 'dns_list', '.', 'append', '(', 'dns_row', ')', 'data_list', '=', '[', 'item', '[', '1', ']', 'for', 'item', 'in', 'csr_list', ']', 'indices_list', '=', '[', 'item', '[', '0', ']', 'for', 'item', 'in', 'csr_list', ']', 'indptr_list', '=', 'range', '(', '0', ',', 'len', '(', 'indices_list', ')', '+', '1', ',', 'len', '(', 'crossed_columns', ')', ')', '# convert to ndarrays', 'csr', '=', 'mx', '.', 'nd', '.', 'sparse', '.', 'csr_matrix', '(', '(', 'data_list', ',', 'indices_list', ',', 'indptr_list', ')', ',', 'shape', '=', '(', 'len', '(', 'label_list', ')', ',', 'hash_bucket_size', '*', 'len', '(', 'crossed_columns', ')', ')', ')', 'dns', '=', 'np', '.', 'array', '(', 'dns_list', ')', 'label', '=', 'np', '.', 'array', '(', 'label_list', ')', 'return', 'csr', ',', 'dns', ',', 'label']
Some tricks of feature engineering are adapted from tensorflow's wide and deep tutorial.
['Some', 'tricks', 'of', 'feature', 'engineering', 'are', 'adapted', 'from', 'tensorflow', 's', 'wide', 'and', 'deep', 'tutorial', '.']
train
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/sparse/wide_deep/data.py#L40-L139
3,472
minhhoit/yacms
yacms/core/managers.py
PublishedManager.published
def published(self, for_user=None): """ For non-staff users, return items with a published status and whose publish and expiry dates fall before and after the current date when specified. """ from yacms.core.models import CONTENT_STATUS_PUBLISHED if for_user is not None and for_user.is_staff: return self.all() return self.filter( Q(publish_date__lte=now()) | Q(publish_date__isnull=True), Q(expiry_date__gte=now()) | Q(expiry_date__isnull=True), Q(status=CONTENT_STATUS_PUBLISHED))
python
def published(self, for_user=None): """ For non-staff users, return items with a published status and whose publish and expiry dates fall before and after the current date when specified. """ from yacms.core.models import CONTENT_STATUS_PUBLISHED if for_user is not None and for_user.is_staff: return self.all() return self.filter( Q(publish_date__lte=now()) | Q(publish_date__isnull=True), Q(expiry_date__gte=now()) | Q(expiry_date__isnull=True), Q(status=CONTENT_STATUS_PUBLISHED))
['def', 'published', '(', 'self', ',', 'for_user', '=', 'None', ')', ':', 'from', 'yacms', '.', 'core', '.', 'models', 'import', 'CONTENT_STATUS_PUBLISHED', 'if', 'for_user', 'is', 'not', 'None', 'and', 'for_user', '.', 'is_staff', ':', 'return', 'self', '.', 'all', '(', ')', 'return', 'self', '.', 'filter', '(', 'Q', '(', 'publish_date__lte', '=', 'now', '(', ')', ')', '|', 'Q', '(', 'publish_date__isnull', '=', 'True', ')', ',', 'Q', '(', 'expiry_date__gte', '=', 'now', '(', ')', ')', '|', 'Q', '(', 'expiry_date__isnull', '=', 'True', ')', ',', 'Q', '(', 'status', '=', 'CONTENT_STATUS_PUBLISHED', ')', ')']
For non-staff users, return items with a published status and whose publish and expiry dates fall before and after the current date when specified.
['For', 'non', '-', 'staff', 'users', 'return', 'items', 'with', 'a', 'published', 'status', 'and', 'whose', 'publish', 'and', 'expiry', 'dates', 'fall', 'before', 'and', 'after', 'the', 'current', 'date', 'when', 'specified', '.']
train
https://github.com/minhhoit/yacms/blob/2921b706b7107c6e8c5f2bbf790ff11f85a2167f/yacms/core/managers.py#L58-L70
3,473
phaethon/kamene
kamene/contrib/gsm_um.py
activateAaPdpContextAccept
def activateAaPdpContextAccept(ProtocolConfigurationOptions_presence=0, GprsTimer_presence=0): """ACTIVATE AA PDP CONTEXT ACCEPT Section 9.5.11""" a = TpPd(pd=0x8) b = MessageType(mesType=0x51) # 01010001 c = LlcServiceAccessPointIdentifier() d = QualityOfService() e = MobileId() f = PacketDataProtocolAddress() g = RadioPriorityAndSpareHalfOctets() packet = a / b / c / d / e / f / g if ProtocolConfigurationOptions_presence == 1: i = ProtocolConfigurationOptions(ieiPCO=0x27) packet = packet / i if GprsTimer_presence == 1: j = GprsTimer(ieiGT=0x29) packet = packet / j return packet
python
def activateAaPdpContextAccept(ProtocolConfigurationOptions_presence=0, GprsTimer_presence=0): """ACTIVATE AA PDP CONTEXT ACCEPT Section 9.5.11""" a = TpPd(pd=0x8) b = MessageType(mesType=0x51) # 01010001 c = LlcServiceAccessPointIdentifier() d = QualityOfService() e = MobileId() f = PacketDataProtocolAddress() g = RadioPriorityAndSpareHalfOctets() packet = a / b / c / d / e / f / g if ProtocolConfigurationOptions_presence == 1: i = ProtocolConfigurationOptions(ieiPCO=0x27) packet = packet / i if GprsTimer_presence == 1: j = GprsTimer(ieiGT=0x29) packet = packet / j return packet
['def', 'activateAaPdpContextAccept', '(', 'ProtocolConfigurationOptions_presence', '=', '0', ',', 'GprsTimer_presence', '=', '0', ')', ':', 'a', '=', 'TpPd', '(', 'pd', '=', '0x8', ')', 'b', '=', 'MessageType', '(', 'mesType', '=', '0x51', ')', '# 01010001', 'c', '=', 'LlcServiceAccessPointIdentifier', '(', ')', 'd', '=', 'QualityOfService', '(', ')', 'e', '=', 'MobileId', '(', ')', 'f', '=', 'PacketDataProtocolAddress', '(', ')', 'g', '=', 'RadioPriorityAndSpareHalfOctets', '(', ')', 'packet', '=', 'a', '/', 'b', '/', 'c', '/', 'd', '/', 'e', '/', 'f', '/', 'g', 'if', 'ProtocolConfigurationOptions_presence', '==', '1', ':', 'i', '=', 'ProtocolConfigurationOptions', '(', 'ieiPCO', '=', '0x27', ')', 'packet', '=', 'packet', '/', 'i', 'if', 'GprsTimer_presence', '==', '1', ':', 'j', '=', 'GprsTimer', '(', 'ieiGT', '=', '0x29', ')', 'packet', '=', 'packet', '/', 'j', 'return', 'packet']
ACTIVATE AA PDP CONTEXT ACCEPT Section 9.5.11
['ACTIVATE', 'AA', 'PDP', 'CONTEXT', 'ACCEPT', 'Section', '9', '.', '5', '.', '11']
train
https://github.com/phaethon/kamene/blob/11d4064844f4f68ac5d7546f5633ac7d02082914/kamene/contrib/gsm_um.py#L2757-L2774
3,474
klavinslab/coral
coral/database/_yeast.py
get_yeast_sequence
def get_yeast_sequence(chromosome, start, end, reverse_complement=False): '''Acquire a sequence from SGD http://www.yeastgenome.org :param chromosome: Yeast chromosome. :type chromosome: int :param start: A biostart. :type start: int :param end: A bioend. :type end: int :param reverse_complement: Get the reverse complement. :type reverse_complement: bool :returns: A DNA sequence. :rtype: coral.DNA ''' import requests if start != end: if reverse_complement: rev_option = '-REV' else: rev_option = '' param_url = '&chr=' + str(chromosome) + '&beg=' + str(start) + \ '&end=' + str(end) + '&rev=' + rev_option url = 'http://www.yeastgenome.org/cgi-bin/getSeq?map=a2map' + \ param_url res = requests.get(url) # ok... sadly, I contacted SGD and they haven't implemented this so # I have to parse their yeastgenome page, but # it is easy because the raw sequence is between <pre> tags! # warning that's for the first < so we need +5! begin_index = res.text.index('<pre>') end_index = res.text.index('</pre>') sequence = res.text[begin_index + 5:end_index] sequence = sequence.replace('\n', '').replace('\r', '') else: sequence = '' return coral.DNA(sequence)
python
def get_yeast_sequence(chromosome, start, end, reverse_complement=False): '''Acquire a sequence from SGD http://www.yeastgenome.org :param chromosome: Yeast chromosome. :type chromosome: int :param start: A biostart. :type start: int :param end: A bioend. :type end: int :param reverse_complement: Get the reverse complement. :type reverse_complement: bool :returns: A DNA sequence. :rtype: coral.DNA ''' import requests if start != end: if reverse_complement: rev_option = '-REV' else: rev_option = '' param_url = '&chr=' + str(chromosome) + '&beg=' + str(start) + \ '&end=' + str(end) + '&rev=' + rev_option url = 'http://www.yeastgenome.org/cgi-bin/getSeq?map=a2map' + \ param_url res = requests.get(url) # ok... sadly, I contacted SGD and they haven't implemented this so # I have to parse their yeastgenome page, but # it is easy because the raw sequence is between <pre> tags! # warning that's for the first < so we need +5! begin_index = res.text.index('<pre>') end_index = res.text.index('</pre>') sequence = res.text[begin_index + 5:end_index] sequence = sequence.replace('\n', '').replace('\r', '') else: sequence = '' return coral.DNA(sequence)
['def', 'get_yeast_sequence', '(', 'chromosome', ',', 'start', ',', 'end', ',', 'reverse_complement', '=', 'False', ')', ':', 'import', 'requests', 'if', 'start', '!=', 'end', ':', 'if', 'reverse_complement', ':', 'rev_option', '=', "'-REV'", 'else', ':', 'rev_option', '=', "''", 'param_url', '=', "'&chr='", '+', 'str', '(', 'chromosome', ')', '+', "'&beg='", '+', 'str', '(', 'start', ')', '+', "'&end='", '+', 'str', '(', 'end', ')', '+', "'&rev='", '+', 'rev_option', 'url', '=', "'http://www.yeastgenome.org/cgi-bin/getSeq?map=a2map'", '+', 'param_url', 'res', '=', 'requests', '.', 'get', '(', 'url', ')', '"# ok... sadly, I contacted SGD and they haven\'t implemented this so"', '# I have to parse their yeastgenome page, but', '# it is easy because the raw sequence is between <pre> tags!', "# warning that's for the first < so we need +5!", 'begin_index', '=', 'res', '.', 'text', '.', 'index', '(', "'<pre>'", ')', 'end_index', '=', 'res', '.', 'text', '.', 'index', '(', "'</pre>'", ')', 'sequence', '=', 'res', '.', 'text', '[', 'begin_index', '+', '5', ':', 'end_index', ']', 'sequence', '=', 'sequence', '.', 'replace', '(', "'\\n'", ',', "''", ')', '.', 'replace', '(', "'\\r'", ',', "''", ')', 'else', ':', 'sequence', '=', "''", 'return', 'coral', '.', 'DNA', '(', 'sequence', ')']
Acquire a sequence from SGD http://www.yeastgenome.org :param chromosome: Yeast chromosome. :type chromosome: int :param start: A biostart. :type start: int :param end: A bioend. :type end: int :param reverse_complement: Get the reverse complement. :type reverse_complement: bool :returns: A DNA sequence. :rtype: coral.DNA
['Acquire', 'a', 'sequence', 'from', 'SGD', 'http', ':', '//', 'www', '.', 'yeastgenome', '.', 'org', ':', 'param', 'chromosome', ':', 'Yeast', 'chromosome', '.', ':', 'type', 'chromosome', ':', 'int', ':', 'param', 'start', ':', 'A', 'biostart', '.', ':', 'type', 'start', ':', 'int', ':', 'param', 'end', ':', 'A', 'bioend', '.', ':', 'type', 'end', ':', 'int', ':', 'param', 'reverse_complement', ':', 'Get', 'the', 'reverse', 'complement', '.', ':', 'type', 'reverse_complement', ':', 'bool', ':', 'returns', ':', 'A', 'DNA', 'sequence', '.', ':', 'rtype', ':', 'coral', '.', 'DNA']
train
https://github.com/klavinslab/coral/blob/17f59591211562a59a051f474cd6cecba4829df9/coral/database/_yeast.py#L86-L126
3,475
openstack/networking-cisco
networking_cisco/plugins/cisco/cfg_agent/device_drivers/iosxe/iosxe_routing_driver.py
IosXeRoutingDriver._get_vrfs
def _get_vrfs(self): """Get the current VRFs configured in the device. :return: A list of vrf names as string """ vrfs = [] ios_cfg = self._get_running_config() parse = HTParser(ios_cfg) vrfs_raw = parse.find_lines("^vrf definition") for line in vrfs_raw: # raw format ['ip vrf <vrf-name>',....] vrf_name = line.strip().split(' ')[2] vrfs.append(vrf_name) LOG.info("VRFs:%s", vrfs) return vrfs
python
def _get_vrfs(self): """Get the current VRFs configured in the device. :return: A list of vrf names as string """ vrfs = [] ios_cfg = self._get_running_config() parse = HTParser(ios_cfg) vrfs_raw = parse.find_lines("^vrf definition") for line in vrfs_raw: # raw format ['ip vrf <vrf-name>',....] vrf_name = line.strip().split(' ')[2] vrfs.append(vrf_name) LOG.info("VRFs:%s", vrfs) return vrfs
['def', '_get_vrfs', '(', 'self', ')', ':', 'vrfs', '=', '[', ']', 'ios_cfg', '=', 'self', '.', '_get_running_config', '(', ')', 'parse', '=', 'HTParser', '(', 'ios_cfg', ')', 'vrfs_raw', '=', 'parse', '.', 'find_lines', '(', '"^vrf definition"', ')', 'for', 'line', 'in', 'vrfs_raw', ':', "# raw format ['ip vrf <vrf-name>',....]", 'vrf_name', '=', 'line', '.', 'strip', '(', ')', '.', 'split', '(', "' '", ')', '[', '2', ']', 'vrfs', '.', 'append', '(', 'vrf_name', ')', 'LOG', '.', 'info', '(', '"VRFs:%s"', ',', 'vrfs', ')', 'return', 'vrfs']
Get the current VRFs configured in the device. :return: A list of vrf names as string
['Get', 'the', 'current', 'VRFs', 'configured', 'in', 'the', 'device', '.']
train
https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/plugins/cisco/cfg_agent/device_drivers/iosxe/iosxe_routing_driver.py#L339-L353
3,476
ehansis/ozelot
ozelot/etl/tasks.py
ORMTask.close_session
def close_session(self, commit=True): """Commit and close the DB session associated with this task (no error is raised if None is open) Args: commit (bool): commit session before closing (default=True) """ if self._session is not None: if commit: self._session.commit() self._session.close() self._session = None
python
def close_session(self, commit=True): """Commit and close the DB session associated with this task (no error is raised if None is open) Args: commit (bool): commit session before closing (default=True) """ if self._session is not None: if commit: self._session.commit() self._session.close() self._session = None
['def', 'close_session', '(', 'self', ',', 'commit', '=', 'True', ')', ':', 'if', 'self', '.', '_session', 'is', 'not', 'None', ':', 'if', 'commit', ':', 'self', '.', '_session', '.', 'commit', '(', ')', 'self', '.', '_session', '.', 'close', '(', ')', 'self', '.', '_session', '=', 'None']
Commit and close the DB session associated with this task (no error is raised if None is open) Args: commit (bool): commit session before closing (default=True)
['Commit', 'and', 'close', 'the', 'DB', 'session', 'associated', 'with', 'this', 'task', '(', 'no', 'error', 'is', 'raised', 'if', 'None', 'is', 'open', ')']
train
https://github.com/ehansis/ozelot/blob/948675e02eb6fca940450f5cb814f53e97159e5b/ozelot/etl/tasks.py#L142-L152
3,477
polysquare/polysquare-setuptools-lint
polysquare_setuptools_lint/__init__.py
_run_prospector
def _run_prospector(filename, stamp_file_name, disabled_linters, show_lint_files): """Run prospector.""" linter_tools = [ "pep257", "pep8", "pyflakes" ] if can_run_pylint(): linter_tools.append("pylint") # Run prospector on tests. There are some errors we don't care about: # - invalid-name: This is often triggered because test method names # can be quite long. Descriptive test method names are # good, so disable this warning. # - super-on-old-class: unittest.TestCase is a new style class, but # pylint detects an old style class. # - too-many-public-methods: TestCase subclasses by definition have # lots of methods. test_ignore_codes = [ "invalid-name", "super-on-old-class", "too-many-public-methods" ] kwargs = dict() if _file_is_test(filename): kwargs["ignore_codes"] = test_ignore_codes else: if can_run_frosted(): linter_tools += ["frosted"] return _stamped_deps(stamp_file_name, _run_prospector_on, [filename], linter_tools, disabled_linters, show_lint_files, **kwargs)
python
def _run_prospector(filename, stamp_file_name, disabled_linters, show_lint_files): """Run prospector.""" linter_tools = [ "pep257", "pep8", "pyflakes" ] if can_run_pylint(): linter_tools.append("pylint") # Run prospector on tests. There are some errors we don't care about: # - invalid-name: This is often triggered because test method names # can be quite long. Descriptive test method names are # good, so disable this warning. # - super-on-old-class: unittest.TestCase is a new style class, but # pylint detects an old style class. # - too-many-public-methods: TestCase subclasses by definition have # lots of methods. test_ignore_codes = [ "invalid-name", "super-on-old-class", "too-many-public-methods" ] kwargs = dict() if _file_is_test(filename): kwargs["ignore_codes"] = test_ignore_codes else: if can_run_frosted(): linter_tools += ["frosted"] return _stamped_deps(stamp_file_name, _run_prospector_on, [filename], linter_tools, disabled_linters, show_lint_files, **kwargs)
['def', '_run_prospector', '(', 'filename', ',', 'stamp_file_name', ',', 'disabled_linters', ',', 'show_lint_files', ')', ':', 'linter_tools', '=', '[', '"pep257"', ',', '"pep8"', ',', '"pyflakes"', ']', 'if', 'can_run_pylint', '(', ')', ':', 'linter_tools', '.', 'append', '(', '"pylint"', ')', "# Run prospector on tests. There are some errors we don't care about:", '# - invalid-name: This is often triggered because test method names', '# can be quite long. Descriptive test method names are', '# good, so disable this warning.', '# - super-on-old-class: unittest.TestCase is a new style class, but', '# pylint detects an old style class.', '# - too-many-public-methods: TestCase subclasses by definition have', '# lots of methods.', 'test_ignore_codes', '=', '[', '"invalid-name"', ',', '"super-on-old-class"', ',', '"too-many-public-methods"', ']', 'kwargs', '=', 'dict', '(', ')', 'if', '_file_is_test', '(', 'filename', ')', ':', 'kwargs', '[', '"ignore_codes"', ']', '=', 'test_ignore_codes', 'else', ':', 'if', 'can_run_frosted', '(', ')', ':', 'linter_tools', '+=', '[', '"frosted"', ']', 'return', '_stamped_deps', '(', 'stamp_file_name', ',', '_run_prospector_on', ',', '[', 'filename', ']', ',', 'linter_tools', ',', 'disabled_linters', ',', 'show_lint_files', ',', '*', '*', 'kwargs', ')']
Run prospector.
['Run', 'prospector', '.']
train
https://github.com/polysquare/polysquare-setuptools-lint/blob/5df5a6401c7ad6a90b42230eeb99c82cc56952b6/polysquare_setuptools_lint/__init__.py#L244-L286
3,478
Clinical-Genomics/trailblazer
trailblazer/cli/core.py
start
def start(context, mip_config, email, priority, dryrun, command, start_with, family): """Start a new analysis.""" mip_cli = MipCli(context.obj['script']) mip_config = mip_config or context.obj['mip_config'] email = email or environ_email() kwargs = dict(config=mip_config, family=family, priority=priority, email=email, dryrun=dryrun, start_with=start_with) if command: mip_command = mip_cli.build_command(**kwargs) click.echo(' '.join(mip_command)) else: try: mip_cli(**kwargs) if not dryrun: context.obj['store'].add_pending(family, email=email) except MipStartError as error: click.echo(click.style(error.message, fg='red'))
python
def start(context, mip_config, email, priority, dryrun, command, start_with, family): """Start a new analysis.""" mip_cli = MipCli(context.obj['script']) mip_config = mip_config or context.obj['mip_config'] email = email or environ_email() kwargs = dict(config=mip_config, family=family, priority=priority, email=email, dryrun=dryrun, start_with=start_with) if command: mip_command = mip_cli.build_command(**kwargs) click.echo(' '.join(mip_command)) else: try: mip_cli(**kwargs) if not dryrun: context.obj['store'].add_pending(family, email=email) except MipStartError as error: click.echo(click.style(error.message, fg='red'))
['def', 'start', '(', 'context', ',', 'mip_config', ',', 'email', ',', 'priority', ',', 'dryrun', ',', 'command', ',', 'start_with', ',', 'family', ')', ':', 'mip_cli', '=', 'MipCli', '(', 'context', '.', 'obj', '[', "'script'", ']', ')', 'mip_config', '=', 'mip_config', 'or', 'context', '.', 'obj', '[', "'mip_config'", ']', 'email', '=', 'email', 'or', 'environ_email', '(', ')', 'kwargs', '=', 'dict', '(', 'config', '=', 'mip_config', ',', 'family', '=', 'family', ',', 'priority', '=', 'priority', ',', 'email', '=', 'email', ',', 'dryrun', '=', 'dryrun', ',', 'start_with', '=', 'start_with', ')', 'if', 'command', ':', 'mip_command', '=', 'mip_cli', '.', 'build_command', '(', '*', '*', 'kwargs', ')', 'click', '.', 'echo', '(', "' '", '.', 'join', '(', 'mip_command', ')', ')', 'else', ':', 'try', ':', 'mip_cli', '(', '*', '*', 'kwargs', ')', 'if', 'not', 'dryrun', ':', 'context', '.', 'obj', '[', "'store'", ']', '.', 'add_pending', '(', 'family', ',', 'email', '=', 'email', ')', 'except', 'MipStartError', 'as', 'error', ':', 'click', '.', 'echo', '(', 'click', '.', 'style', '(', 'error', '.', 'message', ',', 'fg', '=', "'red'", ')', ')']
Start a new analysis.
['Start', 'a', 'new', 'analysis', '.']
train
https://github.com/Clinical-Genomics/trailblazer/blob/27f3cd21043a1077bd7029e85783459a50a7b798/trailblazer/cli/core.py#L80-L95
3,479
AguaClara/aguaclara
aguaclara/design/lfom.py
LFOM.n_orifices_per_row_max
def n_orifices_per_row_max(self): """A bound on the number of orifices allowed in each row. The distance between consecutive orifices must be enough to retain structural integrity of the pipe. """ c = math.pi * pipe.ID_SDR(self.nom_diam_pipe, self.sdr) b = self.orifice_diameter + self.s_orifice return math.floor(c/b)
python
def n_orifices_per_row_max(self): """A bound on the number of orifices allowed in each row. The distance between consecutive orifices must be enough to retain structural integrity of the pipe. """ c = math.pi * pipe.ID_SDR(self.nom_diam_pipe, self.sdr) b = self.orifice_diameter + self.s_orifice return math.floor(c/b)
['def', 'n_orifices_per_row_max', '(', 'self', ')', ':', 'c', '=', 'math', '.', 'pi', '*', 'pipe', '.', 'ID_SDR', '(', 'self', '.', 'nom_diam_pipe', ',', 'self', '.', 'sdr', ')', 'b', '=', 'self', '.', 'orifice_diameter', '+', 'self', '.', 's_orifice', 'return', 'math', '.', 'floor', '(', 'c', '/', 'b', ')']
A bound on the number of orifices allowed in each row. The distance between consecutive orifices must be enough to retain structural integrity of the pipe.
['A', 'bound', 'on', 'the', 'number', 'of', 'orifices', 'allowed', 'in', 'each', 'row', '.', 'The', 'distance', 'between', 'consecutive', 'orifices', 'must', 'be', 'enough', 'to', 'retain', 'structural', 'integrity', 'of', 'the', 'pipe', '.']
train
https://github.com/AguaClara/aguaclara/blob/8dd4e734768b166a7fc2b60388a24df2f93783fc/aguaclara/design/lfom.py#L108-L116
3,480
wtsi-hgi/consul-lock
consullock/cli.py
parse_cli_configuration
def parse_cli_configuration(arguments: List[str]) -> CliConfiguration: """ Parses the configuration passed in via command line arguments. :param arguments: CLI arguments :return: the configuration """ try: parsed_arguments = {x.replace("_", "-"): y for x, y in vars(_argument_parser.parse_args(arguments)).items()} except SystemExit as e: if e.code == SUCCESS_EXIT_CODE: raise e raise InvalidCliArgumentError() from e parsed_action = parsed_arguments[ACTION_CLI_PARAMETER_ACCESS] if parsed_action is None: _argument_parser.print_help() exit(INVALID_CLI_ARGUMENT_EXIT_CODE) action = Action(parsed_action) session_ttl = parsed_arguments.get(SESSION_TTL_LONG_PARAMETER, None) if session_ttl == NO_EXPIRY_SESSION_TTL_CLI_PARAMETER_VALUE: session_ttl = None shared_parameters = dict( key=parsed_arguments[KEY_PARAMETER], log_verbosity=_get_verbosity(parsed_arguments), session_ttl=session_ttl) if action == Action.UNLOCK: return CliUnlockConfiguration( **shared_parameters, regex_key_enabled=parsed_arguments.get(REGEX_KEY_ENABLED_SHORT_PARAMETER, DEFAULT_REGEX_KEY_ENABLED)) else: parameters = dict( **shared_parameters, non_blocking=parsed_arguments.get(NON_BLOCKING_LONG_PARAMETER, DEFAULT_NON_BLOCKING), timeout=parsed_arguments.get(TIMEOUT_LONG_PARAMETER, DEFAULT_TIMEOUT), metadata=parsed_arguments.get(METADATA_LONG_PARAMETER, DEFAULT_METADATA), on_before_locked_executables=list(itertools.chain(*parsed_arguments.get( ON_BEFORE_LOCK_LONG_PARAMETER, []))), on_lock_already_locked_executables=list(itertools.chain(*parsed_arguments.get( ON_LOCK_ALREADY_LOCKED_LONG_PARAMETER, []))), lock_poll_interval=parsed_arguments.get( LOCK_POLL_INTERVAL_SHORT_PARAMETER, DEFAULT_LOCK_POLL_INTERVAL_GENERATOR(1))) if action == Action.LOCK: return CliLockConfiguration(**parameters) else: return CliLockAndExecuteConfiguration( **parameters, executable=parsed_arguments[EXECUTABLE_PARAMETER])
python
def parse_cli_configuration(arguments: List[str]) -> CliConfiguration: """ Parses the configuration passed in via command line arguments. :param arguments: CLI arguments :return: the configuration """ try: parsed_arguments = {x.replace("_", "-"): y for x, y in vars(_argument_parser.parse_args(arguments)).items()} except SystemExit as e: if e.code == SUCCESS_EXIT_CODE: raise e raise InvalidCliArgumentError() from e parsed_action = parsed_arguments[ACTION_CLI_PARAMETER_ACCESS] if parsed_action is None: _argument_parser.print_help() exit(INVALID_CLI_ARGUMENT_EXIT_CODE) action = Action(parsed_action) session_ttl = parsed_arguments.get(SESSION_TTL_LONG_PARAMETER, None) if session_ttl == NO_EXPIRY_SESSION_TTL_CLI_PARAMETER_VALUE: session_ttl = None shared_parameters = dict( key=parsed_arguments[KEY_PARAMETER], log_verbosity=_get_verbosity(parsed_arguments), session_ttl=session_ttl) if action == Action.UNLOCK: return CliUnlockConfiguration( **shared_parameters, regex_key_enabled=parsed_arguments.get(REGEX_KEY_ENABLED_SHORT_PARAMETER, DEFAULT_REGEX_KEY_ENABLED)) else: parameters = dict( **shared_parameters, non_blocking=parsed_arguments.get(NON_BLOCKING_LONG_PARAMETER, DEFAULT_NON_BLOCKING), timeout=parsed_arguments.get(TIMEOUT_LONG_PARAMETER, DEFAULT_TIMEOUT), metadata=parsed_arguments.get(METADATA_LONG_PARAMETER, DEFAULT_METADATA), on_before_locked_executables=list(itertools.chain(*parsed_arguments.get( ON_BEFORE_LOCK_LONG_PARAMETER, []))), on_lock_already_locked_executables=list(itertools.chain(*parsed_arguments.get( ON_LOCK_ALREADY_LOCKED_LONG_PARAMETER, []))), lock_poll_interval=parsed_arguments.get( LOCK_POLL_INTERVAL_SHORT_PARAMETER, DEFAULT_LOCK_POLL_INTERVAL_GENERATOR(1))) if action == Action.LOCK: return CliLockConfiguration(**parameters) else: return CliLockAndExecuteConfiguration( **parameters, executable=parsed_arguments[EXECUTABLE_PARAMETER])
['def', 'parse_cli_configuration', '(', 'arguments', ':', 'List', '[', 'str', ']', ')', '->', 'CliConfiguration', ':', 'try', ':', 'parsed_arguments', '=', '{', 'x', '.', 'replace', '(', '"_"', ',', '"-"', ')', ':', 'y', 'for', 'x', ',', 'y', 'in', 'vars', '(', '_argument_parser', '.', 'parse_args', '(', 'arguments', ')', ')', '.', 'items', '(', ')', '}', 'except', 'SystemExit', 'as', 'e', ':', 'if', 'e', '.', 'code', '==', 'SUCCESS_EXIT_CODE', ':', 'raise', 'e', 'raise', 'InvalidCliArgumentError', '(', ')', 'from', 'e', 'parsed_action', '=', 'parsed_arguments', '[', 'ACTION_CLI_PARAMETER_ACCESS', ']', 'if', 'parsed_action', 'is', 'None', ':', '_argument_parser', '.', 'print_help', '(', ')', 'exit', '(', 'INVALID_CLI_ARGUMENT_EXIT_CODE', ')', 'action', '=', 'Action', '(', 'parsed_action', ')', 'session_ttl', '=', 'parsed_arguments', '.', 'get', '(', 'SESSION_TTL_LONG_PARAMETER', ',', 'None', ')', 'if', 'session_ttl', '==', 'NO_EXPIRY_SESSION_TTL_CLI_PARAMETER_VALUE', ':', 'session_ttl', '=', 'None', 'shared_parameters', '=', 'dict', '(', 'key', '=', 'parsed_arguments', '[', 'KEY_PARAMETER', ']', ',', 'log_verbosity', '=', '_get_verbosity', '(', 'parsed_arguments', ')', ',', 'session_ttl', '=', 'session_ttl', ')', 'if', 'action', '==', 'Action', '.', 'UNLOCK', ':', 'return', 'CliUnlockConfiguration', '(', '*', '*', 'shared_parameters', ',', 'regex_key_enabled', '=', 'parsed_arguments', '.', 'get', '(', 'REGEX_KEY_ENABLED_SHORT_PARAMETER', ',', 'DEFAULT_REGEX_KEY_ENABLED', ')', ')', 'else', ':', 'parameters', '=', 'dict', '(', '*', '*', 'shared_parameters', ',', 'non_blocking', '=', 'parsed_arguments', '.', 'get', '(', 'NON_BLOCKING_LONG_PARAMETER', ',', 'DEFAULT_NON_BLOCKING', ')', ',', 'timeout', '=', 'parsed_arguments', '.', 'get', '(', 'TIMEOUT_LONG_PARAMETER', ',', 'DEFAULT_TIMEOUT', ')', ',', 'metadata', '=', 'parsed_arguments', '.', 'get', '(', 'METADATA_LONG_PARAMETER', ',', 'DEFAULT_METADATA', ')', ',', 'on_before_locked_executables', '=', 'list', '(', 'itertools', '.', 'chain', '(', '*', 'parsed_arguments', '.', 'get', '(', 'ON_BEFORE_LOCK_LONG_PARAMETER', ',', '[', ']', ')', ')', ')', ',', 'on_lock_already_locked_executables', '=', 'list', '(', 'itertools', '.', 'chain', '(', '*', 'parsed_arguments', '.', 'get', '(', 'ON_LOCK_ALREADY_LOCKED_LONG_PARAMETER', ',', '[', ']', ')', ')', ')', ',', 'lock_poll_interval', '=', 'parsed_arguments', '.', 'get', '(', 'LOCK_POLL_INTERVAL_SHORT_PARAMETER', ',', 'DEFAULT_LOCK_POLL_INTERVAL_GENERATOR', '(', '1', ')', ')', ')', 'if', 'action', '==', 'Action', '.', 'LOCK', ':', 'return', 'CliLockConfiguration', '(', '*', '*', 'parameters', ')', 'else', ':', 'return', 'CliLockAndExecuteConfiguration', '(', '*', '*', 'parameters', ',', 'executable', '=', 'parsed_arguments', '[', 'EXECUTABLE_PARAMETER', ']', ')']
Parses the configuration passed in via command line arguments. :param arguments: CLI arguments :return: the configuration
['Parses', 'the', 'configuration', 'passed', 'in', 'via', 'command', 'line', 'arguments', '.', ':', 'param', 'arguments', ':', 'CLI', 'arguments', ':', 'return', ':', 'the', 'configuration']
train
https://github.com/wtsi-hgi/consul-lock/blob/deb07ab41dabbb49f4d0bbc062bc3b4b6e5d71b2/consullock/cli.py#L185-L234
3,481
ejeschke/ginga
ginga/Bindings.py
ImageViewBindings.sc_pan
def sc_pan(self, viewer, event, msg=True): """Interactively pan the image by scrolling motion. """ if not self.canpan: return True # User has "Pan Reverse" preference set? rev = self.settings.get('pan_reverse', False) direction = event.direction if rev: direction = math.fmod(direction + 180.0, 360.0) pan_accel = self.settings.get('scroll_pan_acceleration', 1.0) # Internal factor to adjust the panning speed so that user-adjustable # scroll_pan_acceleration is normalized to 1.0 for "normal" speed scr_pan_adj_factor = 1.4142135623730951 amount = (event.amount * scr_pan_adj_factor * pan_accel) / 360.0 self.pan_omni(viewer, direction, amount) return True
python
def sc_pan(self, viewer, event, msg=True): """Interactively pan the image by scrolling motion. """ if not self.canpan: return True # User has "Pan Reverse" preference set? rev = self.settings.get('pan_reverse', False) direction = event.direction if rev: direction = math.fmod(direction + 180.0, 360.0) pan_accel = self.settings.get('scroll_pan_acceleration', 1.0) # Internal factor to adjust the panning speed so that user-adjustable # scroll_pan_acceleration is normalized to 1.0 for "normal" speed scr_pan_adj_factor = 1.4142135623730951 amount = (event.amount * scr_pan_adj_factor * pan_accel) / 360.0 self.pan_omni(viewer, direction, amount) return True
['def', 'sc_pan', '(', 'self', ',', 'viewer', ',', 'event', ',', 'msg', '=', 'True', ')', ':', 'if', 'not', 'self', '.', 'canpan', ':', 'return', 'True', '# User has "Pan Reverse" preference set?', 'rev', '=', 'self', '.', 'settings', '.', 'get', '(', "'pan_reverse'", ',', 'False', ')', 'direction', '=', 'event', '.', 'direction', 'if', 'rev', ':', 'direction', '=', 'math', '.', 'fmod', '(', 'direction', '+', '180.0', ',', '360.0', ')', 'pan_accel', '=', 'self', '.', 'settings', '.', 'get', '(', "'scroll_pan_acceleration'", ',', '1.0', ')', '# Internal factor to adjust the panning speed so that user-adjustable', '# scroll_pan_acceleration is normalized to 1.0 for "normal" speed', 'scr_pan_adj_factor', '=', '1.4142135623730951', 'amount', '=', '(', 'event', '.', 'amount', '*', 'scr_pan_adj_factor', '*', 'pan_accel', ')', '/', '360.0', 'self', '.', 'pan_omni', '(', 'viewer', ',', 'direction', ',', 'amount', ')', 'return', 'True']
Interactively pan the image by scrolling motion.
['Interactively', 'pan', 'the', 'image', 'by', 'scrolling', 'motion', '.']
train
https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/Bindings.py#L1994-L2014
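A minimal check of the direction-reversal arithmetic in sc_pan above: adding 180 degrees modulo 360 flips the scroll direction while keeping the angle in range.

import math

for direction in (0.0, 90.0, 270.0):
    reversed_dir = math.fmod(direction + 180.0, 360.0)
    print(direction, '->', reversed_dir)  # 0.0 -> 180.0, 90.0 -> 270.0, 270.0 -> 90.0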
3,482
ipfs/py-ipfs-api
ipfsapi/http.py
HTTPClient.session
def session(self):
    """A context manager for this client's session.

    This function closes the current session when this client goes out of
    scope.
    """
    self._session = requests.session()
    yield
    self._session.close()
    self._session = None
python
def session(self):
    """A context manager for this client's session.

    This function closes the current session when this client goes out of
    scope.
    """
    self._session = requests.session()
    yield
    self._session.close()
    self._session = None
['def', 'session', '(', 'self', ')', ':', 'self', '.', '_session', '=', 'requests', '.', 'session', '(', ')', 'yield', 'self', '.', '_session', '.', 'close', '(', ')', 'self', '.', '_session', '=', 'None']
A context manager for this client's session.

This function closes the current session when this client goes out of
scope.
['A', 'context', 'manager', 'for', 'this', 'client', 's', 'session', '.']
train
https://github.com/ipfs/py-ipfs-api/blob/7574dad04877b45dbe4ad321dcfa9e880eb2d90c/ipfsapi/http.py#L311-L320
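The method above is a generator meant to be wrapped as a context manager; here is a self-contained sketch of the same pattern using contextlib and the real requests library, with an added try/finally (an assumption, not in the original) so the session is closed even if the body raises.

import contextlib

import requests


class Client:
    def __init__(self):
        self._session = None

    @contextlib.contextmanager
    def session(self):
        self._session = requests.session()
        try:
            yield
        finally:
            # close the session when the with-block exits
            self._session.close()
            self._session = None


with Client().session():
    pass  # requests issued here would share the one session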
3,483
quantumlib/Cirq
cirq/google/sim/xmon_simulator.py
XmonSimulator._run
def _run(
    self,
    circuit: circuits.Circuit,
    param_resolver: study.ParamResolver,
    repetitions: int,
) -> Dict[str, List[np.ndarray]]:
    """See definition in `cirq.SimulatesSamples`."""
    circuit = protocols.resolve_parameters(circuit, param_resolver)
    _verify_xmon_circuit(circuit)

    # Delegate to appropriate method based on contents.
    if circuit.are_all_measurements_terminal():
        return self._run_sweep_sample(circuit, repetitions)
    else:
        return self._run_sweep_repeat(circuit, repetitions)
python
def _run(
    self,
    circuit: circuits.Circuit,
    param_resolver: study.ParamResolver,
    repetitions: int,
) -> Dict[str, List[np.ndarray]]:
    """See definition in `cirq.SimulatesSamples`."""
    circuit = protocols.resolve_parameters(circuit, param_resolver)
    _verify_xmon_circuit(circuit)

    # Delegate to appropriate method based on contents.
    if circuit.are_all_measurements_terminal():
        return self._run_sweep_sample(circuit, repetitions)
    else:
        return self._run_sweep_repeat(circuit, repetitions)
['def', '_run', '(', 'self', ',', 'circuit', ':', 'circuits', '.', 'Circuit', ',', 'param_resolver', ':', 'study', '.', 'ParamResolver', ',', 'repetitions', ':', 'int', ',', ')', '->', 'Dict', '[', 'str', ',', 'List', '[', 'np', '.', 'ndarray', ']', ']', ':', 'circuit', '=', 'protocols', '.', 'resolve_parameters', '(', 'circuit', ',', 'param_resolver', ')', '_verify_xmon_circuit', '(', 'circuit', ')', '# Delegate to appropriate method based on contents.', 'if', 'circuit', '.', 'are_all_measurements_terminal', '(', ')', ':', 'return', 'self', '.', '_run_sweep_sample', '(', 'circuit', ',', 'repetitions', ')', 'else', ':', 'return', 'self', '.', '_run_sweep_repeat', '(', 'circuit', ',', 'repetitions', ')']
See definition in `cirq.SimulatesSamples`.
['See', 'definition', 'in', 'cirq', '.', 'SimulatesSamples', '.']
train
https://github.com/quantumlib/Cirq/blob/0827da80dd7880e5b923eb69407e980ed9bc0bd2/cirq/google/sim/xmon_simulator.py#L144-L159
3,484
yyuu/botornado
boto/ec2/connection.py
EC2Connection.modify_image_attribute
def modify_image_attribute(self, image_id, attribute='launchPermission',
                           operation='add', user_ids=None, groups=None,
                           product_codes=None):
    """
    Changes an attribute of an image.

    :type image_id: string
    :param image_id: The image id you wish to change

    :type attribute: string
    :param attribute: The attribute you wish to change

    :type operation: string
    :param operation: Either add or remove (this is required for changing
        launchPermissions)

    :type user_ids: list
    :param user_ids: The Amazon IDs of users to add/remove attributes

    :type groups: list
    :param groups: The groups to add/remove attributes

    :type product_codes: list
    :param product_codes: Amazon DevPay product code. Currently only one
        product code can be associated with an AMI. Once set, the product
        code cannot be changed or reset.
    """
    params = {'ImageId' : image_id,
              'Attribute' : attribute,
              'OperationType' : operation}
    if user_ids:
        self.build_list_params(params, user_ids, 'UserId')
    if groups:
        self.build_list_params(params, groups, 'UserGroup')
    if product_codes:
        self.build_list_params(params, product_codes, 'ProductCode')
    return self.get_status('ModifyImageAttribute', params, verb='POST')
python
def modify_image_attribute(self, image_id, attribute='launchPermission',
                           operation='add', user_ids=None, groups=None,
                           product_codes=None):
    """
    Changes an attribute of an image.

    :type image_id: string
    :param image_id: The image id you wish to change

    :type attribute: string
    :param attribute: The attribute you wish to change

    :type operation: string
    :param operation: Either add or remove (this is required for changing
        launchPermissions)

    :type user_ids: list
    :param user_ids: The Amazon IDs of users to add/remove attributes

    :type groups: list
    :param groups: The groups to add/remove attributes

    :type product_codes: list
    :param product_codes: Amazon DevPay product code. Currently only one
        product code can be associated with an AMI. Once set, the product
        code cannot be changed or reset.
    """
    params = {'ImageId' : image_id,
              'Attribute' : attribute,
              'OperationType' : operation}
    if user_ids:
        self.build_list_params(params, user_ids, 'UserId')
    if groups:
        self.build_list_params(params, groups, 'UserGroup')
    if product_codes:
        self.build_list_params(params, product_codes, 'ProductCode')
    return self.get_status('ModifyImageAttribute', params, verb='POST')
['def', 'modify_image_attribute', '(', 'self', ',', 'image_id', ',', 'attribute', '=', "'launchPermission'", ',', 'operation', '=', "'add'", ',', 'user_ids', '=', 'None', ',', 'groups', '=', 'None', ',', 'product_codes', '=', 'None', ')', ':', 'params', '=', '{', "'ImageId'", ':', 'image_id', ',', "'Attribute'", ':', 'attribute', ',', "'OperationType'", ':', 'operation', '}', 'if', 'user_ids', ':', 'self', '.', 'build_list_params', '(', 'params', ',', 'user_ids', ',', "'UserId'", ')', 'if', 'groups', ':', 'self', '.', 'build_list_params', '(', 'params', ',', 'groups', ',', "'UserGroup'", ')', 'if', 'product_codes', ':', 'self', '.', 'build_list_params', '(', 'params', ',', 'product_codes', ',', "'ProductCode'", ')', 'return', 'self', '.', 'get_status', '(', "'ModifyImageAttribute'", ',', 'params', ',', 'verb', '=', "'POST'", ')']
Changes an attribute of an image.

:type image_id: string
:param image_id: The image id you wish to change

:type attribute: string
:param attribute: The attribute you wish to change

:type operation: string
:param operation: Either add or remove (this is required for changing
    launchPermissions)

:type user_ids: list
:param user_ids: The Amazon IDs of users to add/remove attributes

:type groups: list
:param groups: The groups to add/remove attributes

:type product_codes: list
:param product_codes: Amazon DevPay product code. Currently only one
    product code can be associated with an AMI. Once set, the product
    code cannot be changed or reset.
['Changes', 'an', 'attribute', 'of', 'an', 'image', '.']
train
https://github.com/yyuu/botornado/blob/fffb056f5ff2324d1d5c1304014cfb1d899f602e/boto/ec2/connection.py#L375-L411
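A guess at the indexed query-string expansion that build_list_params performs for EC2 (the helper below is illustrative; boto's real method lives on the connection class).

def build_list_params(params, items, label):
    # EC2 list parameters are numbered from 1: UserId.1, UserId.2, ...
    for i, item in enumerate(items, start=1):
        params['%s.%d' % (label, i)] = item

params = {'ImageId': 'ami-1234', 'Attribute': 'launchPermission',
          'OperationType': 'add'}
build_list_params(params, ['123456789012'], 'UserId')
print(params['UserId.1'])  # 123456789012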
3,485
inasafe/inasafe
safe/gui/tools/wizard/step_kw57_extra_keywords.py
extra_keywords_to_widgets
def extra_keywords_to_widgets(extra_keyword_definition):
    """Create widgets for extra keyword.

    :param extra_keyword_definition: An extra keyword definition.
    :type extra_keyword_definition: dict

    :return: QCheckBox and The input widget
    :rtype: (QCheckBox, QWidget)
    """
    # Check box
    check_box = QCheckBox(extra_keyword_definition['name'])
    check_box.setToolTip(extra_keyword_definition['description'])
    check_box.setChecked(True)

    # Input widget
    if extra_keyword_definition['type'] == float:
        input_widget = QDoubleSpinBox()
        input_widget.setMinimum(extra_keyword_definition['minimum'])
        input_widget.setMaximum(extra_keyword_definition['maximum'])
        input_widget.setSuffix(extra_keyword_definition['unit_string'])
    elif extra_keyword_definition['type'] == int:
        input_widget = QSpinBox()
        input_widget.setMinimum(extra_keyword_definition['minimum'])
        input_widget.setMaximum(extra_keyword_definition['maximum'])
        input_widget.setSuffix(extra_keyword_definition['unit_string'])
    elif extra_keyword_definition['type'] == str:
        if extra_keyword_definition.get('options'):
            input_widget = QComboBox()
            options = extra_keyword_definition['options']
            for option in options:
                input_widget.addItem(
                    option['name'],
                    option['key'],
                )
            default_option_index = input_widget.findData(
                extra_keyword_definition['default_option'])
            input_widget.setCurrentIndex(default_option_index)
        else:
            input_widget = QLineEdit()
    elif extra_keyword_definition['type'] == datetime:
        input_widget = QDateTimeEdit()
        input_widget.setCalendarPopup(True)
        input_widget.setDisplayFormat('hh:mm:ss, d MMM yyyy')
        input_widget.setDateTime(datetime.now())
    else:
        raise Exception
    input_widget.setToolTip(extra_keyword_definition['description'])

    # Signal
    # noinspection PyUnresolvedReferences
    check_box.stateChanged.connect(input_widget.setEnabled)

    return check_box, input_widget
python
def extra_keywords_to_widgets(extra_keyword_definition):
    """Create widgets for extra keyword.

    :param extra_keyword_definition: An extra keyword definition.
    :type extra_keyword_definition: dict

    :return: QCheckBox and The input widget
    :rtype: (QCheckBox, QWidget)
    """
    # Check box
    check_box = QCheckBox(extra_keyword_definition['name'])
    check_box.setToolTip(extra_keyword_definition['description'])
    check_box.setChecked(True)

    # Input widget
    if extra_keyword_definition['type'] == float:
        input_widget = QDoubleSpinBox()
        input_widget.setMinimum(extra_keyword_definition['minimum'])
        input_widget.setMaximum(extra_keyword_definition['maximum'])
        input_widget.setSuffix(extra_keyword_definition['unit_string'])
    elif extra_keyword_definition['type'] == int:
        input_widget = QSpinBox()
        input_widget.setMinimum(extra_keyword_definition['minimum'])
        input_widget.setMaximum(extra_keyword_definition['maximum'])
        input_widget.setSuffix(extra_keyword_definition['unit_string'])
    elif extra_keyword_definition['type'] == str:
        if extra_keyword_definition.get('options'):
            input_widget = QComboBox()
            options = extra_keyword_definition['options']
            for option in options:
                input_widget.addItem(
                    option['name'],
                    option['key'],
                )
            default_option_index = input_widget.findData(
                extra_keyword_definition['default_option'])
            input_widget.setCurrentIndex(default_option_index)
        else:
            input_widget = QLineEdit()
    elif extra_keyword_definition['type'] == datetime:
        input_widget = QDateTimeEdit()
        input_widget.setCalendarPopup(True)
        input_widget.setDisplayFormat('hh:mm:ss, d MMM yyyy')
        input_widget.setDateTime(datetime.now())
    else:
        raise Exception
    input_widget.setToolTip(extra_keyword_definition['description'])

    # Signal
    # noinspection PyUnresolvedReferences
    check_box.stateChanged.connect(input_widget.setEnabled)

    return check_box, input_widget
['def', 'extra_keywords_to_widgets', '(', 'extra_keyword_definition', ')', ':', '# Check box', 'check_box', '=', 'QCheckBox', '(', 'extra_keyword_definition', '[', "'name'", ']', ')', 'check_box', '.', 'setToolTip', '(', 'extra_keyword_definition', '[', "'description'", ']', ')', 'check_box', '.', 'setChecked', '(', 'True', ')', '# Input widget', 'if', 'extra_keyword_definition', '[', "'type'", ']', '==', 'float', ':', 'input_widget', '=', 'QDoubleSpinBox', '(', ')', 'input_widget', '.', 'setMinimum', '(', 'extra_keyword_definition', '[', "'minimum'", ']', ')', 'input_widget', '.', 'setMaximum', '(', 'extra_keyword_definition', '[', "'maximum'", ']', ')', 'input_widget', '.', 'setSuffix', '(', 'extra_keyword_definition', '[', "'unit_string'", ']', ')', 'elif', 'extra_keyword_definition', '[', "'type'", ']', '==', 'int', ':', 'input_widget', '=', 'QSpinBox', '(', ')', 'input_widget', '.', 'setMinimum', '(', 'extra_keyword_definition', '[', "'minimum'", ']', ')', 'input_widget', '.', 'setMaximum', '(', 'extra_keyword_definition', '[', "'maximum'", ']', ')', 'input_widget', '.', 'setSuffix', '(', 'extra_keyword_definition', '[', "'unit_string'", ']', ')', 'elif', 'extra_keyword_definition', '[', "'type'", ']', '==', 'str', ':', 'if', 'extra_keyword_definition', '.', 'get', '(', "'options'", ')', ':', 'input_widget', '=', 'QComboBox', '(', ')', 'options', '=', 'extra_keyword_definition', '[', "'options'", ']', 'for', 'option', 'in', 'options', ':', 'input_widget', '.', 'addItem', '(', 'option', '[', "'name'", ']', ',', 'option', '[', "'key'", ']', ',', ')', 'default_option_index', '=', 'input_widget', '.', 'findData', '(', 'extra_keyword_definition', '[', "'default_option'", ']', ')', 'input_widget', '.', 'setCurrentIndex', '(', 'default_option_index', ')', 'else', ':', 'input_widget', '=', 'QLineEdit', '(', ')', 'elif', 'extra_keyword_definition', '[', "'type'", ']', '==', 'datetime', ':', 'input_widget', '=', 'QDateTimeEdit', '(', ')', 'input_widget', '.', 'setCalendarPopup', '(', 'True', ')', 'input_widget', '.', 'setDisplayFormat', '(', "'hh:mm:ss, d MMM yyyy'", ')', 'input_widget', '.', 'setDateTime', '(', 'datetime', '.', 'now', '(', ')', ')', 'else', ':', 'raise', 'Exception', 'input_widget', '.', 'setToolTip', '(', 'extra_keyword_definition', '[', "'description'", ']', ')', '# Signal', '# noinspection PyUnresolvedReferences', 'check_box', '.', 'stateChanged', '.', 'connect', '(', 'input_widget', '.', 'setEnabled', ')', 'return', 'check_box', ',', 'input_widget']
Create widgets for extra keyword.

:param extra_keyword_definition: An extra keyword definition.
:type extra_keyword_definition: dict

:return: QCheckBox and The input widget
:rtype: (QCheckBox, QWidget)
['Create', 'widgets', 'for', 'extra', 'keyword', '.']
train
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/gui/tools/wizard/step_kw57_extra_keywords.py#L173-L225
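The Qt-free core of the dispatch above: pick a widget class from the declared 'type' of an extra-keyword definition. This sketch returns class names as strings so it runs without PyQt installed.

from datetime import datetime

def widget_for(definition):
    kind = definition['type']
    if kind is float:
        return 'QDoubleSpinBox'
    if kind is int:
        return 'QSpinBox'
    if kind is str:
        # string keywords with an options list become combo boxes
        return 'QComboBox' if definition.get('options') else 'QLineEdit'
    if kind is datetime:
        return 'QDateTimeEdit'
    raise ValueError('unsupported extra keyword type: %r' % kind)

print(widget_for({'type': float}))                  # QDoubleSpinBox
print(widget_for({'type': str, 'options': ['a']}))  # QComboBox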
3,486
gwpy/gwpy
gwpy/io/cache.py
is_cache
def is_cache(cache):
    """Returns `True` if ``cache`` is a readable cache file or object

    Parameters
    ----------
    cache : `str`, `file`, `list`
        Object to detect as cache

    Returns
    -------
    iscache : `bool`
        `True` if the input object is a cache, or a file in LAL cache
        format, otherwise `False`
    """
    if isinstance(cache, string_types + FILE_LIKE):
        try:
            return bool(len(read_cache(cache)))
        except (TypeError, ValueError, UnicodeDecodeError, ImportError):
            # failed to parse cache
            return False
    if HAS_CACHE and isinstance(cache, Cache):
        return True
    if (isinstance(cache, (list, tuple)) and cache and
            all(map(is_cache_entry, cache))):
        return True
    return False
python
def is_cache(cache):
    """Returns `True` if ``cache`` is a readable cache file or object

    Parameters
    ----------
    cache : `str`, `file`, `list`
        Object to detect as cache

    Returns
    -------
    iscache : `bool`
        `True` if the input object is a cache, or a file in LAL cache
        format, otherwise `False`
    """
    if isinstance(cache, string_types + FILE_LIKE):
        try:
            return bool(len(read_cache(cache)))
        except (TypeError, ValueError, UnicodeDecodeError, ImportError):
            # failed to parse cache
            return False
    if HAS_CACHE and isinstance(cache, Cache):
        return True
    if (isinstance(cache, (list, tuple)) and cache and
            all(map(is_cache_entry, cache))):
        return True
    return False
['def', 'is_cache', '(', 'cache', ')', ':', 'if', 'isinstance', '(', 'cache', ',', 'string_types', '+', 'FILE_LIKE', ')', ':', 'try', ':', 'return', 'bool', '(', 'len', '(', 'read_cache', '(', 'cache', ')', ')', ')', 'except', '(', 'TypeError', ',', 'ValueError', ',', 'UnicodeDecodeError', ',', 'ImportError', ')', ':', '# failed to parse cache', 'return', 'False', 'if', 'HAS_CACHE', 'and', 'isinstance', '(', 'cache', ',', 'Cache', ')', ':', 'return', 'True', 'if', '(', 'isinstance', '(', 'cache', ',', '(', 'list', ',', 'tuple', ')', ')', 'and', 'cache', 'and', 'all', '(', 'map', '(', 'is_cache_entry', ',', 'cache', ')', ')', ')', ':', 'return', 'True', 'return', 'False']
Returns `True` if ``cache`` is a readable cache file or object

Parameters
----------
cache : `str`, `file`, `list`
    Object to detect as cache

Returns
-------
iscache : `bool`
    `True` if the input object is a cache, or a file in LAL cache format,
    otherwise `False`
['Returns', 'True', 'if', 'cache', 'is', 'a', 'readable', 'cache', 'file', 'or', 'object']
train
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/io/cache.py#L265-L291
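One subtlety worth illustrating from the last branch of is_cache: all() over an empty iterable is vacuously True, which is why the extra `and cache` guard is needed before the all(map(...)) test. The predicate below is a stand-in, not gwpy's is_cache_entry.

def looks_like_entry(item):
    # stand-in predicate for the sketch: five pipe-separated LAL fields
    return isinstance(item, str) and item.count('|') == 4

entries = ['H1|TEST|0|1|a.gwf', 'H1|TEST|1|1|b.gwf']
print(all(map(looks_like_entry, entries)))          # True
print(all(map(looks_like_entry, [])))               # True -- vacuously!
print(bool([]) and all(map(looks_like_entry, [])))  # False, as in is_cache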
3,487
pip-services3-python/pip-services3-commons-python
pip_services3_commons/random/RandomString.py
RandomString.distort
def distort(value):
    """
    Distorts a string by randomly replacing characters in it.

    :param value: a string to distort.

    :return: a distorted string.
    """
    value = value.lower()

    if (RandomBoolean.chance(1, 5)):
        value = value[0:1].upper() + value[1:]

    if (RandomBoolean.chance(1, 3)):
        value = value + random.choice(_symbols)

    return value
python
def distort(value):
    """
    Distorts a string by randomly replacing characters in it.

    :param value: a string to distort.

    :return: a distorted string.
    """
    value = value.lower()

    if (RandomBoolean.chance(1, 5)):
        value = value[0:1].upper() + value[1:]

    if (RandomBoolean.chance(1, 3)):
        value = value + random.choice(_symbols)

    return value
['def', 'distort', '(', 'value', ')', ':', 'value', '=', 'value', '.', 'lower', '(', ')', 'if', '(', 'RandomBoolean', '.', 'chance', '(', '1', ',', '5', ')', ')', ':', 'value', '=', 'value', '[', '0', ':', '1', ']', '.', 'upper', '(', ')', '+', 'value', '[', '1', ':', ']', 'if', '(', 'RandomBoolean', '.', 'chance', '(', '1', ',', '3', ')', ')', ':', 'value', '=', 'value', '+', 'random', '.', 'choice', '(', '_symbols', ')', 'return', 'value']
Distorts a string by randomly replacing characters in it.

:param value: a string to distort.

:return: a distorted string.
['Distorts', 'a', 'string', 'by', 'randomly', 'replacing', 'characters', 'in', 'it', '.']
train
https://github.com/pip-services3-python/pip-services3-commons-python/blob/22cbbb3e91e49717f65c083d36147fdb07ba9e3b/pip_services3_commons/random/RandomString.py#L47-L63
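A self-contained sketch of the distort logic above; chance() and SYMBOLS stand in for RandomBoolean.chance and the module-level _symbols table, whose exact contents are not shown in this record.

import random

SYMBOLS = '_.,!?'   # illustrative; not the library's actual _symbols

def chance(hits, max_chances):
    # rough stand-in: True with probability hits / max_chances
    return random.random() * max_chances < hits

def distort(value):
    value = value.lower()
    if chance(1, 5):
        value = value[:1].upper() + value[1:]
    if chance(1, 3):
        value = value + random.choice(SYMBOLS)
    return value

random.seed(0)
print(distort('Hello World'))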
3,488
mosdef-hub/mbuild
mbuild/lib/recipes/silica_interface.py
SilicaInterface._strip_stray_atoms
def _strip_stray_atoms(self):
    """Remove stray atoms and surface pieces. """
    components = self.bond_graph.connected_components()
    major_component = max(components, key=len)
    for atom in list(self.particles()):
        if atom not in major_component:
            self.remove(atom)
python
def _strip_stray_atoms(self):
    """Remove stray atoms and surface pieces. """
    components = self.bond_graph.connected_components()
    major_component = max(components, key=len)
    for atom in list(self.particles()):
        if atom not in major_component:
            self.remove(atom)
['def', '_strip_stray_atoms', '(', 'self', ')', ':', 'components', '=', 'self', '.', 'bond_graph', '.', 'connected_components', '(', ')', 'major_component', '=', 'max', '(', 'components', ',', 'key', '=', 'len', ')', 'for', 'atom', 'in', 'list', '(', 'self', '.', 'particles', '(', ')', ')', ':', 'if', 'atom', 'not', 'in', 'major_component', ':', 'self', '.', 'remove', '(', 'atom', ')']
Remove stray atoms and surface pieces.
['Remove', 'stray', 'atoms', 'and', 'surface', 'pieces', '.']
train
https://github.com/mosdef-hub/mbuild/blob/dcb80a2becd5d0e6a7e3e7bcb1b59793c46a2dd3/mbuild/lib/recipes/silica_interface.py#L76-L82
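The same keep-only-the-largest-component idea, sketched with networkx in place of mbuild's bond graph (node integers stand in for atoms).

import networkx as nx

bonds = nx.Graph([(0, 1), (1, 2), (3, 4)])            # two fragments
major = max(nx.connected_components(bonds), key=len)  # {0, 1, 2}
stray = [n for n in bonds.nodes if n not in major]
bonds.remove_nodes_from(stray)
print(sorted(bonds.nodes))  # [0, 1, 2]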
3,489
quantopian/zipline
zipline/assets/asset_writer.py
AssetDBWriter.init_db
def init_db(self, txn=None):
    """Connect to database and create tables.

    Parameters
    ----------
    txn : sa.engine.Connection, optional
        The transaction to execute in. If this is not provided, a new
        transaction will be started with the engine provided.

    Returns
    -------
    metadata : sa.MetaData
        The metadata that describes the new assets db.
    """
    with ExitStack() as stack:
        if txn is None:
            txn = stack.enter_context(self.engine.begin())

        tables_already_exist = self._all_tables_present(txn)

        # Create the SQL tables if they do not already exist.
        metadata.create_all(txn, checkfirst=True)

        if tables_already_exist:
            check_version_info(txn, version_info, ASSET_DB_VERSION)
        else:
            write_version_info(txn, version_info, ASSET_DB_VERSION)
python
def init_db(self, txn=None):
    """Connect to database and create tables.

    Parameters
    ----------
    txn : sa.engine.Connection, optional
        The transaction to execute in. If this is not provided, a new
        transaction will be started with the engine provided.

    Returns
    -------
    metadata : sa.MetaData
        The metadata that describes the new assets db.
    """
    with ExitStack() as stack:
        if txn is None:
            txn = stack.enter_context(self.engine.begin())

        tables_already_exist = self._all_tables_present(txn)

        # Create the SQL tables if they do not already exist.
        metadata.create_all(txn, checkfirst=True)

        if tables_already_exist:
            check_version_info(txn, version_info, ASSET_DB_VERSION)
        else:
            write_version_info(txn, version_info, ASSET_DB_VERSION)
['def', 'init_db', '(', 'self', ',', 'txn', '=', 'None', ')', ':', 'with', 'ExitStack', '(', ')', 'as', 'stack', ':', 'if', 'txn', 'is', 'None', ':', 'txn', '=', 'stack', '.', 'enter_context', '(', 'self', '.', 'engine', '.', 'begin', '(', ')', ')', 'tables_already_exist', '=', 'self', '.', '_all_tables_present', '(', 'txn', ')', '# Create the SQL tables if they do not already exist.', 'metadata', '.', 'create_all', '(', 'txn', ',', 'checkfirst', '=', 'True', ')', 'if', 'tables_already_exist', ':', 'check_version_info', '(', 'txn', ',', 'version_info', ',', 'ASSET_DB_VERSION', ')', 'else', ':', 'write_version_info', '(', 'txn', ',', 'version_info', ',', 'ASSET_DB_VERSION', ')']
Connect to database and create tables.

Parameters
----------
txn : sa.engine.Connection, optional
    The transaction to execute in. If this is not provided, a new
    transaction will be started with the engine provided.

Returns
-------
metadata : sa.MetaData
    The metadata that describes the new assets db.
['Connect', 'to', 'database', 'and', 'create', 'tables', '.']
train
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/assets/asset_writer.py#L876-L902
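The optional-transaction pattern above reduced to its core: ExitStack enters a new context only when the caller did not supply one. sqlite3 is used purely for the sketch; zipline itself targets SQLAlchemy.

import sqlite3
from contextlib import ExitStack

def init_db(conn, txn=None):
    with ExitStack() as stack:
        if txn is None:
            # no caller-supplied transaction: enter our own scope,
            # which the stack exits (committing) when the block ends
            txn = stack.enter_context(conn)
        txn.execute('CREATE TABLE IF NOT EXISTS assets (sid INTEGER)')

conn = sqlite3.connect(':memory:')
init_db(conn)            # manages its own transaction scope
init_db(conn, txn=conn)  # reuses the caller's; the stack enters nothing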
3,490
iotile/coretools
iotilegateway/iotilegateway/supervisor/server.py
IOTileSupervisor.respond_rpc
async def respond_rpc(self, msg, _context):
    """Respond to an RPC previously sent to a service."""

    rpc_id = msg.get('response_uuid')
    result = msg.get('result')
    payload = msg.get('response')

    self.service_manager.send_rpc_response(rpc_id, result, payload)
python
async def respond_rpc(self, msg, _context):
    """Respond to an RPC previously sent to a service."""

    rpc_id = msg.get('response_uuid')
    result = msg.get('result')
    payload = msg.get('response')

    self.service_manager.send_rpc_response(rpc_id, result, payload)
['async', 'def', 'respond_rpc', '(', 'self', ',', 'msg', ',', '_context', ')', ':', 'rpc_id', '=', 'msg', '.', 'get', '(', "'response_uuid'", ')', 'result', '=', 'msg', '.', 'get', '(', "'result'", ')', 'payload', '=', 'msg', '.', 'get', '(', "'response'", ')', 'self', '.', 'service_manager', '.', 'send_rpc_response', '(', 'rpc_id', ',', 'result', ',', 'payload', ')']
Respond to an RPC previously sent to a service.
['Respond', 'to', 'an', 'RPC', 'previously', 'sent', 'to', 'a', 'service', '.']
train
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilegateway/iotilegateway/supervisor/server.py#L162-L169
3,491
ramrod-project/database-brain
schema/brain/binary/data.py
put_buffer
def put_buffer(filename, content, conn=None):
    """
    helper function for put
    :param filename: <str>
    :param content: <bytes>
    :param conn: <rethinkdb.DefaultConnection>
    :return: <dict>
    """
    obj_dict = {PRIMARY_FIELD: filename,
                CONTENT_FIELD: content,
                TIMESTAMP_FIELD: time()}
    return put(obj_dict, conn=conn)
python
def put_buffer(filename, content, conn=None):
    """
    helper function for put
    :param filename: <str>
    :param content: <bytes>
    :param conn: <rethinkdb.DefaultConnection>
    :return: <dict>
    """
    obj_dict = {PRIMARY_FIELD: filename,
                CONTENT_FIELD: content,
                TIMESTAMP_FIELD: time()}
    return put(obj_dict, conn=conn)
['def', 'put_buffer', '(', 'filename', ',', 'content', ',', 'conn', '=', 'None', ')', ':', 'obj_dict', '=', '{', 'PRIMARY_FIELD', ':', 'filename', ',', 'CONTENT_FIELD', ':', 'content', ',', 'TIMESTAMP_FIELD', ':', 'time', '(', ')', '}', 'return', 'put', '(', 'obj_dict', ',', 'conn', '=', 'conn', ')']
helper function for put
:param filename: <str>
:param content: <bytes>
:param conn: <rethinkdb.DefaultConnection>
:return: <dict>
['helper', 'function', 'for', 'put', ':', 'param', 'filename', ':', '<str', '>', ':', 'param', 'content', ':', '<bytes', '>', ':', 'param', 'conn', ':', '<rethinkdb', '.', 'DefaultConnection', '>', ':', 'return', ':', '<dict', '>']
train
https://github.com/ramrod-project/database-brain/blob/b024cb44f34cabb9d80af38271ddb65c25767083/schema/brain/binary/data.py#L44-L55
3,492
AndresMWeber/Nomenclate
nomenclate/core/formatter.py
FormatString.get_valid_format_order
def get_valid_format_order(cls, format_target, format_order=None):
    """ Checks to see if the target format string follows the proper style
    """
    format_order = format_order or cls.parse_format_order(format_target)
    cls.validate_no_token_duplicates(format_order)
    format_target = cls.remove_tokens(format_target, format_order)
    format_target = cls.remove_static_text(format_target)
    cls.validate_separator_characters(format_target)
    cls.validate_matched_parenthesis(format_target)
    return format_order
python
def get_valid_format_order(cls, format_target, format_order=None):
    """ Checks to see if the target format string follows the proper style
    """
    format_order = format_order or cls.parse_format_order(format_target)
    cls.validate_no_token_duplicates(format_order)
    format_target = cls.remove_tokens(format_target, format_order)
    format_target = cls.remove_static_text(format_target)
    cls.validate_separator_characters(format_target)
    cls.validate_matched_parenthesis(format_target)
    return format_order
['def', 'get_valid_format_order', '(', 'cls', ',', 'format_target', ',', 'format_order', '=', 'None', ')', ':', 'format_order', '=', 'format_order', 'or', 'cls', '.', 'parse_format_order', '(', 'format_target', ')', 'cls', '.', 'validate_no_token_duplicates', '(', 'format_order', ')', 'format_target', '=', 'cls', '.', 'remove_tokens', '(', 'format_target', ',', 'format_order', ')', 'format_target', '=', 'cls', '.', 'remove_static_text', '(', 'format_target', ')', 'cls', '.', 'validate_separator_characters', '(', 'format_target', ')', 'cls', '.', 'validate_matched_parenthesis', '(', 'format_target', ')', 'return', 'format_order']
Checks to see if the target format string follows the proper style
['Checks', 'to', 'see', 'if', 'the', 'target', 'format', 'string', 'follows', 'the', 'proper', 'style']
train
https://github.com/AndresMWeber/Nomenclate/blob/e6d6fc28beac042bad588e56fbe77531d2de6b6f/nomenclate/core/formatter.py#L52-L61
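A guess at what one of the validation steps above does — validate_no_token_duplicates presumably rejects a format order that names the same token twice. Sketch only; Nomenclate's real check may differ.

def validate_no_token_duplicates(format_order):
    seen = set()
    for token in format_order:
        if token in seen:
            raise ValueError('duplicate token in format: %r' % token)
        seen.add(token)

validate_no_token_duplicates(['side', 'location', 'type'])  # passes
try:
    validate_no_token_duplicates(['side', 'type', 'type'])
except ValueError as err:
    print(err)  # duplicate token in format: 'type'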
3,493
raphaelvallat/pingouin
pingouin/correlation.py
distance_corr
def distance_corr(x, y, tail='upper', n_boot=1000, seed=None):
    """Distance correlation between two arrays.

    Statistical significance (p-value) is evaluated with a permutation test.

    Parameters
    ----------
    x, y : np.ndarray
        1D or 2D input arrays, shape (n_samples, n_features).
        x and y must have the same number of samples and must not
        contain missing values.
    tail : str
        Tail for p-value ::

            'upper' : one-sided (upper tail)
            'lower' : one-sided (lower tail)
            'two-sided' : two-sided

    n_boot : int or None
        Number of bootstrap to perform.
        If None, no bootstrapping is performed and the function
        only returns the distance correlation (no p-value).
        Default is 1000 (thus giving a precision of 0.001).
    seed : int or None
        Random state seed.

    Returns
    -------
    dcor : float
        Sample distance correlation (range from 0 to 1).
    pval : float
        P-value

    Notes
    -----
    From Wikipedia:

    *Distance correlation is a measure of dependence between two paired
    random vectors of arbitrary, not necessarily equal, dimension. The
    distance correlation coefficient is zero if and only if the random
    vectors are independent. Thus, distance correlation measures both
    linear and nonlinear association between two random variables or
    random vectors. This is in contrast to Pearson's correlation, which
    can only detect linear association between two random variables.*

    The distance correlation of two random variables is obtained by
    dividing their distance covariance by the product of their distance
    standard deviations:

    .. math::

        \\text{dCor}(X, Y) = \\frac{\\text{dCov}(X, Y)}
        {\\sqrt{\\text{dVar}(X) \\cdot \\text{dVar}(Y)}}

    where :math:`\\text{dCov}(X, Y)` is the square root of the arithmetic
    average of the product of the double-centered pairwise Euclidean
    distance matrices.

    Note that by contrast to Pearson's correlation, the distance
    correlation cannot be negative,
    i.e. :math:`0 \\leq \\text{dCor} \\leq 1`.

    Results have been tested against the 'energy' R package. To be
    consistent with the latter, only the one-sided p-value is computed,
    i.e. the upper tail of the T-statistic.

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Distance_correlation

    .. [2] Székely, G. J., Rizzo, M. L., & Bakirov, N. K. (2007).
           Measuring and testing dependence by correlation of distances.
           The annals of statistics, 35(6), 2769-2794.

    .. [3] https://gist.github.com/satra/aa3d19a12b74e9ab7941

    .. [4] https://gist.github.com/wladston/c931b1495184fbb99bec

    .. [5] https://cran.r-project.org/web/packages/energy/energy.pdf

    Examples
    --------
    1. With two 1D vectors

    >>> from pingouin import distance_corr
    >>> a = [1, 2, 3, 4, 5]
    >>> b = [1, 2, 9, 4, 4]
    >>> distance_corr(a, b, seed=9)
    (0.7626762424168667, 0.312)

    2. With two 2D arrays and no p-value

    >>> import numpy as np
    >>> np.random.seed(123)
    >>> from pingouin import distance_corr
    >>> a = np.random.random((10, 10))
    >>> b = np.random.random((10, 10))
    >>> distance_corr(a, b, n_boot=None)
    0.8799633012275321
    """
    assert tail in ['upper', 'lower', 'two-sided'], 'Wrong tail argument.'
    x = np.asarray(x)
    y = np.asarray(y)
    # Check for NaN values
    if any([np.isnan(np.min(x)), np.isnan(np.min(y))]):
        raise ValueError('Input arrays must not contain NaN values.')
    if x.ndim == 1:
        x = x[:, None]
    if y.ndim == 1:
        y = y[:, None]
    assert x.shape[0] == y.shape[0], 'x and y must have same number of samples'

    # Extract number of samples
    n = x.shape[0]
    n2 = n**2

    # Process first array to avoid redundancy when performing bootstrap
    a = squareform(pdist(x, metric='euclidean'))
    A = a - a.mean(axis=0)[None, :] - a.mean(axis=1)[:, None] + a.mean()
    dcov2_xx = np.vdot(A, A) / n2

    # Process second array and compute final distance correlation
    dcor = _dcorr(y, n2, A, dcov2_xx)

    # Compute one-sided p-value using a bootstrap procedure
    if n_boot is not None and n_boot > 1:
        # Define random seed and permutation
        rng = np.random.RandomState(seed)
        bootsam = rng.random_sample((n_boot, n)).argsort(axis=1)
        bootstat = np.empty(n_boot)
        for i in range(n_boot):
            bootstat[i] = _dcorr(y[bootsam[i, :]], n2, A, dcov2_xx)
        pval = _perm_pval(bootstat, dcor, tail=tail)
        return dcor, pval
    else:
        return dcor
python
def distance_corr(x, y, tail='upper', n_boot=1000, seed=None):
    """Distance correlation between two arrays.

    Statistical significance (p-value) is evaluated with a permutation test.

    Parameters
    ----------
    x, y : np.ndarray
        1D or 2D input arrays, shape (n_samples, n_features).
        x and y must have the same number of samples and must not
        contain missing values.
    tail : str
        Tail for p-value ::

            'upper' : one-sided (upper tail)
            'lower' : one-sided (lower tail)
            'two-sided' : two-sided

    n_boot : int or None
        Number of bootstrap to perform.
        If None, no bootstrapping is performed and the function
        only returns the distance correlation (no p-value).
        Default is 1000 (thus giving a precision of 0.001).
    seed : int or None
        Random state seed.

    Returns
    -------
    dcor : float
        Sample distance correlation (range from 0 to 1).
    pval : float
        P-value

    Notes
    -----
    From Wikipedia:

    *Distance correlation is a measure of dependence between two paired
    random vectors of arbitrary, not necessarily equal, dimension. The
    distance correlation coefficient is zero if and only if the random
    vectors are independent. Thus, distance correlation measures both
    linear and nonlinear association between two random variables or
    random vectors. This is in contrast to Pearson's correlation, which
    can only detect linear association between two random variables.*

    The distance correlation of two random variables is obtained by
    dividing their distance covariance by the product of their distance
    standard deviations:

    .. math::

        \\text{dCor}(X, Y) = \\frac{\\text{dCov}(X, Y)}
        {\\sqrt{\\text{dVar}(X) \\cdot \\text{dVar}(Y)}}

    where :math:`\\text{dCov}(X, Y)` is the square root of the arithmetic
    average of the product of the double-centered pairwise Euclidean
    distance matrices.

    Note that by contrast to Pearson's correlation, the distance
    correlation cannot be negative,
    i.e. :math:`0 \\leq \\text{dCor} \\leq 1`.

    Results have been tested against the 'energy' R package. To be
    consistent with the latter, only the one-sided p-value is computed,
    i.e. the upper tail of the T-statistic.

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Distance_correlation

    .. [2] Székely, G. J., Rizzo, M. L., & Bakirov, N. K. (2007).
           Measuring and testing dependence by correlation of distances.
           The annals of statistics, 35(6), 2769-2794.

    .. [3] https://gist.github.com/satra/aa3d19a12b74e9ab7941

    .. [4] https://gist.github.com/wladston/c931b1495184fbb99bec

    .. [5] https://cran.r-project.org/web/packages/energy/energy.pdf

    Examples
    --------
    1. With two 1D vectors

    >>> from pingouin import distance_corr
    >>> a = [1, 2, 3, 4, 5]
    >>> b = [1, 2, 9, 4, 4]
    >>> distance_corr(a, b, seed=9)
    (0.7626762424168667, 0.312)

    2. With two 2D arrays and no p-value

    >>> import numpy as np
    >>> np.random.seed(123)
    >>> from pingouin import distance_corr
    >>> a = np.random.random((10, 10))
    >>> b = np.random.random((10, 10))
    >>> distance_corr(a, b, n_boot=None)
    0.8799633012275321
    """
    assert tail in ['upper', 'lower', 'two-sided'], 'Wrong tail argument.'
    x = np.asarray(x)
    y = np.asarray(y)
    # Check for NaN values
    if any([np.isnan(np.min(x)), np.isnan(np.min(y))]):
        raise ValueError('Input arrays must not contain NaN values.')
    if x.ndim == 1:
        x = x[:, None]
    if y.ndim == 1:
        y = y[:, None]
    assert x.shape[0] == y.shape[0], 'x and y must have same number of samples'

    # Extract number of samples
    n = x.shape[0]
    n2 = n**2

    # Process first array to avoid redundancy when performing bootstrap
    a = squareform(pdist(x, metric='euclidean'))
    A = a - a.mean(axis=0)[None, :] - a.mean(axis=1)[:, None] + a.mean()
    dcov2_xx = np.vdot(A, A) / n2

    # Process second array and compute final distance correlation
    dcor = _dcorr(y, n2, A, dcov2_xx)

    # Compute one-sided p-value using a bootstrap procedure
    if n_boot is not None and n_boot > 1:
        # Define random seed and permutation
        rng = np.random.RandomState(seed)
        bootsam = rng.random_sample((n_boot, n)).argsort(axis=1)
        bootstat = np.empty(n_boot)
        for i in range(n_boot):
            bootstat[i] = _dcorr(y[bootsam[i, :]], n2, A, dcov2_xx)
        pval = _perm_pval(bootstat, dcor, tail=tail)
        return dcor, pval
    else:
        return dcor
['def', 'distance_corr', '(', 'x', ',', 'y', ',', 'tail', '=', "'upper'", ',', 'n_boot', '=', '1000', ',', 'seed', '=', 'None', ')', ':', 'assert', 'tail', 'in', '[', "'upper'", ',', "'lower'", ',', "'two-sided'", ']', ',', "'Wrong tail argument.'", 'x', '=', 'np', '.', 'asarray', '(', 'x', ')', 'y', '=', 'np', '.', 'asarray', '(', 'y', ')', '# Check for NaN values', 'if', 'any', '(', '[', 'np', '.', 'isnan', '(', 'np', '.', 'min', '(', 'x', ')', ')', ',', 'np', '.', 'isnan', '(', 'np', '.', 'min', '(', 'y', ')', ')', ']', ')', ':', 'raise', 'ValueError', '(', "'Input arrays must not contain NaN values.'", ')', 'if', 'x', '.', 'ndim', '==', '1', ':', 'x', '=', 'x', '[', ':', ',', 'None', ']', 'if', 'y', '.', 'ndim', '==', '1', ':', 'y', '=', 'y', '[', ':', ',', 'None', ']', 'assert', 'x', '.', 'shape', '[', '0', ']', '==', 'y', '.', 'shape', '[', '0', ']', ',', "'x and y must have same number of samples'", '# Extract number of samples', 'n', '=', 'x', '.', 'shape', '[', '0', ']', 'n2', '=', 'n', '**', '2', '# Process first array to avoid redundancy when performing bootstrap', 'a', '=', 'squareform', '(', 'pdist', '(', 'x', ',', 'metric', '=', "'euclidean'", ')', ')', 'A', '=', 'a', '-', 'a', '.', 'mean', '(', 'axis', '=', '0', ')', '[', 'None', ',', ':', ']', '-', 'a', '.', 'mean', '(', 'axis', '=', '1', ')', '[', ':', ',', 'None', ']', '+', 'a', '.', 'mean', '(', ')', 'dcov2_xx', '=', 'np', '.', 'vdot', '(', 'A', ',', 'A', ')', '/', 'n2', '# Process second array and compute final distance correlation', 'dcor', '=', '_dcorr', '(', 'y', ',', 'n2', ',', 'A', ',', 'dcov2_xx', ')', '# Compute one-sided p-value using a bootstrap procedure', 'if', 'n_boot', 'is', 'not', 'None', 'and', 'n_boot', '>', '1', ':', '# Define random seed and permutation', 'rng', '=', 'np', '.', 'random', '.', 'RandomState', '(', 'seed', ')', 'bootsam', '=', 'rng', '.', 'random_sample', '(', '(', 'n_boot', ',', 'n', ')', ')', '.', 'argsort', '(', 'axis', '=', '1', ')', 'bootstat', '=', 'np', '.', 'empty', '(', 'n_boot', ')', 'for', 'i', 'in', 'range', '(', 'n_boot', ')', ':', 'bootstat', '[', 'i', ']', '=', '_dcorr', '(', 'y', '[', 'bootsam', '[', 'i', ',', ':', ']', ']', ',', 'n2', ',', 'A', ',', 'dcov2_xx', ')', 'pval', '=', '_perm_pval', '(', 'bootstat', ',', 'dcor', ',', 'tail', '=', 'tail', ')', 'return', 'dcor', ',', 'pval', 'else', ':', 'return', 'dcor']
Distance correlation between two arrays.

Statistical significance (p-value) is evaluated with a permutation test.

Parameters
----------
x, y : np.ndarray
    1D or 2D input arrays, shape (n_samples, n_features).
    x and y must have the same number of samples and must not
    contain missing values.
tail : str
    Tail for p-value ::

        'upper' : one-sided (upper tail)
        'lower' : one-sided (lower tail)
        'two-sided' : two-sided

n_boot : int or None
    Number of bootstrap to perform.
    If None, no bootstrapping is performed and the function
    only returns the distance correlation (no p-value).
    Default is 1000 (thus giving a precision of 0.001).
seed : int or None
    Random state seed.

Returns
-------
dcor : float
    Sample distance correlation (range from 0 to 1).
pval : float
    P-value

Notes
-----
From Wikipedia:

*Distance correlation is a measure of dependence between two paired
random vectors of arbitrary, not necessarily equal, dimension. The
distance correlation coefficient is zero if and only if the random
vectors are independent. Thus, distance correlation measures both
linear and nonlinear association between two random variables or
random vectors. This is in contrast to Pearson's correlation, which
can only detect linear association between two random variables.*

The distance correlation of two random variables is obtained by
dividing their distance covariance by the product of their distance
standard deviations:

.. math::

    \\text{dCor}(X, Y) = \\frac{\\text{dCov}(X, Y)}
    {\\sqrt{\\text{dVar}(X) \\cdot \\text{dVar}(Y)}}

where :math:`\\text{dCov}(X, Y)` is the square root of the arithmetic
average of the product of the double-centered pairwise Euclidean
distance matrices.

Note that by contrast to Pearson's correlation, the distance correlation
cannot be negative, i.e. :math:`0 \\leq \\text{dCor} \\leq 1`.

Results have been tested against the 'energy' R package. To be
consistent with the latter, only the one-sided p-value is computed,
i.e. the upper tail of the T-statistic.

References
----------
.. [1] https://en.wikipedia.org/wiki/Distance_correlation

.. [2] Székely, G. J., Rizzo, M. L., & Bakirov, N. K. (2007).
       Measuring and testing dependence by correlation of distances.
       The annals of statistics, 35(6), 2769-2794.

.. [3] https://gist.github.com/satra/aa3d19a12b74e9ab7941

.. [4] https://gist.github.com/wladston/c931b1495184fbb99bec

.. [5] https://cran.r-project.org/web/packages/energy/energy.pdf

Examples
--------
1. With two 1D vectors

>>> from pingouin import distance_corr
>>> a = [1, 2, 3, 4, 5]
>>> b = [1, 2, 9, 4, 4]
>>> distance_corr(a, b, seed=9)
(0.7626762424168667, 0.312)

2. With two 2D arrays and no p-value

>>> import numpy as np
>>> np.random.seed(123)
>>> from pingouin import distance_corr
>>> a = np.random.random((10, 10))
>>> b = np.random.random((10, 10))
>>> distance_corr(a, b, n_boot=None)
0.8799633012275321
['Distance', 'correlation', 'between', 'two', 'arrays', '.']
train
https://github.com/raphaelvallat/pingouin/blob/58b19fa4fffbfe09d58b456e3926a148249e4d9b/pingouin/correlation.py#L774-L909
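An independent numpy/scipy check of the distance-correlation formula quoted in the docstring (double-centred Euclidean distance matrices, no bootstrap). It reproduces the record's own 1D example.

import numpy as np
from scipy.spatial.distance import pdist, squareform

def dcorr(x, y):
    x = np.asarray(x, dtype=float)[:, None]
    y = np.asarray(y, dtype=float)[:, None]
    n = x.shape[0]

    def centred(m):
        # double-centre the pairwise Euclidean distance matrix
        d = squareform(pdist(m))
        return d - d.mean(axis=0)[None, :] - d.mean(axis=1)[:, None] + d.mean()

    A, B = centred(x), centred(y)
    dcov2_xy = (A * B).sum() / n ** 2
    dcov2_xx = (A * A).sum() / n ** 2
    dcov2_yy = (B * B).sum() / n ** 2
    return np.sqrt(dcov2_xy) / (dcov2_xx * dcov2_yy) ** 0.25

print(dcorr([1, 2, 3, 4, 5], [1, 2, 9, 4, 4]))  # ~0.7626762424168667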
3,494
LettError/ufoProcessor
Lib/ufoProcessor/emptyPen.py
checkGlyphIsEmpty
def checkGlyphIsEmpty(glyph, allowWhiteSpace=True):
    """
    This will establish if the glyph is completely empty by drawing
    the glyph with an EmptyPen. Additionally, the unicode of the glyph
    is checked against a list of known unicode whitespace characters.
    This makes it possible to filter out glyphs that have a valid
    reason to be empty and those that can be ignored.
    """
    whiteSpace = [
        0x9,      # horizontal tab
        0xa,      # line feed
        0xb,      # vertical tab
        0xc,      # form feed
        0xd,      # carriage return
        0x20,     # space
        0x85,     # next line
        0xa0,     # nobreak space
        0x1680,   # ogham space mark
        0x180e,   # mongolian vowel separator
        0x2000,   # en quad
        0x2001,   # em quad
        0x2003,   # en space
        0x2004,   # three per em space
        0x2005,   # four per em space
        0x2006,   # six per em space
        0x2007,   # figure space
        0x2008,   # punctuation space
        0x2009,   # thin space
        0x200a,   # hair space
        0x2028,   # line separator
        0x2029,   # paragraph separator
        0x202f,   # narrow no break space
        0x205f,   # medium mathematical space
        0x3000,   # ideographic space
    ]
    emptyPen = EmptyPen()
    glyph.drawPoints(emptyPen)
    if emptyPen.isEmpty():
        # we're empty?
        if glyph.unicode in whiteSpace and allowWhiteSpace:
            # are we allowed to be?
            return False
        return True
    return False
python
def checkGlyphIsEmpty(glyph, allowWhiteSpace=True):
    """
    This will establish if the glyph is completely empty by drawing
    the glyph with an EmptyPen. Additionally, the unicode of the glyph
    is checked against a list of known unicode whitespace characters.
    This makes it possible to filter out glyphs that have a valid
    reason to be empty and those that can be ignored.
    """
    whiteSpace = [
        0x9,      # horizontal tab
        0xa,      # line feed
        0xb,      # vertical tab
        0xc,      # form feed
        0xd,      # carriage return
        0x20,     # space
        0x85,     # next line
        0xa0,     # nobreak space
        0x1680,   # ogham space mark
        0x180e,   # mongolian vowel separator
        0x2000,   # en quad
        0x2001,   # em quad
        0x2003,   # en space
        0x2004,   # three per em space
        0x2005,   # four per em space
        0x2006,   # six per em space
        0x2007,   # figure space
        0x2008,   # punctuation space
        0x2009,   # thin space
        0x200a,   # hair space
        0x2028,   # line separator
        0x2029,   # paragraph separator
        0x202f,   # narrow no break space
        0x205f,   # medium mathematical space
        0x3000,   # ideographic space
    ]
    emptyPen = EmptyPen()
    glyph.drawPoints(emptyPen)
    if emptyPen.isEmpty():
        # we're empty?
        if glyph.unicode in whiteSpace and allowWhiteSpace:
            # are we allowed to be?
            return False
        return True
    return False
['def', 'checkGlyphIsEmpty', '(', 'glyph', ',', 'allowWhiteSpace', '=', 'True', ')', ':', 'whiteSpace', '=', '[', '0x9', ',', '# horizontal tab', '0xa', ',', '# line feed', '0xb', ',', '# vertical tab', '0xc', ',', '# form feed', '0xd', ',', '# carriage return', '0x20', ',', '# space', '0x85', ',', '# next line', '0xa0', ',', '# nobreak space', '0x1680', ',', '# ogham space mark', '0x180e', ',', '# mongolian vowel separator', '0x2000', ',', '# en quad', '0x2001', ',', '# em quad', '0x2003', ',', '# en space', '0x2004', ',', '# three per em space', '0x2005', ',', '# four per em space', '0x2006', ',', '# six per em space', '0x2007', ',', '# figure space', '0x2008', ',', '# punctuation space', '0x2009', ',', '# thin space', '0x200a', ',', '# hair space', '0x2028', ',', '# line separator', '0x2029', ',', '# paragraph separator', '0x202f', ',', '# narrow no break space', '0x205f', ',', '# medium mathematical space', '0x3000', ',', '# ideographic space', ']', 'emptyPen', '=', 'EmptyPen', '(', ')', 'glyph', '.', 'drawPoints', '(', 'emptyPen', ')', 'if', 'emptyPen', '.', 'isEmpty', '(', ')', ':', "# we're empty?", 'if', 'glyph', '.', 'unicode', 'in', 'whiteSpace', 'and', 'allowWhiteSpace', ':', '# are we allowed to be?', 'return', 'False', 'return', 'True', 'return', 'False']
This will establish if the glyph is completely empty by drawing
the glyph with an EmptyPen. Additionally, the unicode of the glyph
is checked against a list of known unicode whitespace characters.
This makes it possible to filter out glyphs that have a valid
reason to be empty and those that can be ignored.
['This', 'will', 'establish', 'if', 'the', 'glyph', 'is', 'completely', 'empty', 'by', 'drawing', 'the', 'glyph', 'with', 'an', 'EmptyPen', '.', 'Additionally', 'the', 'unicode', 'of', 'the', 'glyph', 'is', 'checked', 'against', 'a', 'list', 'of', 'known', 'unicode', 'whitespace', 'characters', '.', 'This', 'makes', 'it', 'possible', 'to', 'filter', 'out', 'glyphs', 'that', 'have', 'a', 'valid', 'reason', 'to', 'be', 'empty', 'and', 'those', 'that', 'can', 'be', 'ignored', '.']
train
https://github.com/LettError/ufoProcessor/blob/7c63e1c8aba2f2ef9b12edb6560aa6c58024a89a/Lib/ufoProcessor/emptyPen.py#L34-L75
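A from-scratch sketch of what an EmptyPen has to do: implement the fontTools PointPen protocol and merely record whether anything was drawn (method names follow that protocol; the real EmptyPen lives in the module above).

class EmptyPen:
    # point pen that only counts what it is asked to draw
    def __init__(self):
        self.points = 0
        self.components = 0

    def beginPath(self, identifier=None, **kwargs):
        pass

    def endPath(self):
        pass

    def addPoint(self, pt, segmentType=None, smooth=False, name=None, **kwargs):
        self.points += 1

    def addComponent(self, baseGlyphName, transformation, **kwargs):
        self.components += 1

    def isEmpty(self):
        return self.points == 0 and self.components == 0

pen = EmptyPen()
print(pen.isEmpty())  # True: nothing drawn yet
pen.addPoint((0, 0), segmentType='move')
print(pen.isEmpty())  # False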
3,495
costastf/toonlib
toonlib/toonlib.py
Toon.get_smokedetector_by_name
def get_smokedetector_by_name(self, name):
    """Retrieves a smokedetector object by its name

    :param name: The name of the smokedetector to return
    :return: A smokedetector object
    """
    return next((smokedetector for smokedetector in self.smokedetectors
                 if smokedetector.name.lower() == name.lower()), None)
python
def get_smokedetector_by_name(self, name):
    """Retrieves a smokedetector object by its name

    :param name: The name of the smokedetector to return
    :return: A smokedetector object
    """
    return next((smokedetector for smokedetector in self.smokedetectors
                 if smokedetector.name.lower() == name.lower()), None)
['def', 'get_smokedetector_by_name', '(', 'self', ',', 'name', ')', ':', 'return', 'next', '(', '(', 'smokedetector', 'for', 'smokedetector', 'in', 'self', '.', 'smokedetectors', 'if', 'smokedetector', '.', 'name', '.', 'lower', '(', ')', '==', 'name', '.', 'lower', '(', ')', ')', ',', 'None', ')']
Retrieves a smokedetector object by its name

:param name: The name of the smokedetector to return
:return: A smokedetector object
['Retrieves', 'a', 'smokedetector', 'object', 'by', 'its', 'name']
train
https://github.com/costastf/toonlib/blob/2fa95430240d1a1c2a85a8827aecfcb1ca41c18c/toonlib/toonlib.py#L236-L243
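The next(generator, default) lookup idiom used above, shown on plain objects: it returns the first case-insensitive name match, or None instead of raising StopIteration.

from types import SimpleNamespace

detectors = [SimpleNamespace(name='Hallway'), SimpleNamespace(name='Kitchen')]

def by_name(items, name):
    return next((d for d in items if d.name.lower() == name.lower()), None)

print(by_name(detectors, 'kitchen').name)  # Kitchen
print(by_name(detectors, 'garage'))        # None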
3,496
twaldear/flask-secure-headers
flask_secure_headers/headers.py
Simple_Header.create_header
def create_header(self):
    """ return header dict """
    try:
        self.check_valid()
        _header_list = []
        for k, v in self.inputs.items():
            if v is None:
                return {self.__class__.__name__.replace('_', '-'): None}
            elif k == 'value':
                _header_list.insert(0, str(v))
            elif isinstance(v, bool):
                if v is True:
                    _header_list.append(k)
            else:
                _header_list.append('%s=%s' % (k, str(v)))
        return {self.__class__.__name__.replace('_', '-'): '; '.join(_header_list)}
    except Exception as e:
        raise
python
def create_header(self):
    """ return header dict """
    try:
        self.check_valid()
        _header_list = []
        for k, v in self.inputs.items():
            if v is None:
                return {self.__class__.__name__.replace('_', '-'): None}
            elif k == 'value':
                _header_list.insert(0, str(v))
            elif isinstance(v, bool):
                if v is True:
                    _header_list.append(k)
            else:
                _header_list.append('%s=%s' % (k, str(v)))
        return {self.__class__.__name__.replace('_', '-'): '; '.join(_header_list)}
    except Exception as e:
        raise
['def', 'create_header', '(', 'self', ')', ':', 'try', ':', 'self', '.', 'check_valid', '(', ')', '_header_list', '=', '[', ']', 'for', 'k', ',', 'v', 'in', 'self', '.', 'inputs', '.', 'items', '(', ')', ':', 'if', 'v', 'is', 'None', ':', 'return', '{', 'self', '.', '__class__', '.', '__name__', '.', 'replace', '(', "'_'", ',', "'-'", ')', ':', 'None', '}', 'elif', 'k', '==', "'value'", ':', '_header_list', '.', 'insert', '(', '0', ',', 'str', '(', 'v', ')', ')', 'elif', 'isinstance', '(', 'v', ',', 'bool', ')', ':', 'if', 'v', 'is', 'True', ':', '_header_list', '.', 'append', '(', 'k', ')', 'else', ':', '_header_list', '.', 'append', '(', "'%s=%s'", '%', '(', 'k', ',', 'str', '(', 'v', ')', ')', ')', 'return', '{', 'self', '.', '__class__', '.', '__name__', '.', 'replace', '(', "'_'", ',', "'-'", ')', ':', "'; '", '.', 'join', '(', '_header_list', ')', '}', 'except', 'Exception', 'as', 'e', ':', 'raise']
return header dict
['return', 'header', 'dict']
train
https://github.com/twaldear/flask-secure-headers/blob/3eca972b369608a7669b67cbe66679570a6505ce/flask_secure_headers/headers.py#L40-L57
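A Python 3 sketch of the header assembly above; the class below mirrors the logic for an illustrative X_XSS_Protection header (check_valid omitted for brevity).

class X_XSS_Protection:
    def __init__(self, **inputs):
        self.inputs = inputs

    def create_header(self):
        parts = []
        for k, v in self.inputs.items():
            if v is None:
                return {self.__class__.__name__.replace('_', '-'): None}
            elif k == 'value':
                parts.insert(0, str(v))  # the bare value always leads
            elif isinstance(v, bool):
                if v is True:
                    parts.append(k)      # boolean flags appear bare
            else:
                parts.append('%s=%s' % (k, str(v)))
        return {self.__class__.__name__.replace('_', '-'): '; '.join(parts)}

print(X_XSS_Protection(value=1, mode='block').create_header())
# {'X-XSS-Protection': '1; mode=block'}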
3,497
nerdvegas/rez
src/rez/packages_.py
iter_package_families
def iter_package_families(paths=None):
    """Iterate over package families, in no particular order.

    Note that multiple package families with the same name can be returned.
    Unlike packages, families later in the searchpath are not hidden by
    earlier families.

    Args:
        paths (list of str, optional): paths to search for package families,
            defaults to `config.packages_path`.

    Returns:
        `PackageFamily` iterator.
    """
    for path in (paths or config.packages_path):
        repo = package_repository_manager.get_repository(path)
        for resource in repo.iter_package_families():
            yield PackageFamily(resource)
python
def iter_package_families(paths=None):
    """Iterate over package families, in no particular order.

    Note that multiple package families with the same name can be returned.
    Unlike packages, families later in the searchpath are not hidden by
    earlier families.

    Args:
        paths (list of str, optional): paths to search for package families,
            defaults to `config.packages_path`.

    Returns:
        `PackageFamily` iterator.
    """
    for path in (paths or config.packages_path):
        repo = package_repository_manager.get_repository(path)
        for resource in repo.iter_package_families():
            yield PackageFamily(resource)
['def', 'iter_package_families', '(', 'paths', '=', 'None', ')', ':', 'for', 'path', 'in', '(', 'paths', 'or', 'config', '.', 'packages_path', ')', ':', 'repo', '=', 'package_repository_manager', '.', 'get_repository', '(', 'path', ')', 'for', 'resource', 'in', 'repo', '.', 'iter_package_families', '(', ')', ':', 'yield', 'PackageFamily', '(', 'resource', ')']
Iterate over package families, in no particular order.

Note that multiple package families with the same name can be returned.
Unlike packages, families later in the searchpath are not hidden by
earlier families.

Args:
    paths (list of str, optional): paths to search for package families,
        defaults to `config.packages_path`.

Returns:
    `PackageFamily` iterator.
['Iterate', 'over', 'package', 'families', 'in', 'no', 'particular', 'order', '.']
train
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/packages_.py#L465-L482
3,498
trevisanj/a99
a99/textinterface.py
format_box
def format_box(title, ch="*"): """ Encloses title in a box. Result is a list >>> for line in format_box("Today's TODO list"): ... print(line) ************************* *** Today's TODO list *** ************************* """ lt = len(title) return [(ch * (lt + 8)), (ch * 3 + " " + title + " " + ch * 3), (ch * (lt + 8)) ]
python
def format_box(title, ch="*"): """ Encloses title in a box. Result is a list >>> for line in format_box("Today's TODO list"): ... print(line) ************************* *** Today's TODO list *** ************************* """ lt = len(title) return [(ch * (lt + 8)), (ch * 3 + " " + title + " " + ch * 3), (ch * (lt + 8)) ]
['def', 'format_box', '(', 'title', ',', 'ch', '=', '"*"', ')', ':', 'lt', '=', 'len', '(', 'title', ')', 'return', '[', '(', 'ch', '*', '(', 'lt', '+', '8', ')', ')', ',', '(', 'ch', '*', '3', '+', '" "', '+', 'title', '+', '" "', '+', 'ch', '*', '3', ')', ',', '(', 'ch', '*', '(', 'lt', '+', '8', ')', ')', ']']
Encloses title in a box. Result is a list

>>> for line in format_box("Today's TODO list"):
...     print(line)
*************************
*** Today's TODO list ***
*************************
['Encloses', 'title', 'in', 'a', 'box', '.', 'Result', 'is', 'a', 'list', '>>>', 'for', 'line', 'in', 'format_box', '(', 'Today', 's', 'TODO', 'list', ')', ':', '...', 'print', '(', 'line', ')', '*************************', '***', 'Today', 's', 'TODO', 'list', '***', '*************************']
train
https://github.com/trevisanj/a99/blob/193e6e3c9b3e4f4a0ba7eb3eece846fe7045c539/a99/textinterface.py#L267-L281
3,499
Hironsan/anago
anago/tagger.py
Tagger.predict
def predict(self, text):
    """Predict using the model.

    Args:
        text: string, the input text.

    Returns:
        tags: list, shape = (num_words,)
            Returns predicted values.
    """
    pred = self.predict_proba(text)
    tags = self._get_tags(pred)

    return tags
python
def predict(self, text):
    """Predict using the model.

    Args:
        text: string, the input text.

    Returns:
        tags: list, shape = (num_words,)
            Returns predicted values.
    """
    pred = self.predict_proba(text)
    tags = self._get_tags(pred)

    return tags
['def', 'predict', '(', 'self', ',', 'text', ')', ':', 'pred', '=', 'self', '.', 'predict_proba', '(', 'text', ')', 'tags', '=', 'self', '.', '_get_tags', '(', 'pred', ')', 'return', 'tags']
Predict using the model.

Args:
    text: string, the input text.

Returns:
    tags: list, shape = (num_words,)
        Returns predicted values.
['Predict', 'using', 'the', 'model', '.']
train
https://github.com/Hironsan/anago/blob/66a97f91c41f9613b736892e9762dccb9c28f623/anago/tagger.py#L126-L139
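What _get_tags typically does for a sequence tagger like this: argmax over per-word class probabilities, then map the indices to label strings. A sketch only; anago's real pipeline includes its own preprocessing.

import numpy as np

labels = ['O', 'B-PER', 'I-PER']
pred = np.array([[0.9, 0.05, 0.05],
                 [0.1, 0.80, 0.10],
                 [0.2, 0.10, 0.70]])
tags = [labels[i] for i in pred.argmax(axis=-1)]
print(tags)  # ['O', 'B-PER', 'I-PER']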