| Column | Type | Range |
|---|---|---|
| Unnamed: 0 | int64 | 0 to 10k |
| repository_name | stringlengths | 7 to 54 |
| func_path_in_repository | stringlengths | 5 to 223 |
| func_name | stringlengths | 1 to 134 |
| whole_func_string | stringlengths | 100 to 30.3k |
| language | stringclasses | 1 value |
| func_code_string | stringlengths | 100 to 30.3k |
| func_code_tokens | stringlengths | 138 to 33.2k |
| func_documentation_string | stringlengths | 1 to 15k |
| func_documentation_tokens | stringlengths | 5 to 5.14k |
| split_name | stringclasses | 1 value |
| func_code_url | stringlengths | 91 to 315 |
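The column summary above reads like an auto-generated per-column report for a code-search style corpus: each row pairs a Python function with its docstring, token lists, split name, and source URL. As a minimal, hypothetical sketch of how such rows could be loaded and sanity-checked, assuming a local Parquet export with exactly these column names (the file name `code_doc_pairs.parquet` is an assumption, not something given in this listing):

```python
import pandas as pd

# Hypothetical local export of the rows shown below; the file name is an
# assumption and not part of the original listing.
df = pd.read_parquet("code_doc_pairs.parquet")

# The columns named in the summary above.
expected = [
    "repository_name", "func_path_in_repository", "func_name",
    "whole_func_string", "language", "func_code_string",
    "func_code_tokens", "func_documentation_string",
    "func_documentation_tokens", "split_name", "func_code_url",
]
print("missing columns:", [c for c in expected if c not in df.columns])

# Per the summary, `language` and `split_name` each hold a single value.
print(df["language"].unique(), df["split_name"].unique())

# Docstring lengths range from 1 to roughly 15k characters in the summary.
print(df["func_documentation_string"].str.len().describe())
```

If the rows instead come from a hosted dataset hub, the equivalent loader there would expose the same columns; the local file path is the only assumption in this sketch.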
2,600
has2k1/plotnine
plotnine/scales/limits.py
_lim.get_scale
def get_scale(self, gg):
    """
    Create a scale
    """
    # This method does some introspection to save users from
    # scale mismatch error. This could happen when the
    # aesthetic is mapped to a categorical but the limits
    # are not provided in categorical form. We only handle
    # the case where the mapping uses an expression to
    # conver to categorical e.g `aes(color='factor(cyl)')`.
    # However if `'cyl'` column is a categorical and the
    # mapping is `aes(color='cyl')`, that will result in
    # an error. If later case proves common enough then we
    # could inspect the data and be clever based on that too!!
    ae = self.aesthetic
    series = self.limits_series
    ae_values = []

    # Look through all the mappings for this aesthetic,
    # if we detect any factor stuff then we convert the
    # limits data to categorical so that the right scale
    # can be choosen. This should take care of the most
    # common use cases.
    for layer in gg.layers:
        with suppress(KeyError):
            value = layer.mapping[ae]
            if isinstance(value, str):
                ae_values.append(value)

    for value in ae_values:
        if ('factor(' in value or
                'Categorical(' in value):
            series = pd.Categorical(self.limits_series)
            break

    return make_scale(self.aesthetic,
                      series,
                      limits=self.limits,
                      trans=self.trans)
python
['def', 'get_scale', '(', 'self', ',', 'gg', ')', ':', '# This method does some introspection to save users from', '# scale mismatch error. This could happen when the', '# aesthetic is mapped to a categorical but the limits', '# are not provided in categorical form. We only handle', '# the case where the mapping uses an expression to', "# conver to categorical e.g `aes(color='factor(cyl)')`.", "# However if `'cyl'` column is a categorical and the", "# mapping is `aes(color='cyl')`, that will result in", '# an error. If later case proves common enough then we', '# could inspect the data and be clever based on that too!!', 'ae', '=', 'self', '.', 'aesthetic', 'series', '=', 'self', '.', 'limits_series', 'ae_values', '=', '[', ']', '# Look through all the mappings for this aesthetic,', '# if we detect any factor stuff then we convert the', '# limits data to categorical so that the right scale', '# can be choosen. This should take care of the most', '# common use cases.', 'for', 'layer', 'in', 'gg', '.', 'layers', ':', 'with', 'suppress', '(', 'KeyError', ')', ':', 'value', '=', 'layer', '.', 'mapping', '[', 'ae', ']', 'if', 'isinstance', '(', 'value', ',', 'str', ')', ':', 'ae_values', '.', 'append', '(', 'value', ')', 'for', 'value', 'in', 'ae_values', ':', 'if', '(', "'factor('", 'in', 'value', 'or', "'Categorical('", 'in', 'value', ')', ':', 'series', '=', 'pd', '.', 'Categorical', '(', 'self', '.', 'limits_series', ')', 'break', 'return', 'make_scale', '(', 'self', '.', 'aesthetic', ',', 'series', ',', 'limits', '=', 'self', '.', 'limits', ',', 'trans', '=', 'self', '.', 'trans', ')']
Create a scale
['Create', 'a', 'scale']
train
https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/scales/limits.py#L46-L83
2,601
tensorpack/tensorpack
tensorpack/utils/gpu.py
get_num_gpu
def get_num_gpu():
    """
    Returns:
        int: #available GPUs in CUDA_VISIBLE_DEVICES, or in the system.
    """

    def warn_return(ret, message):
        try:
            import tensorflow as tf
        except ImportError:
            return ret

        built_with_cuda = tf.test.is_built_with_cuda()
        if not built_with_cuda and ret > 0:
            logger.warn(message + "But TensorFlow was not built with CUDA support and could not use GPUs!")
        return ret

    env = os.environ.get('CUDA_VISIBLE_DEVICES', None)
    if env:
        return warn_return(len(env.split(',')), "Found non-empty CUDA_VISIBLE_DEVICES. ")
    output, code = subproc_call("nvidia-smi -L", timeout=5)
    if code == 0:
        output = output.decode('utf-8')
        return warn_return(len(output.strip().split('\n')), "Found nvidia-smi. ")
    try:
        # Use NVML to query device properties
        with NVMLContext() as ctx:
            return warn_return(ctx.num_devices(), "NVML found nvidia devices. ")
    except Exception:
        # Fallback
        logger.info("Loading local devices by TensorFlow ...")

        try:
            import tensorflow as tf
            # available since TF 1.14
            gpu_devices = tf.config.experimental.list_physical_devices('GPU')
        except AttributeError:
            from tensorflow.python.client import device_lib
            local_device_protos = device_lib.list_local_devices()
            # Note this will initialize all GPUs and therefore has side effect
            # https://github.com/tensorflow/tensorflow/issues/8136
            gpu_devices = [x.name for x in local_device_protos if x.device_type == 'GPU']
        return len(gpu_devices)
python
['def', 'get_num_gpu', '(', ')', ':', 'def', 'warn_return', '(', 'ret', ',', 'message', ')', ':', 'try', ':', 'import', 'tensorflow', 'as', 'tf', 'except', 'ImportError', ':', 'return', 'ret', 'built_with_cuda', '=', 'tf', '.', 'test', '.', 'is_built_with_cuda', '(', ')', 'if', 'not', 'built_with_cuda', 'and', 'ret', '>', '0', ':', 'logger', '.', 'warn', '(', 'message', '+', '"But TensorFlow was not built with CUDA support and could not use GPUs!"', ')', 'return', 'ret', 'env', '=', 'os', '.', 'environ', '.', 'get', '(', "'CUDA_VISIBLE_DEVICES'", ',', 'None', ')', 'if', 'env', ':', 'return', 'warn_return', '(', 'len', '(', 'env', '.', 'split', '(', "','", ')', ')', ',', '"Found non-empty CUDA_VISIBLE_DEVICES. "', ')', 'output', ',', 'code', '=', 'subproc_call', '(', '"nvidia-smi -L"', ',', 'timeout', '=', '5', ')', 'if', 'code', '==', '0', ':', 'output', '=', 'output', '.', 'decode', '(', "'utf-8'", ')', 'return', 'warn_return', '(', 'len', '(', 'output', '.', 'strip', '(', ')', '.', 'split', '(', "'\\n'", ')', ')', ',', '"Found nvidia-smi. "', ')', 'try', ':', '# Use NVML to query device properties', 'with', 'NVMLContext', '(', ')', 'as', 'ctx', ':', 'return', 'warn_return', '(', 'ctx', '.', 'num_devices', '(', ')', ',', '"NVML found nvidia devices. "', ')', 'except', 'Exception', ':', '# Fallback', 'logger', '.', 'info', '(', '"Loading local devices by TensorFlow ..."', ')', 'try', ':', 'import', 'tensorflow', 'as', 'tf', '# available since TF 1.14', 'gpu_devices', '=', 'tf', '.', 'config', '.', 'experimental', '.', 'list_physical_devices', '(', "'GPU'", ')', 'except', 'AttributeError', ':', 'from', 'tensorflow', '.', 'python', '.', 'client', 'import', 'device_lib', 'local_device_protos', '=', 'device_lib', '.', 'list_local_devices', '(', ')', '# Note this will initialize all GPUs and therefore has side effect', '# https://github.com/tensorflow/tensorflow/issues/8136', 'gpu_devices', '=', '[', 'x', '.', 'name', 'for', 'x', 'in', 'local_device_protos', 'if', 'x', '.', 'device_type', '==', "'GPU'", ']', 'return', 'len', '(', 'gpu_devices', ')']
Returns: int: #available GPUs in CUDA_VISIBLE_DEVICES, or in the system.
['Returns', ':', 'int', ':', '#available', 'GPUs', 'in', 'CUDA_VISIBLE_DEVICES', 'or', 'in', 'the', 'system', '.']
train
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/tensorpack/utils/gpu.py#L29-L71
2,602
GoogleCloudPlatform/datastore-ndb-python
ndb/model.py
Model._query
def _query(cls, *args, **kwds):
    """Create a Query object for this class.

    Args:
      distinct: Optional bool, short hand for group_by = projection.
      *args: Used to apply an initial filter
      **kwds: are passed to the Query() constructor.

    Returns:
      A Query object.
    """
    # Validating distinct.
    if 'distinct' in kwds:
        if 'group_by' in kwds:
            raise TypeError(
                'cannot use distinct= and group_by= at the same time')
        projection = kwds.get('projection')
        if not projection:
            raise TypeError(
                'cannot use distinct= without projection=')
        if kwds.pop('distinct'):
            kwds['group_by'] = projection

    # TODO: Disallow non-empty args and filter=.
    from .query import Query  # Import late to avoid circular imports.
    qry = Query(kind=cls._get_kind(), **kwds)
    qry = qry.filter(*cls._default_filters())
    qry = qry.filter(*args)
    return qry
python
['def', '_query', '(', 'cls', ',', '*', 'args', ',', '*', '*', 'kwds', ')', ':', '# Validating distinct.', 'if', "'distinct'", 'in', 'kwds', ':', 'if', "'group_by'", 'in', 'kwds', ':', 'raise', 'TypeError', '(', "'cannot use distinct= and group_by= at the same time'", ')', 'projection', '=', 'kwds', '.', 'get', '(', "'projection'", ')', 'if', 'not', 'projection', ':', 'raise', 'TypeError', '(', "'cannot use distinct= without projection='", ')', 'if', 'kwds', '.', 'pop', '(', "'distinct'", ')', ':', 'kwds', '[', "'group_by'", ']', '=', 'projection', '# TODO: Disallow non-empty args and filter=.', 'from', '.', 'query', 'import', 'Query', '# Import late to avoid circular imports.', 'qry', '=', 'Query', '(', 'kind', '=', 'cls', '.', '_get_kind', '(', ')', ',', '*', '*', 'kwds', ')', 'qry', '=', 'qry', '.', 'filter', '(', '*', 'cls', '.', '_default_filters', '(', ')', ')', 'qry', '=', 'qry', '.', 'filter', '(', '*', 'args', ')', 'return', 'qry']
Create a Query object for this class. Args: distinct: Optional bool, short hand for group_by = projection. *args: Used to apply an initial filter **kwds: are passed to the Query() constructor. Returns: A Query object.
['Create', 'a', 'Query', 'object', 'for', 'this', 'class', '.']
train
https://github.com/GoogleCloudPlatform/datastore-ndb-python/blob/cf4cab3f1f69cd04e1a9229871be466b53729f3f/ndb/model.py#L3410-L3438
2,603
solocompt/plugs-mail
plugs_mail/management/commands/load_email_templates.py
Command.override_default_templates
def override_default_templates(self):
    """
    Override the default emails already
    defined by other apps
    """
    if plugs_mail_settings['OVERRIDE_TEMPLATE_DIR']:
        dir_ = plugs_mail_settings['OVERRIDE_TEMPLATE_DIR']
        for file_ in os.listdir(dir_):
            if file_.endswith(('.html', 'txt')):
                self.overrides[file_] = dir_
python
['def', 'override_default_templates', '(', 'self', ')', ':', 'if', 'plugs_mail_settings', '[', "'OVERRIDE_TEMPLATE_DIR'", ']', ':', 'dir_', '=', 'plugs_mail_settings', '[', "'OVERRIDE_TEMPLATE_DIR'", ']', 'for', 'file_', 'in', 'os', '.', 'listdir', '(', 'dir_', ')', ':', 'if', 'file_', '.', 'endswith', '(', '(', "'.html'", ',', "'txt'", ')', ')', ':', 'self', '.', 'overrides', '[', 'file_', ']', '=', 'dir_']
Override the default emails already defined by other apps
['Override', 'the', 'default', 'emails', 'already', 'defined', 'by', 'other', 'apps']
train
https://github.com/solocompt/plugs-mail/blob/6139fa79ddb437562db1769d03bd3098c25a06fa/plugs_mail/management/commands/load_email_templates.py#L29-L37
2,604
guykisel/inline-plz
inlineplz/main.py
load_config
def load_config(args, config_path=".inlineplz.yml"):
    """Load inline-plz config from yaml config file with reasonable defaults."""
    config = {}
    try:
        with open(config_path) as configfile:
            config = yaml.safe_load(configfile) or {}
            if config:
                print("Loaded config from {}".format(config_path))
                pprint.pprint(config)
    except (IOError, OSError, yaml.parser.ParserError):
        traceback.print_exc()
    args = update_from_config(args, config)
    args.ignore_paths = args.__dict__.get("ignore_paths") or [
        "node_modules",
        ".git",
        ".tox",
        "godeps",
        "vendor",
        "site-packages",
        "venv",
        ".env",
        "spec",
        "migrate",
        "bin",
        "fixtures",
        "cassettes",
        ".cache",
        ".idea",
        ".pytest_cache",
        "__pycache__",
        "dist",
    ]
    if config_path != ".inlineplz.yml":
        return args
    # fall back to config_dir inlineplz yaml if we didn't find one locally
    if args.config_dir and not config:
        new_config_path = os.path.join(args.config_dir, config_path)
        if os.path.exists(new_config_path):
            return load_config(args, new_config_path)
    return args
python
['def', 'load_config', '(', 'args', ',', 'config_path', '=', '".inlineplz.yml"', ')', ':', 'config', '=', '{', '}', 'try', ':', 'with', 'open', '(', 'config_path', ')', 'as', 'configfile', ':', 'config', '=', 'yaml', '.', 'safe_load', '(', 'configfile', ')', 'or', '{', '}', 'if', 'config', ':', 'print', '(', '"Loaded config from {}"', '.', 'format', '(', 'config_path', ')', ')', 'pprint', '.', 'pprint', '(', 'config', ')', 'except', '(', 'IOError', ',', 'OSError', ',', 'yaml', '.', 'parser', '.', 'ParserError', ')', ':', 'traceback', '.', 'print_exc', '(', ')', 'args', '=', 'update_from_config', '(', 'args', ',', 'config', ')', 'args', '.', 'ignore_paths', '=', 'args', '.', '__dict__', '.', 'get', '(', '"ignore_paths"', ')', 'or', '[', '"node_modules"', ',', '".git"', ',', '".tox"', ',', '"godeps"', ',', '"vendor"', ',', '"site-packages"', ',', '"venv"', ',', '".env"', ',', '"spec"', ',', '"migrate"', ',', '"bin"', ',', '"fixtures"', ',', '"cassettes"', ',', '".cache"', ',', '".idea"', ',', '".pytest_cache"', ',', '"__pycache__"', ',', '"dist"', ',', ']', 'if', 'config_path', '!=', '".inlineplz.yml"', ':', 'return', 'args', "# fall back to config_dir inlineplz yaml if we didn't find one locally", 'if', 'args', '.', 'config_dir', 'and', 'not', 'config', ':', 'new_config_path', '=', 'os', '.', 'path', '.', 'join', '(', 'args', '.', 'config_dir', ',', 'config_path', ')', 'if', 'os', '.', 'path', '.', 'exists', '(', 'new_config_path', ')', ':', 'return', 'load_config', '(', 'args', ',', 'new_config_path', ')', 'return', 'args']
Load inline-plz config from yaml config file with reasonable defaults.
['Load', 'inline', '-', 'plz', 'config', 'from', 'yaml', 'config', 'file', 'with', 'reasonable', 'defaults', '.']
train
https://github.com/guykisel/inline-plz/blob/b5b1744e9156e31f68b519c0d8022feff79888ae/inlineplz/main.py#L102-L143
2,605
jordanh/neurio-python
neurio/__init__.py
Client.get_appliances
def get_appliances(self, location_id):
    """Get the appliances added for a specified location.

    Args:
      location_id (string): identifiying string of appliance

    Returns:
      list: dictionary objects containing appliances data
    """
    url = "https://api.neur.io/v1/appliances"
    headers = self.__gen_headers()
    headers["Content-Type"] = "application/json"
    params = {
        "locationId": location_id,
    }
    url = self.__append_url_params(url, params)
    r = requests.get(url, headers=headers)
    return r.json()
python
['def', 'get_appliances', '(', 'self', ',', 'location_id', ')', ':', 'url', '=', '"https://api.neur.io/v1/appliances"', 'headers', '=', 'self', '.', '__gen_headers', '(', ')', 'headers', '[', '"Content-Type"', ']', '=', '"application/json"', 'params', '=', '{', '"locationId"', ':', 'location_id', ',', '}', 'url', '=', 'self', '.', '__append_url_params', '(', 'url', ',', 'params', ')', 'r', '=', 'requests', '.', 'get', '(', 'url', ',', 'headers', '=', 'headers', ')', 'return', 'r', '.', 'json', '(', ')']
Get the appliances added for a specified location. Args: location_id (string): identifiying string of appliance Returns: list: dictionary objects containing appliances data
['Get', 'the', 'appliances', 'added', 'for', 'a', 'specified', 'location', '.']
train
https://github.com/jordanh/neurio-python/blob/3a1bcadadb3bb3ad48f2df41c039d8b828ffd9c8/neurio/__init__.py#L132-L152
2,606
simoninireland/epyc
epyc/labnotebook.py
LabNotebook.cancelAllPendingResults
def cancelAllPendingResults( self ):
    """Cancel all pending results. Note that this only affects the
    notebook's record, not any job running in a lab."""
    for k in self._results.keys():
        rs = self._results[k]
        self._results[k] = [ j for j in rs if isinstance(j, dict) ]
    self._pending = dict()
python
['def', 'cancelAllPendingResults', '(', 'self', ')', ':', 'for', 'k', 'in', 'self', '.', '_results', '.', 'keys', '(', ')', ':', 'rs', '=', 'self', '.', '_results', '[', 'k', ']', 'self', '.', '_results', '[', 'k', ']', '=', '[', 'j', 'for', 'j', 'in', 'rs', 'if', 'isinstance', '(', 'j', ',', 'dict', ')', ']', 'self', '.', '_pending', '=', 'dict', '(', ')']
Cancel all pending results. Note that this only affects the notebook's record, not any job running in a lab.
['Cancel', 'all', 'pending', 'results', '.', 'Note', 'that', 'this', 'only', 'affects', 'the', 'notebook', 's', 'record', 'not', 'any', 'job', 'running', 'in', 'a', 'lab', '.']
train
https://github.com/simoninireland/epyc/blob/b3b61007741a0ab3de64df89070a6f30de8ec268/epyc/labnotebook.py#L225-L231
2,607
GoogleCloudPlatform/compute-image-packages
packages/python-google-compute-engine/google_compute_engine/metadata_scripts/script_retriever.py
ScriptRetriever._DownloadScript
def _DownloadScript(self, url, dest_dir):
    """Download the contents of the URL to the destination.

    Args:
        url: string, the URL to download.
        dest_dir: string, the path to a directory for storing metadata scripts.

    Returns:
        string, the path to the file storing the metadata script.
    """
    # Check for the preferred Google Storage URL format:
    # gs://<bucket>/<object>
    if url.startswith(r'gs://'):
        # Convert the string into a standard URL.
        url = re.sub('^gs://', 'https://storage.googleapis.com/', url)
        return self._DownloadAuthUrl(url, dest_dir)

    header = r'http[s]?://'
    domain = r'storage\.googleapis\.com'
    # Many of the Google Storage URLs are supported below.
    # It is prefered that customers specify their object using
    # its gs://<bucket>/<object> url.
    bucket = r'(?P<bucket>[a-z0-9][-_.a-z0-9]*[a-z0-9])'
    # Accept any non-empty string that doesn't contain a wildcard character
    obj = r'(?P<obj>[^\*\?]+)'

    # Check for the Google Storage URLs:
    # http://<bucket>.storage.googleapis.com/<object>
    # https://<bucket>.storage.googleapis.com/<object>
    gs_regex = re.compile(r'\A%s%s\.%s/%s\Z' % (header, bucket, domain, obj))
    match = gs_regex.match(url)
    if match:
        return self._DownloadAuthUrl(url, dest_dir)

    # Check for the other possible Google Storage URLs:
    # http://storage.googleapis.com/<bucket>/<object>
    # https://storage.googleapis.com/<bucket>/<object>
    #
    # The following are deprecated but checked:
    # http://commondatastorage.googleapis.com/<bucket>/<object>
    # https://commondatastorage.googleapis.com/<bucket>/<object>
    gs_regex = re.compile(
        r'\A%s(commondata)?%s/%s/%s\Z' % (header, domain, bucket, obj))
    match = gs_regex.match(url)
    if match:
        return self._DownloadAuthUrl(url, dest_dir)

    # Unauthenticated download of the object.
    return self._DownloadUrl(url, dest_dir)
python
['def', '_DownloadScript', '(', 'self', ',', 'url', ',', 'dest_dir', ')', ':', '# Check for the preferred Google Storage URL format:', '# gs://<bucket>/<object>', 'if', 'url', '.', 'startswith', '(', "r'gs://'", ')', ':', '# Convert the string into a standard URL.', 'url', '=', 're', '.', 'sub', '(', "'^gs://'", ',', "'https://storage.googleapis.com/'", ',', 'url', ')', 'return', 'self', '.', '_DownloadAuthUrl', '(', 'url', ',', 'dest_dir', ')', 'header', '=', "r'http[s]?://'", 'domain', '=', "r'storage\\.googleapis\\.com'", '# Many of the Google Storage URLs are supported below.', '# It is prefered that customers specify their object using', '# its gs://<bucket>/<object> url.', 'bucket', '=', "r'(?P<bucket>[a-z0-9][-_.a-z0-9]*[a-z0-9])'", "# Accept any non-empty string that doesn't contain a wildcard character", 'obj', '=', "r'(?P<obj>[^\\*\\?]+)'", '# Check for the Google Storage URLs:', '# http://<bucket>.storage.googleapis.com/<object>', '# https://<bucket>.storage.googleapis.com/<object>', 'gs_regex', '=', 're', '.', 'compile', '(', "r'\\A%s%s\\.%s/%s\\Z'", '%', '(', 'header', ',', 'bucket', ',', 'domain', ',', 'obj', ')', ')', 'match', '=', 'gs_regex', '.', 'match', '(', 'url', ')', 'if', 'match', ':', 'return', 'self', '.', '_DownloadAuthUrl', '(', 'url', ',', 'dest_dir', ')', '# Check for the other possible Google Storage URLs:', '# http://storage.googleapis.com/<bucket>/<object>', '# https://storage.googleapis.com/<bucket>/<object>', '#', '# The following are deprecated but checked:', '# http://commondatastorage.googleapis.com/<bucket>/<object>', '# https://commondatastorage.googleapis.com/<bucket>/<object>', 'gs_regex', '=', 're', '.', 'compile', '(', "r'\\A%s(commondata)?%s/%s/%s\\Z'", '%', '(', 'header', ',', 'domain', ',', 'bucket', ',', 'obj', ')', ')', 'match', '=', 'gs_regex', '.', 'match', '(', 'url', ')', 'if', 'match', ':', 'return', 'self', '.', '_DownloadAuthUrl', '(', 'url', ',', 'dest_dir', ')', '# Unauthenticated download of the object.', 'return', 'self', '.', '_DownloadUrl', '(', 'url', ',', 'dest_dir', ')']
Download the contents of the URL to the destination. Args: url: string, the URL to download. dest_dir: string, the path to a directory for storing metadata scripts. Returns: string, the path to the file storing the metadata script.
['Download', 'the', 'contents', 'of', 'the', 'URL', 'to', 'the', 'destination', '.']
train
https://github.com/GoogleCloudPlatform/compute-image-packages/blob/53ea8cd069fb4d9a1984d1c167e54c133033f8da/packages/python-google-compute-engine/google_compute_engine/metadata_scripts/script_retriever.py#L118-L168
2,608
saltstack/salt
salt/modules/mount.py
automaster
def automaster(config='/etc/auto_salt'):
    '''
    List the contents of the auto master

    CLI Example:

    .. code-block:: bash

        salt '*' mount.automaster
    '''
    ret = {}
    if not os.path.isfile(config):
        return ret
    with salt.utils.files.fopen(config) as ifile:
        for line in ifile:
            line = salt.utils.stringutils.to_unicode(line)
            if line.startswith('#'):
                # Commented
                continue
            if not line.strip():
                # Blank line
                continue
            comps = line.split()
            if len(comps) != 3:
                # Invalid entry
                continue

            prefix = "/.."
            name = comps[0].replace(prefix, "")
            device_fmt = comps[2].split(":")
            opts = comps[1].split(',')

            ret[name] = {'device': device_fmt[1],
                         'fstype': opts[0],
                         'opts': opts[1:]}
    return ret
python
['def', 'automaster', '(', 'config', '=', "'/etc/auto_salt'", ')', ':', 'ret', '=', '{', '}', 'if', 'not', 'os', '.', 'path', '.', 'isfile', '(', 'config', ')', ':', 'return', 'ret', 'with', 'salt', '.', 'utils', '.', 'files', '.', 'fopen', '(', 'config', ')', 'as', 'ifile', ':', 'for', 'line', 'in', 'ifile', ':', 'line', '=', 'salt', '.', 'utils', '.', 'stringutils', '.', 'to_unicode', '(', 'line', ')', 'if', 'line', '.', 'startswith', '(', "'#'", ')', ':', '# Commented', 'continue', 'if', 'not', 'line', '.', 'strip', '(', ')', ':', '# Blank line', 'continue', 'comps', '=', 'line', '.', 'split', '(', ')', 'if', 'len', '(', 'comps', ')', '!=', '3', ':', '# Invalid entry', 'continue', 'prefix', '=', '"/.."', 'name', '=', 'comps', '[', '0', ']', '.', 'replace', '(', 'prefix', ',', '""', ')', 'device_fmt', '=', 'comps', '[', '2', ']', '.', 'split', '(', '":"', ')', 'opts', '=', 'comps', '[', '1', ']', '.', 'split', '(', "','", ')', 'ret', '[', 'name', ']', '=', '{', "'device'", ':', 'device_fmt', '[', '1', ']', ',', "'fstype'", ':', 'opts', '[', '0', ']', ',', "'opts'", ':', 'opts', '[', '1', ':', ']', '}', 'return', 'ret']
List the contents of the auto master CLI Example: .. code-block:: bash salt '*' mount.automaster
['List', 'the', 'contents', 'of', 'the', 'auto', 'master']
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/mount.py#L1139-L1174
2,609
carlcarl/grabflickr
grabflickr/grabflickr.py
single_download_photos
def single_download_photos(photos):
    """Use single process to download photos

    :param photos: The photos to be downloaded
    :type photos: list of dicts
    """
    global counter
    counter = len(photos)
    for photo in photos:
        download_photo(photo)
python
['def', 'single_download_photos', '(', 'photos', ')', ':', 'global', 'counter', 'counter', '=', 'len', '(', 'photos', ')', 'for', 'photo', 'in', 'photos', ':', 'download_photo', '(', 'photo', ')']
Use single process to download photos :param photos: The photos to be downloaded :type photos: list of dicts
['Use', 'single', 'process', 'to', 'download', 'photos']
train
https://github.com/carlcarl/grabflickr/blob/e9cb2365de80c1819cfd5083c032d0d985f3c614/grabflickr/grabflickr.py#L224-L233
2,610
FutunnOpen/futuquant
futuquant/examples/TinyQuant/TinyQuantBase.py
ArrayManager.cci
def cci(self, n, array=False):
    """CCI指标"""
    result = talib.CCI(self.high, self.low, self.close, n)
    if array:
        return result
    return result[-1]
python
['def', 'cci', '(', 'self', ',', 'n', ',', 'array', '=', 'False', ')', ':', 'result', '=', 'talib', '.', 'CCI', '(', 'self', '.', 'high', ',', 'self', '.', 'low', ',', 'self', '.', 'close', ',', 'n', ')', 'if', 'array', ':', 'return', 'result', 'return', 'result', '[', '-', '1', ']']
CCI指标
['CCI指标']
train
https://github.com/FutunnOpen/futuquant/blob/1512b321845f92ec9c578ce2689aa4e8482669e4/futuquant/examples/TinyQuant/TinyQuantBase.py#L138-L143
2,611
openvax/varlens
varlens/read_evidence/pileup.py
Pileup.update
def update(self, other):
    '''
    Add all pileup elements from other into self.
    '''
    assert self.locus == other.locus
    self.elements.update(other.elements)
python
['def', 'update', '(', 'self', ',', 'other', ')', ':', 'assert', 'self', '.', 'locus', '==', 'other', '.', 'locus', 'self', '.', 'elements', '.', 'update', '(', 'other', '.', 'elements', ')']
Add all pileup elements from other into self.
['Add', 'all', 'pileup', 'elements', 'from', 'other', 'into', 'self', '.']
train
https://github.com/openvax/varlens/blob/715d3ede5893757b2fcba4117515621bca7b1e5d/varlens/read_evidence/pileup.py#L65-L70
2,612
fermiPy/fermipy
fermipy/gtanalysis.py
GTAnalysis.print_params
def print_params(self, allpars=False, loglevel=logging.INFO):
    """Print information about the model parameters
    (values, errors, bounds, scale)."""

    pars = self.get_params()

    o = '\n'
    o += '%4s %-20s%10s%10s%10s%10s%10s%5s\n' % (
        'idx', 'parname', 'value', 'error',
        'min', 'max', 'scale', 'free')

    o += '-' * 80 + '\n'

    src_pars = collections.OrderedDict()
    for p in pars:
        src_pars.setdefault(p['src_name'], [])
        src_pars[p['src_name']] += [p]

    free_sources = []
    for k, v in src_pars.items():
        for p in v:
            if not p['free']:
                continue
            free_sources += [k]

    for k, v in src_pars.items():

        if not allpars and k not in free_sources:
            continue

        o += '%s\n' % k
        for p in v:
            o += '%4i %-20.19s' % (p['idx'], p['par_name'])
            o += '%10.3g%10.3g' % (p['value'], p['error'])
            o += '%10.3g%10.3g%10.3g' % (p['min'], p['max'], p['scale'])

            if p['free']:
                o += ' *'
            else:
                o += ' '

            o += '\n'

    self.logger.log(loglevel, o)
python
['def', 'print_params', '(', 'self', ',', 'allpars', '=', 'False', ',', 'loglevel', '=', 'logging', '.', 'INFO', ')', ':', 'pars', '=', 'self', '.', 'get_params', '(', ')', 'o', '=', "'\\n'", 'o', '+=', "'%4s %-20s%10s%10s%10s%10s%10s%5s\\n'", '%', '(', "'idx'", ',', "'parname'", ',', "'value'", ',', "'error'", ',', "'min'", ',', "'max'", ',', "'scale'", ',', "'free'", ')', 'o', '+=', "'-'", '*', '80', '+', "'\\n'", 'src_pars', '=', 'collections', '.', 'OrderedDict', '(', ')', 'for', 'p', 'in', 'pars', ':', 'src_pars', '.', 'setdefault', '(', 'p', '[', "'src_name'", ']', ',', '[', ']', ')', 'src_pars', '[', 'p', '[', "'src_name'", ']', ']', '+=', '[', 'p', ']', 'free_sources', '=', '[', ']', 'for', 'k', ',', 'v', 'in', 'src_pars', '.', 'items', '(', ')', ':', 'for', 'p', 'in', 'v', ':', 'if', 'not', 'p', '[', "'free'", ']', ':', 'continue', 'free_sources', '+=', '[', 'k', ']', 'for', 'k', ',', 'v', 'in', 'src_pars', '.', 'items', '(', ')', ':', 'if', 'not', 'allpars', 'and', 'k', 'not', 'in', 'free_sources', ':', 'continue', 'o', '+=', "'%s\\n'", '%', 'k', 'for', 'p', 'in', 'v', ':', 'o', '+=', "'%4i %-20.19s'", '%', '(', 'p', '[', "'idx'", ']', ',', 'p', '[', "'par_name'", ']', ')', 'o', '+=', "'%10.3g%10.3g'", '%', '(', 'p', '[', "'value'", ']', ',', 'p', '[', "'error'", ']', ')', 'o', '+=', "'%10.3g%10.3g%10.3g'", '%', '(', 'p', '[', "'min'", ']', ',', 'p', '[', "'max'", ']', ',', 'p', '[', "'scale'", ']', ')', 'if', 'p', '[', "'free'", ']', ':', 'o', '+=', "' *'", 'else', ':', 'o', '+=', "' '", 'o', '+=', "'\\n'", 'self', '.', 'logger', '.', 'log', '(', 'loglevel', ',', 'o', ')']
Print information about the model parameters (values, errors, bounds, scale).
['Print', 'information', 'about', 'the', 'model', 'parameters', '(', 'values', 'errors', 'bounds', 'scale', ')', '.']
train
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/gtanalysis.py#L3415-L3463
2,613
apache/incubator-mxnet
python/mxnet/gluon/data/dataloader.py
ConnectionWrapper.send
def send(self, obj):
    """Send object"""
    buf = io.BytesIO()
    ForkingPickler(buf, pickle.HIGHEST_PROTOCOL).dump(obj)
    self.send_bytes(buf.getvalue())
python
['def', 'send', '(', 'self', ',', 'obj', ')', ':', 'buf', '=', 'io', '.', 'BytesIO', '(', ')', 'ForkingPickler', '(', 'buf', ',', 'pickle', '.', 'HIGHEST_PROTOCOL', ')', '.', 'dump', '(', 'obj', ')', 'self', '.', 'send_bytes', '(', 'buf', '.', 'getvalue', '(', ')', ')']
Send object
['Send', 'object']
train
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/gluon/data/dataloader.py#L81-L85
2,614
jwass/geojsonio.py
geojsonio/geojsonio.py
make_url
def make_url(contents, domain=DEFAULT_DOMAIN, force_gist=False,
             size_for_gist=MAX_URL_LEN):
    """
    Returns the URL to open given the domain and contents.

    If the file contents are large, an anonymous gist will be created.

    Parameters
    ----------
    contents
        * string - assumed to be GeoJSON
        * an object that implements __geo_interface__
          A FeatureCollection will be constructed with one feature,
          the object.
        * a sequence of objects that each implement __geo_interface__
          A FeatureCollection will be constructed with the objects
          as the features
    domain - string, default http://geojson.io
    force_gist - force gist creation regardless of file size.

    For more information about __geo_interface__ see:
    https://gist.github.com/sgillies/2217756

    If the contents are large, then a gist will be created.

    """
    contents = make_geojson(contents)

    if len(contents) <= size_for_gist and not force_gist:
        url = data_url(contents, domain)
    else:
        gist = _make_gist(contents)
        url = gist_url(gist.id, domain)

    return url
python
['def', 'make_url', '(', 'contents', ',', 'domain', '=', 'DEFAULT_DOMAIN', ',', 'force_gist', '=', 'False', ',', 'size_for_gist', '=', 'MAX_URL_LEN', ')', ':', 'contents', '=', 'make_geojson', '(', 'contents', ')', 'if', 'len', '(', 'contents', ')', '<=', 'size_for_gist', 'and', 'not', 'force_gist', ':', 'url', '=', 'data_url', '(', 'contents', ',', 'domain', ')', 'else', ':', 'gist', '=', '_make_gist', '(', 'contents', ')', 'url', '=', 'gist_url', '(', 'gist', '.', 'id', ',', 'domain', ')', 'return', 'url']
Returns the URL to open given the domain and contents. If the file contents are large, an anonymous gist will be created. Parameters ---------- contents * string - assumed to be GeoJSON * an object that implements __geo_interface__ A FeatureCollection will be constructed with one feature, the object. * a sequence of objects that each implement __geo_interface__ A FeatureCollection will be constructed with the objects as the features domain - string, default http://geojson.io force_gist - force gist creation regardless of file size. For more information about __geo_interface__ see: https://gist.github.com/sgillies/2217756 If the contents are large, then a gist will be created.
['Returns', 'the', 'URL', 'to', 'open', 'given', 'the', 'domain', 'and', 'contents', '.']
train
https://github.com/jwass/geojsonio.py/blob/8229a48238f128837e6dce49f18310df84968825/geojsonio/geojsonio.py#L63-L96
2,615
agoragames/kairos
kairos/timeseries.py
Histogram._transform
def _transform(self, data, transform, step_size):
    '''
    Transform the data. If the transform is not supported by this series,
    returns the data unaltered.
    '''
    if transform=='mean':
        total = sum( k*v for k,v in data.items() )
        count = sum( data.values() )
        data = float(total)/float(count) if count>0 else 0
    elif transform=='count':
        data = sum(data.values())
    elif transform=='min':
        data = min(data.keys() or [0])
    elif transform=='max':
        data = max(data.keys() or [0])
    elif transform=='sum':
        data = sum( k*v for k,v in data.items() )
    elif transform=='rate':
        data = { k:v/float(step_size) for k,v in data.items() }
    elif callable(transform):
        data = transform(data, step_size)
    return data
python
['def', '_transform', '(', 'self', ',', 'data', ',', 'transform', ',', 'step_size', ')', ':', 'if', 'transform', '==', "'mean'", ':', 'total', '=', 'sum', '(', 'k', '*', 'v', 'for', 'k', ',', 'v', 'in', 'data', '.', 'items', '(', ')', ')', 'count', '=', 'sum', '(', 'data', '.', 'values', '(', ')', ')', 'data', '=', 'float', '(', 'total', ')', '/', 'float', '(', 'count', ')', 'if', 'count', '>', '0', 'else', '0', 'elif', 'transform', '==', "'count'", ':', 'data', '=', 'sum', '(', 'data', '.', 'values', '(', ')', ')', 'elif', 'transform', '==', "'min'", ':', 'data', '=', 'min', '(', 'data', '.', 'keys', '(', ')', 'or', '[', '0', ']', ')', 'elif', 'transform', '==', "'max'", ':', 'data', '=', 'max', '(', 'data', '.', 'keys', '(', ')', 'or', '[', '0', ']', ')', 'elif', 'transform', '==', "'sum'", ':', 'data', '=', 'sum', '(', 'k', '*', 'v', 'for', 'k', ',', 'v', 'in', 'data', '.', 'items', '(', ')', ')', 'elif', 'transform', '==', "'rate'", ':', 'data', '=', '{', 'k', ':', 'v', '/', 'float', '(', 'step_size', ')', 'for', 'k', ',', 'v', 'in', 'data', '.', 'items', '(', ')', '}', 'elif', 'callable', '(', 'transform', ')', ':', 'data', '=', 'transform', '(', 'data', ',', 'step_size', ')', 'return', 'data']
Transform the data. If the transform is not supported by this series, returns the data unaltered.
['Transform', 'the', 'data', '.', 'If', 'the', 'transform', 'is', 'not', 'supported', 'by', 'this', 'series', 'returns', 'the', 'data', 'unaltered', '.']
train
https://github.com/agoragames/kairos/blob/0b062d543b0f4a46df460fa0eb6ec281232ab179/kairos/timeseries.py#L855-L876
2,616
gwastro/pycbc-glue
pycbc_glue/ligolw/table.py
reset_next_ids
def reset_next_ids(classes):
    """
    For each class in the list, if the .next_id attribute is not None
    (meaning the table has an ID generator associated with it), set
    .next_id to 0.  This has the effect of reseting the ID generators,
    and is useful in applications that process multiple documents and
    add new rows to tables in those documents.  Calling this function
    between documents prevents new row IDs from growing continuously
    from document to document.  There is no need to do this, it's
    purpose is merely aesthetic, but it can be confusing to open a
    document and find process ID 300 in the process table and wonder
    what happened to the other 299 processes.

    Example:

    >>> import lsctables
    >>> reset_next_ids(lsctables.TableByName.values())
    """
    for cls in classes:
        if cls.next_id is not None:
            cls.set_next_id(type(cls.next_id)(0))
python
['def', 'reset_next_ids', '(', 'classes', ')', ':', 'for', 'cls', 'in', 'classes', ':', 'if', 'cls', '.', 'next_id', 'is', 'not', 'None', ':', 'cls', '.', 'set_next_id', '(', 'type', '(', 'cls', '.', 'next_id', ')', '(', '0', ')', ')']
For each class in the list, if the .next_id attribute is not None (meaning the table has an ID generator associated with it), set .next_id to 0. This has the effect of reseting the ID generators, and is useful in applications that process multiple documents and add new rows to tables in those documents. Calling this function between documents prevents new row IDs from growing continuously from document to document. There is no need to do this, it's purpose is merely aesthetic, but it can be confusing to open a document and find process ID 300 in the process table and wonder what happened to the other 299 processes. Example: >>> import lsctables >>> reset_next_ids(lsctables.TableByName.values())
['For', 'each', 'class', 'in', 'the', 'list', 'if', 'the', '.', 'next_id', 'attribute', 'is', 'not', 'None', '(', 'meaning', 'the', 'table', 'has', 'an', 'ID', 'generator', 'associated', 'with', 'it', ')', 'set', '.', 'next_id', 'to', '0', '.', 'This', 'has', 'the', 'effect', 'of', 'reseting', 'the', 'ID', 'generators', 'and', 'is', 'useful', 'in', 'applications', 'that', 'process', 'multiple', 'documents', 'and', 'add', 'new', 'rows', 'to', 'tables', 'in', 'those', 'documents', '.', 'Calling', 'this', 'function', 'between', 'documents', 'prevents', 'new', 'row', 'IDs', 'from', 'growing', 'continuously', 'from', 'document', 'to', 'document', '.', 'There', 'is', 'no', 'need', 'to', 'do', 'this', 'it', 's', 'purpose', 'is', 'merely', 'aesthetic', 'but', 'it', 'can', 'be', 'confusing', 'to', 'open', 'a', 'document', 'and', 'find', 'process', 'ID', '300', 'in', 'the', 'process', 'table', 'and', 'wonder', 'what', 'happened', 'to', 'the', 'other', '299', 'processes', '.']
train
https://github.com/gwastro/pycbc-glue/blob/a3e906bae59fbfd707c3ff82e5d008d939ec5e24/pycbc_glue/ligolw/table.py#L303-L323
2,617
tanghaibao/goatools
goatools/semantic.py
lin_sim_calc
def lin_sim_calc(goid1, goid2, sim_r, termcnts):
    '''
        Computes Lin's similarity measure using pre-calculated Resnik's similarities.
    '''
    if sim_r is not None:
        info = get_info_content(goid1, termcnts) + get_info_content(goid2, termcnts)
        if info != 0:
            return (2*sim_r)/info
python
['def', 'lin_sim_calc', '(', 'goid1', ',', 'goid2', ',', 'sim_r', ',', 'termcnts', ')', ':', 'if', 'sim_r', 'is', 'not', 'None', ':', 'info', '=', 'get_info_content', '(', 'goid1', ',', 'termcnts', ')', '+', 'get_info_content', '(', 'goid2', ',', 'termcnts', ')', 'if', 'info', '!=', '0', ':', 'return', '(', '2', '*', 'sim_r', ')', '/', 'info']
Computes Lin's similarity measure using pre-calculated Resnik's similarities.
['Computes', 'Lin', 's', 'similarity', 'measure', 'using', 'pre', '-', 'calculated', 'Resnik', 's', 'similarities', '.']
train
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/semantic.py#L150-L157
2,618
tensorflow/hub
tensorflow_hub/feature_column.py
_TextEmbeddingColumn._get_dense_tensor
def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):
    """Returns a `Tensor`."""
    del weight_collections
    text_batch = tf.reshape(inputs.get(self), shape=[-1])
    m = module.Module(self.module_spec, trainable=self.trainable and trainable)
    return m(text_batch)
python
['def', '_get_dense_tensor', '(', 'self', ',', 'inputs', ',', 'weight_collections', '=', 'None', ',', 'trainable', '=', 'None', ')', ':', 'del', 'weight_collections', 'text_batch', '=', 'tf', '.', 'reshape', '(', 'inputs', '.', 'get', '(', 'self', ')', ',', 'shape', '=', '[', '-', '1', ']', ')', 'm', '=', 'module', '.', 'Module', '(', 'self', '.', 'module_spec', ',', 'trainable', '=', 'self', '.', 'trainable', 'and', 'trainable', ')', 'return', 'm', '(', 'text_batch', ')']
Returns a `Tensor`.
['Returns', 'a', 'Tensor', '.']
train
https://github.com/tensorflow/hub/blob/09f45963f6787322967b6fec61459f3ac56fbb27/tensorflow_hub/feature_column.py#L154-L159
2,619
fermiPy/fermipy
fermipy/diffuse/name_policy.py
NameFactory.bexpcube_moon
def bexpcube_moon(self, **kwargs):
    """ return the name of a binned exposure cube file
    """
    kwargs_copy = self.base_dict.copy()
    kwargs_copy.update(**kwargs)
    kwargs_copy['dataset'] = kwargs.get('dataset', self.dataset(**kwargs))
    kwargs_copy['component'] = kwargs.get(
        'component', self.component(**kwargs))
    self._replace_none(kwargs_copy)
    localpath = NameFactory.bexpcubemoon_format.format(**kwargs_copy)
    if kwargs.get('fullpath', False):
        return self.fullpath(localpath=localpath)
    return localpath
python
['def', 'bexpcube_moon', '(', 'self', ',', '*', '*', 'kwargs', ')', ':', 'kwargs_copy', '=', 'self', '.', 'base_dict', '.', 'copy', '(', ')', 'kwargs_copy', '.', 'update', '(', '*', '*', 'kwargs', ')', 'kwargs_copy', '[', "'dataset'", ']', '=', 'kwargs', '.', 'get', '(', "'dataset'", ',', 'self', '.', 'dataset', '(', '*', '*', 'kwargs', ')', ')', 'kwargs_copy', '[', "'component'", ']', '=', 'kwargs', '.', 'get', '(', "'component'", ',', 'self', '.', 'component', '(', '*', '*', 'kwargs', ')', ')', 'self', '.', '_replace_none', '(', 'kwargs_copy', ')', 'localpath', '=', 'NameFactory', '.', 'bexpcubemoon_format', '.', 'format', '(', '*', '*', 'kwargs_copy', ')', 'if', 'kwargs', '.', 'get', '(', "'fullpath'", ',', 'False', ')', ':', 'return', 'self', '.', 'fullpath', '(', 'localpath', '=', 'localpath', ')', 'return', 'localpath']
return the name of a binned exposure cube file
['return', 'the', 'name', 'of', 'a', 'binned', 'exposure', 'cube', 'file']
train
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/diffuse/name_policy.py#L456-L468
2,620
androguard/androguard
androguard/core/androconf.py
is_android_raw
def is_android_raw(raw):
    """
    Returns a string that describes the type of file, for common Android
    specific formats
    """
    val = None

    # We do not check for META-INF/MANIFEST.MF,
    # as you also want to analyze unsigned APKs...
    # AndroidManifest.xml should be in every APK.
    # classes.dex and resources.arsc are not required!
    # if raw[0:2] == b"PK" and b'META-INF/MANIFEST.MF' in raw:
    # TODO this check might be still invalid. A ZIP file with stored APK inside would match as well.
    # probably it would be better to rewrite this and add more sanity checks.
    if raw[0:2] == b"PK" and b'AndroidManifest.xml' in raw:
        val = "APK"
    elif raw[0:3] == b"dex":
        val = "DEX"
    elif raw[0:3] == b"dey":
        val = "DEY"
    elif raw[0:4] == b"\x03\x00\x08\x00" or raw[0:4] == b"\x00\x00\x08\x00":
        val = "AXML"
    elif raw[0:4] == b"\x02\x00\x0C\x00":
        val = "ARSC"

    return val
python
['def', 'is_android_raw', '(', 'raw', ')', ':', 'val', '=', 'None', '# We do not check for META-INF/MANIFEST.MF,', '# as you also want to analyze unsigned APKs...', '# AndroidManifest.xml should be in every APK.', '# classes.dex and resources.arsc are not required!', '# if raw[0:2] == b"PK" and b\'META-INF/MANIFEST.MF\' in raw:', '# TODO this check might be still invalid. A ZIP file with stored APK inside would match as well.', '# probably it would be better to rewrite this and add more sanity checks.', 'if', 'raw', '[', '0', ':', '2', ']', '==', 'b"PK"', 'and', "b'AndroidManifest.xml'", 'in', 'raw', ':', 'val', '=', '"APK"', 'elif', 'raw', '[', '0', ':', '3', ']', '==', 'b"dex"', ':', 'val', '=', '"DEX"', 'elif', 'raw', '[', '0', ':', '3', ']', '==', 'b"dey"', ':', 'val', '=', '"DEY"', 'elif', 'raw', '[', '0', ':', '4', ']', '==', 'b"\\x03\\x00\\x08\\x00"', 'or', 'raw', '[', '0', ':', '4', ']', '==', 'b"\\x00\\x00\\x08\\x00"', ':', 'val', '=', '"AXML"', 'elif', 'raw', '[', '0', ':', '4', ']', '==', 'b"\\x02\\x00\\x0C\\x00"', ':', 'val', '=', '"ARSC"', 'return', 'val']
Returns a string that describes the type of file, for common Android specific formats
['Returns', 'a', 'string', 'that', 'describes', 'the', 'type', 'of', 'file', 'for', 'common', 'Android', 'specific', 'formats']
train
https://github.com/androguard/androguard/blob/984c0d981be2950cf0451e484f7b0d4d53bc4911/androguard/core/androconf.py#L215-L240
2,621
brocade/pynos
pynos/versions/base/yang/brocade_ras.py
brocade_ras.logging_raslog_message_msgId_msgId
def logging_raslog_message_msgId_msgId(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    logging = ET.SubElement(config, "logging", xmlns="urn:brocade.com:mgmt:brocade-ras")
    raslog = ET.SubElement(logging, "raslog")
    message = ET.SubElement(raslog, "message")
    msgId = ET.SubElement(message, "msgId")
    msgId = ET.SubElement(msgId, "msgId")
    msgId.text = kwargs.pop('msgId')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
python
['def', 'logging_raslog_message_msgId_msgId', '(', 'self', ',', '*', '*', 'kwargs', ')', ':', 'config', '=', 'ET', '.', 'Element', '(', '"config"', ')', 'logging', '=', 'ET', '.', 'SubElement', '(', 'config', ',', '"logging"', ',', 'xmlns', '=', '"urn:brocade.com:mgmt:brocade-ras"', ')', 'raslog', '=', 'ET', '.', 'SubElement', '(', 'logging', ',', '"raslog"', ')', 'message', '=', 'ET', '.', 'SubElement', '(', 'raslog', ',', '"message"', ')', 'msgId', '=', 'ET', '.', 'SubElement', '(', 'message', ',', '"msgId"', ')', 'msgId', '=', 'ET', '.', 'SubElement', '(', 'msgId', ',', '"msgId"', ')', 'msgId', '.', 'text', '=', 'kwargs', '.', 'pop', '(', "'msgId'", ')', 'callback', '=', 'kwargs', '.', 'pop', '(', "'callback'", ',', 'self', '.', '_callback', ')', 'return', 'callback', '(', 'config', ')']
Auto Generated Code
['Auto', 'Generated', 'Code']
train
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/base/yang/brocade_ras.py#L12-L24
2,622
dwavesystems/dwave-system
dwave/embedding/utils.py
chain_to_quadratic
def chain_to_quadratic(chain, target_adjacency, chain_strength):
    """Determine the quadratic biases that induce the given chain.

    Args:
        chain (iterable):
            The variables that make up a chain.

        target_adjacency (dict/:class:`networkx.Graph`):
            Should be a dict of the form {s: Ns, ...} where s is a variable
            in the target graph and Ns is the set of neighbours of s.

        chain_strength (float):
            The magnitude of the quadratic bias that should be used to create chains.

    Returns:
        dict[edge, float]: The quadratic biases that induce the given chain.

    Raises:
        ValueError: If the variables in chain do not form a connected subgraph
            of target.

    Examples:
        >>> chain = {1, 2}
        >>> target_adjacency = {0: {1, 2}, 1: {0, 2}, 2: {0, 1}}
        >>> dimod.embedding.chain_to_quadratic(chain, target_adjacency, 1)
        {(1, 2): -1}

    """
    quadratic = {}  # we will be adding the edges that make the chain here

    # do a breadth first search
    seen = set()
    try:
        next_level = {next(iter(chain))}
    except StopIteration:
        raise ValueError("chain must have at least one variable")
    while next_level:
        this_level = next_level
        next_level = set()
        for v in this_level:
            if v not in seen:
                seen.add(v)

                for u in target_adjacency[v]:
                    if u not in chain:
                        continue
                    next_level.add(u)
                    if u != v and (u, v) not in quadratic:
                        quadratic[(v, u)] = -chain_strength

    if len(chain) != len(seen):
        raise ValueError('{} is not a connected chain'.format(chain))

    return quadratic
python
['def', 'chain_to_quadratic', '(', 'chain', ',', 'target_adjacency', ',', 'chain_strength', ')', ':', 'quadratic', '=', '{', '}', '# we will be adding the edges that make the chain here', '# do a breadth first search', 'seen', '=', 'set', '(', ')', 'try', ':', 'next_level', '=', '{', 'next', '(', 'iter', '(', 'chain', ')', ')', '}', 'except', 'StopIteration', ':', 'raise', 'ValueError', '(', '"chain must have at least one variable"', ')', 'while', 'next_level', ':', 'this_level', '=', 'next_level', 'next_level', '=', 'set', '(', ')', 'for', 'v', 'in', 'this_level', ':', 'if', 'v', 'not', 'in', 'seen', ':', 'seen', '.', 'add', '(', 'v', ')', 'for', 'u', 'in', 'target_adjacency', '[', 'v', ']', ':', 'if', 'u', 'not', 'in', 'chain', ':', 'continue', 'next_level', '.', 'add', '(', 'u', ')', 'if', 'u', '!=', 'v', 'and', '(', 'u', ',', 'v', ')', 'not', 'in', 'quadratic', ':', 'quadratic', '[', '(', 'v', ',', 'u', ')', ']', '=', '-', 'chain_strength', 'if', 'len', '(', 'chain', ')', '!=', 'len', '(', 'seen', ')', ':', 'raise', 'ValueError', '(', "'{} is not a connected chain'", '.', 'format', '(', 'chain', ')', ')', 'return', 'quadratic']
Determine the quadratic biases that induce the given chain. Args: chain (iterable): The variables that make up a chain. target_adjacency (dict/:class:`networkx.Graph`): Should be a dict of the form {s: Ns, ...} where s is a variable in the target graph and Ns is the set of neighbours of s. chain_strength (float): The magnitude of the quadratic bias that should be used to create chains. Returns: dict[edge, float]: The quadratic biases that induce the given chain. Raises: ValueError: If the variables in chain do not form a connected subgraph of target. Examples: >>> chain = {1, 2} >>> target_adjacency = {0: {1, 2}, 1: {0, 2}, 2: {0, 1}} >>> dimod.embedding.chain_to_quadratic(chain, target_adjacency, 1) {(1, 2): -1}
['Determine', 'the', 'quadratic', 'biases', 'that', 'induce', 'the', 'given', 'chain', '.']
train
https://github.com/dwavesystems/dwave-system/blob/86a1698f15ccd8b0ece0ed868ee49292d3f67f5b/dwave/embedding/utils.py#L98-L150
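A sketch reproducing the docstring example from the record above; the import path is an assumption based on the file path in the record (the docstring itself shows the call under a `dimod.embedding` name).

# Hedged usage sketch; import location assumed from the record's file path.
from dwave.embedding.utils import chain_to_quadratic

chain = {1, 2}
target_adjacency = {0: {1, 2}, 1: {0, 2}, 2: {0, 1}}
print(chain_to_quadratic(chain, target_adjacency, 1))  # -> {(1, 2): -1}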
2,623
scidash/sciunit
sciunit/scores/complete.py
ZScore.norm_score
def norm_score(self):
    """Return the normalized score.

    Equals 1.0 for a z-score of 0, falling to 0.0 for extremely
    positive or negative values.
    """
    cdf = (1.0 + math.erf(self.score / math.sqrt(2.0))) / 2.0
    return 1 - 2*math.fabs(0.5 - cdf)
python
['def', 'norm_score', '(', 'self', ')', ':', 'cdf', '=', '(', '1.0', '+', 'math', '.', 'erf', '(', 'self', '.', 'score', '/', 'math', '.', 'sqrt', '(', '2.0', ')', ')', ')', '/', '2.0', 'return', '1', '-', '2', '*', 'math', '.', 'fabs', '(', '0.5', '-', 'cdf', ')']
Return the normalized score. Equals 1.0 for a z-score of 0, falling to 0.0 for extremely positive or negative values.
['Return', 'the', 'normalized', 'score', '.']
train
https://github.com/scidash/sciunit/blob/41b2e38c45c0776727ab1f281a572b65be19cea1/sciunit/scores/complete.py#L76-L83
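The normalized score above is a fold of the standard normal CDF around 0.5. A self-contained check with only the standard library (no sciunit dependency):

# Standalone sketch: norm = 1 - 2*|0.5 - CDF(z)|, CDF = standard normal CDF.
import math

def norm_from_z(z):
    cdf = (1.0 + math.erf(z / math.sqrt(2.0))) / 2.0
    return 1 - 2 * math.fabs(0.5 - cdf)

print(norm_from_z(0.0))              # 1.0  (z = 0 is a perfect match)
print(round(norm_from_z(2.0), 3))    # ~0.046 (large |z| falls toward 0)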
2,624
caseyjlaw/rtpipe
rtpipe/RT.py
sample_image
def sample_image(d, data, u, v, w, i=-1, verbose=0, imager='xy', wres=100):
    """ Samples one integration and returns image
    i is integration to image. Default is mid int.
    """

    if i == -1:
        i = len(data)/2

    if imager == 'xy':
        image = rtlib.imgonefullxy(n.outer(u, d['freq']/d['freq_orig'][0]), n.outer(v, d['freq']/d['freq_orig'][0]), data[i], d['npixx'], d['npixy'], d['uvres'], verbose=verbose)
    elif imager == 'w':
        npix = max(d['npixx'], d['npixy'])
        bls, uvkers = rtlib.genuvkernels(w, wres, npix, d['uvres'], ksize=21, oversample=1)
        image = rtlib.imgonefullw(n.outer(u, d['freq']/d['freq_orig'][0]), n.outer(v, d['freq']/d['freq_orig'][0]), data[i], npix, d['uvres'], bls, uvkers, verbose=verbose)

        # bls, lmkers = rtlib.genlmkernels(w, wres, npix, d['uvres'])
        # image = rtlib.imgonefullw(n.outer(u, d['freq']/d['freq_orig'][0]), n.outer(v, d['freq']/d['freq_orig'][0]), data[i], npix, d['uvres'], [bls[0]], [lmkers[0]], verbose=verbose)

    return image
python
['def', 'sample_image', '(', 'd', ',', 'data', ',', 'u', ',', 'v', ',', 'w', ',', 'i', '=', '-', '1', ',', 'verbose', '=', '0', ',', 'imager', '=', "'xy'", ',', 'wres', '=', '100', ')', ':', 'if', 'i', '==', '-', '1', ':', 'i', '=', 'len', '(', 'data', ')', '/', '2', 'if', 'imager', '==', "'xy'", ':', 'image', '=', 'rtlib', '.', 'imgonefullxy', '(', 'n', '.', 'outer', '(', 'u', ',', 'd', '[', "'freq'", ']', '/', 'd', '[', "'freq_orig'", ']', '[', '0', ']', ')', ',', 'n', '.', 'outer', '(', 'v', ',', 'd', '[', "'freq'", ']', '/', 'd', '[', "'freq_orig'", ']', '[', '0', ']', ')', ',', 'data', '[', 'i', ']', ',', 'd', '[', "'npixx'", ']', ',', 'd', '[', "'npixy'", ']', ',', 'd', '[', "'uvres'", ']', ',', 'verbose', '=', 'verbose', ')', 'elif', 'imager', '==', "'w'", ':', 'npix', '=', 'max', '(', 'd', '[', "'npixx'", ']', ',', 'd', '[', "'npixy'", ']', ')', 'bls', ',', 'uvkers', '=', 'rtlib', '.', 'genuvkernels', '(', 'w', ',', 'wres', ',', 'npix', ',', 'd', '[', "'uvres'", ']', ',', 'ksize', '=', '21', ',', 'oversample', '=', '1', ')', 'image', '=', 'rtlib', '.', 'imgonefullw', '(', 'n', '.', 'outer', '(', 'u', ',', 'd', '[', "'freq'", ']', '/', 'd', '[', "'freq_orig'", ']', '[', '0', ']', ')', ',', 'n', '.', 'outer', '(', 'v', ',', 'd', '[', "'freq'", ']', '/', 'd', '[', "'freq_orig'", ']', '[', '0', ']', ')', ',', 'data', '[', 'i', ']', ',', 'npix', ',', 'd', '[', "'uvres'", ']', ',', 'bls', ',', 'uvkers', ',', 'verbose', '=', 'verbose', ')', "# bls, lmkers = rtlib.genlmkernels(w, wres, npix, d['uvres'])", "# image = rtlib.imgonefullw(n.outer(u, d['freq']/d['freq_orig'][0]), n.outer(v, d['freq']/d['freq_orig'][0]), data[i], npix, d['uvres'], [bls[0]], [lmkers[0]], verbose=verbose)", 'return', 'image']
Samples one integration and returns image i is integration to image. Default is mid int.
['Samples', 'one', 'integration', 'and', 'returns', 'image', 'i', 'is', 'integration', 'to', 'image', '.', 'Default', 'is', 'mid', 'int', '.']
train
https://github.com/caseyjlaw/rtpipe/blob/ac33e4332cf215091a63afbb3137850876d73ec0/rtpipe/RT.py#L1414-L1432
2,625
PGower/PyCanvas
pycanvas/apis/quiz_submission_questions.py
QuizSubmissionQuestionsAPI.answering_questions
def answering_questions(self, attempt, validation_token, quiz_submission_id, access_code=None, quiz_questions=None):
    """
    Answering questions.

    Provide or update an answer to one or more QuizQuestions.
    """
    path = {}
    data = {}
    params = {}

    # REQUIRED - PATH - quiz_submission_id
    """ID"""
    path["quiz_submission_id"] = quiz_submission_id

    # REQUIRED - attempt
    """The attempt number of the quiz submission being taken. Note that this
    must be the latest attempt index, as questions for earlier attempts can
    not be modified."""
    data["attempt"] = attempt

    # REQUIRED - validation_token
    """The unique validation token you received when the Quiz Submission was
    created."""
    data["validation_token"] = validation_token

    # OPTIONAL - access_code
    """Access code for the Quiz, if any."""
    if access_code is not None:
        data["access_code"] = access_code

    # OPTIONAL - quiz_questions
    """Set of question IDs and the answer value.

    See {Appendix: Question Answer Formats} for the accepted answer formats
    for each question type."""
    if quiz_questions is not None:
        data["quiz_questions"] = quiz_questions

    self.logger.debug("POST /api/v1/quiz_submissions/{quiz_submission_id}/questions with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("POST", "/api/v1/quiz_submissions/{quiz_submission_id}/questions".format(**path), data=data, params=params, all_pages=True)
python
['def', 'answering_questions', '(', 'self', ',', 'attempt', ',', 'validation_token', ',', 'quiz_submission_id', ',', 'access_code', '=', 'None', ',', 'quiz_questions', '=', 'None', ')', ':', 'path', '=', '{', '}', 'data', '=', '{', '}', 'params', '=', '{', '}', '# REQUIRED - PATH - quiz_submission_id\r', '"""ID"""', 'path', '[', '"quiz_submission_id"', ']', '=', 'quiz_submission_id', '# REQUIRED - attempt\r', '"""The attempt number of the quiz submission being taken. Note that this\r\n must be the latest attempt index, as questions for earlier attempts can\r\n not be modified."""', 'data', '[', '"attempt"', ']', '=', 'attempt', '# REQUIRED - validation_token\r', '"""The unique validation token you received when the Quiz Submission was\r\n created."""', 'data', '[', '"validation_token"', ']', '=', 'validation_token', '# OPTIONAL - access_code\r', '"""Access code for the Quiz, if any."""', 'if', 'access_code', 'is', 'not', 'None', ':', 'data', '[', '"access_code"', ']', '=', 'access_code', '# OPTIONAL - quiz_questions\r', '"""Set of question IDs and the answer value.\r\n \r\n See {Appendix: Question Answer Formats} for the accepted answer formats\r\n for each question type."""', 'if', 'quiz_questions', 'is', 'not', 'None', ':', 'data', '[', '"quiz_questions"', ']', '=', 'quiz_questions', 'self', '.', 'logger', '.', 'debug', '(', '"POST /api/v1/quiz_submissions/{quiz_submission_id}/questions with query params: {params} and form data: {data}"', '.', 'format', '(', 'params', '=', 'params', ',', 'data', '=', 'data', ',', '*', '*', 'path', ')', ')', 'return', 'self', '.', 'generic_request', '(', '"POST"', ',', '"/api/v1/quiz_submissions/{quiz_submission_id}/questions"', '.', 'format', '(', '*', '*', 'path', ')', ',', 'data', '=', 'data', ',', 'params', '=', 'params', ',', 'all_pages', '=', 'True', ')']
Answering questions. Provide or update an answer to one or more QuizQuestions.
['Answering', 'questions', '.', 'Provide', 'or', 'update', 'an', 'answer', 'to', 'one', 'or', 'more', 'QuizQuestions', '.']
train
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/quiz_submission_questions.py#L44-L83
2,626
materialsproject/pymatgen
pymatgen/analysis/adsorption.py
AdsorbateSiteFinder.generate_substitution_structures
def generate_substitution_structures(self, atom, target_species=[],
                                     sub_both_sides=False, range_tol=1e-2,
                                     dist_from_surf=0):
    """
    Function that performs substitution-type doping on the surface and
    returns all possible configurations where one dopant is substituted
    per surface. Can substitute one surface or both.

    Args:
        atom (str): atom corresponding to substitutional dopant
        sub_both_sides (bool): If true, substitute an equivalent
            site on the other surface
        target_species (list): List of specific species to substitute
        range_tol (float): Find viable substitution sites at a specific
            distance from the surface +- this tolerance
        dist_from_surf (float): Distance from the surface to find viable
            substitution sites, defaults to 0 to substitute at the surface
    """
    # Get symmetrized structure in case we want to substitue both sides
    sym_slab = SpacegroupAnalyzer(self.slab).get_symmetrized_structure()

    # Define a function for substituting a site
    def substitute(site, i):
        slab = self.slab.copy()
        props = self.slab.site_properties
        if sub_both_sides:
            # Find an equivalent site on the other surface
            eq_indices = [indices for indices in
                          sym_slab.equivalent_indices if i in indices][0]
            for ii in eq_indices:
                if "%.6f" % (sym_slab[ii].frac_coords[2]) != \
                        "%.6f" % (site.frac_coords[2]):
                    props["surface_properties"][ii] = "substitute"
                    slab.replace(ii, atom)
                    break

        props["surface_properties"][i] = "substitute"
        slab.replace(i, atom)
        slab.add_site_property("surface_properties",
                               props["surface_properties"])
        return slab

    # Get all possible substitution sites
    substituted_slabs = []
    # Sort sites so that we can define a range relative to the position of the
    # surface atoms, i.e. search for sites above (below) the bottom (top) surface
    sorted_sites = sorted(sym_slab, key=lambda site: site.frac_coords[2])
    if sorted_sites[0].surface_properties == "surface":
        d = sorted_sites[0].frac_coords[2] + dist_from_surf
    else:
        d = sorted_sites[-1].frac_coords[2] - dist_from_surf

    for i, site in enumerate(sym_slab):
        if d - range_tol < site.frac_coords[2] < d + range_tol:
            if target_species and site.species_string in target_species:
                substituted_slabs.append(substitute(site, i))
            elif not target_species:
                substituted_slabs.append(substitute(site, i))

    matcher = StructureMatcher()
    return [s[0] for s in matcher.group_structures(substituted_slabs)]
python
['def', 'generate_substitution_structures', '(', 'self', ',', 'atom', ',', 'target_species', '=', '[', ']', ',', 'sub_both_sides', '=', 'False', ',', 'range_tol', '=', '1e-2', ',', 'dist_from_surf', '=', '0', ')', ':', '# Get symmetrized structure in case we want to substitue both sides', 'sym_slab', '=', 'SpacegroupAnalyzer', '(', 'self', '.', 'slab', ')', '.', 'get_symmetrized_structure', '(', ')', '# Define a function for substituting a site', 'def', 'substitute', '(', 'site', ',', 'i', ')', ':', 'slab', '=', 'self', '.', 'slab', '.', 'copy', '(', ')', 'props', '=', 'self', '.', 'slab', '.', 'site_properties', 'if', 'sub_both_sides', ':', '# Find an equivalent site on the other surface', 'eq_indices', '=', '[', 'indices', 'for', 'indices', 'in', 'sym_slab', '.', 'equivalent_indices', 'if', 'i', 'in', 'indices', ']', '[', '0', ']', 'for', 'ii', 'in', 'eq_indices', ':', 'if', '"%.6f"', '%', '(', 'sym_slab', '[', 'ii', ']', '.', 'frac_coords', '[', '2', ']', ')', '!=', '"%.6f"', '%', '(', 'site', '.', 'frac_coords', '[', '2', ']', ')', ':', 'props', '[', '"surface_properties"', ']', '[', 'ii', ']', '=', '"substitute"', 'slab', '.', 'replace', '(', 'ii', ',', 'atom', ')', 'break', 'props', '[', '"surface_properties"', ']', '[', 'i', ']', '=', '"substitute"', 'slab', '.', 'replace', '(', 'i', ',', 'atom', ')', 'slab', '.', 'add_site_property', '(', '"surface_properties"', ',', 'props', '[', '"surface_properties"', ']', ')', 'return', 'slab', '# Get all possible substitution sites', 'substituted_slabs', '=', '[', ']', '# Sort sites so that we can define a range relative to the position of the', '# surface atoms, i.e. search for sites above (below) the bottom (top) surface', 'sorted_sites', '=', 'sorted', '(', 'sym_slab', ',', 'key', '=', 'lambda', 'site', ':', 'site', '.', 'frac_coords', '[', '2', ']', ')', 'if', 'sorted_sites', '[', '0', ']', '.', 'surface_properties', '==', '"surface"', ':', 'd', '=', 'sorted_sites', '[', '0', ']', '.', 'frac_coords', '[', '2', ']', '+', 'dist_from_surf', 'else', ':', 'd', '=', 'sorted_sites', '[', '-', '1', ']', '.', 'frac_coords', '[', '2', ']', '-', 'dist_from_surf', 'for', 'i', ',', 'site', 'in', 'enumerate', '(', 'sym_slab', ')', ':', 'if', 'd', '-', 'range_tol', '<', 'site', '.', 'frac_coords', '[', '2', ']', '<', 'd', '+', 'range_tol', ':', 'if', 'target_species', 'and', 'site', '.', 'species_string', 'in', 'target_species', ':', 'substituted_slabs', '.', 'append', '(', 'substitute', '(', 'site', ',', 'i', ')', ')', 'elif', 'not', 'target_species', ':', 'substituted_slabs', '.', 'append', '(', 'substitute', '(', 'site', ',', 'i', ')', ')', 'matcher', '=', 'StructureMatcher', '(', ')', 'return', '[', 's', '[', '0', ']', 'for', 's', 'in', 'matcher', '.', 'group_structures', '(', 'substituted_slabs', ')', ']']
Function that performs substitution-type doping on the surface and returns all possible configurations where one dopant is substituted per surface. Can substitute one surface or both. Args: atom (str): atom corresponding to substitutional dopant sub_both_sides (bool): If true, substitute an equivalent site on the other surface target_species (list): List of specific species to substitute range_tol (float): Find viable substitution sites at a specific distance from the surface +- this tolerance dist_from_surf (float): Distance from the surface to find viable substitution sites, defaults to 0 to substitute at the surface
['Function', 'that', 'performs', 'substitution', '-', 'type', 'doping', 'on', 'the', 'surface', 'and', 'returns', 'all', 'possible', 'configurations', 'where', 'one', 'dopant', 'is', 'substituted', 'per', 'surface', '.', 'Can', 'substitute', 'one', 'surface', 'or', 'both', '.']
train
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/analysis/adsorption.py#L503-L564
2,627
turbidsoul/tsutil
tsutil/sync.py
wrap_object
def wrap_object(func, before, after):
    '''
    before/after call will encapsulate callable object
    '''
    def _wrapper(*args, **kwargs):
        before()
        try:
            return func(*args, **kwargs)
        except Exception as e:
            raise e
        finally:
            after()
    return _wrapper
python
['def', 'wrap_object', '(', 'func', ',', 'before', ',', 'after', ')', ':', 'def', '_wrapper', '(', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'before', '(', ')', 'try', ':', 'return', 'func', '(', '*', 'args', ',', '*', '*', 'kwargs', ')', 'except', 'Exception', 'as', 'e', ':', 'raise', 'e', 'finally', ':', 'after', '(', ')', 'return', '_wrapper']
before/after call will encapsulate callable object
['before', '/', 'after', 'call', 'will', 'encapsulate', 'callable', 'object']
train
https://github.com/turbidsoul/tsutil/blob/2c86d872791edc0f17f2c48b6f15d5c79b4551f7/tsutil/sync.py#L14-L26
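A hedged usage sketch for the record above: wrapping a callable between a lock's acquire and release. The import path is assumed from the record's file path; the wrapped function is hypothetical.

# Hedged usage sketch -- import location assumed, example function hypothetical.
import threading
from tsutil.sync import wrap_object

lock = threading.Lock()

def critical_section(x):
    return x * 2

guarded = wrap_object(critical_section, lock.acquire, lock.release)
print(guarded(21))  # 42, with the lock held for the duration of the call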
2,628
assamite/creamas
creamas/vote.py
VoteOrganizer.gather_candidates
def gather_candidates(self):
    """Gather candidates from the slave environments.

    The candidates are stored in :attr:`candidates`, overriding any
    previous candidates.
    """
    async def slave_task(addr):
        r_manager = await self.env.connect(addr)
        return await r_manager.get_candidates()

    if self._single_env:
        self._candidates = self.env.candidates
    else:
        mgrs = self.get_managers()
        tasks = create_tasks(slave_task, mgrs)
        self._candidates = run(tasks)
python
['def', 'gather_candidates', '(', 'self', ')', ':', 'async', 'def', 'slave_task', '(', 'addr', ')', ':', 'r_manager', '=', 'await', 'self', '.', 'env', '.', 'connect', '(', 'addr', ')', 'return', 'await', 'r_manager', '.', 'get_candidates', '(', ')', 'if', 'self', '.', '_single_env', ':', 'self', '.', '_candidates', '=', 'self', '.', 'env', '.', 'candidates', 'else', ':', 'mgrs', '=', 'self', '.', 'get_managers', '(', ')', 'tasks', '=', 'create_tasks', '(', 'slave_task', ',', 'mgrs', ')', 'self', '.', '_candidates', '=', 'run', '(', 'tasks', ')']
Gather candidates from the slave environments. The candidates are stored in :attr:`candidates`, overriding any previous candidates.
['Gather', 'candidates', 'from', 'the', 'slave', 'environments', '.']
train
https://github.com/assamite/creamas/blob/54dc3e31c97a3f938e58272f8ab80b6bcafeff58/creamas/vote.py#L299-L314
2,629
pypa/pipenv
pipenv/vendor/jinja2/filters.py
do_title
def do_title(s):
    """Return a titlecased version of the value. I.e. words will start with
    uppercase letters, all remaining characters are lowercase.
    """
    return ''.join(
        [item[0].upper() + item[1:].lower()
         for item in _word_beginning_split_re.split(soft_unicode(s))
         if item])
python
['def', 'do_title', '(', 's', ')', ':', 'return', "''", '.', 'join', '(', '[', 'item', '[', '0', ']', '.', 'upper', '(', ')', '+', 'item', '[', '1', ':', ']', '.', 'lower', '(', ')', 'for', 'item', 'in', '_word_beginning_split_re', '.', 'split', '(', 'soft_unicode', '(', 's', ')', ')', 'if', 'item', ']', ')']
Return a titlecased version of the value. I.e. words will start with uppercase letters, all remaining characters are lowercase.
['Return', 'a', 'titlecased', 'version', 'of', 'the', 'value', '.', 'I', '.', 'e', '.', 'words', 'will', 'start', 'with', 'uppercase', 'letters', 'all', 'remaining', 'characters', 'are', 'lowercase', '.']
train
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/jinja2/filters.py#L196-L203
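The record above implements Jinja2's built-in `title` filter, so the usual way to exercise it is through a template rather than by calling the function directly. A short sketch:

# Usage sketch via the public Jinja2 template API.
from jinja2 import Template

print(Template("{{ s|title }}").render(s="hello jinja2 world"))
# -> "Hello Jinja2 World"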
2,630
richardkiss/pycoin
pycoin/encoding/sec.py
sec_to_public_pair
def sec_to_public_pair(sec, generator=None, strict=True):
    """Convert a public key in sec binary format to a public pair."""
    byte_count = (generator.p().bit_length() + 7) >> 3 if generator else (len(sec) - 1)
    x = from_bytes_32(sec[1:1 + byte_count])
    sec0 = sec[:1]
    if len(sec) == 1 + byte_count * 2:
        isok = sec0 == b'\4'
        if not strict:
            isok = isok or (sec0 in [b'\6', b'\7'])
        if isok:
            y = from_bytes_32(sec[1+byte_count:1+2*byte_count])
            return (x, y)
    elif len(sec) == 1 + byte_count:
        if not strict or (sec0 in (b'\2', b'\3')):
            is_y_odd = (sec0 != b'\2')
            return generator.points_for_x(x)[is_y_odd]
    raise EncodingError("bad sec encoding for public key")
python
['def', 'sec_to_public_pair', '(', 'sec', ',', 'generator', '=', 'None', ',', 'strict', '=', 'True', ')', ':', 'byte_count', '=', '(', 'generator', '.', 'p', '(', ')', '.', 'bit_length', '(', ')', '+', '7', ')', '>>', '3', 'if', 'generator', 'else', '(', 'len', '(', 'sec', ')', '-', '1', ')', 'x', '=', 'from_bytes_32', '(', 'sec', '[', '1', ':', '1', '+', 'byte_count', ']', ')', 'sec0', '=', 'sec', '[', ':', '1', ']', 'if', 'len', '(', 'sec', ')', '==', '1', '+', 'byte_count', '*', '2', ':', 'isok', '=', 'sec0', '==', "b'\\4'", 'if', 'not', 'strict', ':', 'isok', '=', 'isok', 'or', '(', 'sec0', 'in', '[', "b'\\6'", ',', "b'\\7'", ']', ')', 'if', 'isok', ':', 'y', '=', 'from_bytes_32', '(', 'sec', '[', '1', '+', 'byte_count', ':', '1', '+', '2', '*', 'byte_count', ']', ')', 'return', '(', 'x', ',', 'y', ')', 'elif', 'len', '(', 'sec', ')', '==', '1', '+', 'byte_count', ':', 'if', 'not', 'strict', 'or', '(', 'sec0', 'in', '(', "b'\\2'", ',', "b'\\3'", ')', ')', ':', 'is_y_odd', '=', '(', 'sec0', '!=', "b'\\2'", ')', 'return', 'generator', '.', 'points_for_x', '(', 'x', ')', '[', 'is_y_odd', ']', 'raise', 'EncodingError', '(', '"bad sec encoding for public key"', ')']
Convert a public key in sec binary format to a public pair.
['Convert', 'a', 'public', 'key', 'in', 'sec', 'binary', 'format', 'to', 'a', 'public', 'pair', '.']
train
https://github.com/richardkiss/pycoin/blob/1e8d0d9fe20ce0347b97847bb529cd1bd84c7442/pycoin/encoding/sec.py#L18-L34
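A standalone sketch of the SEC byte layout the record above decodes, with no pycoin dependency. The coordinates here are random placeholders, not a real curve point; the snippet only illustrates the prefix-byte convention (0x04 = uncompressed, 0x02/0x03 = compressed with even/odd Y).

# Standalone layout sketch -- placeholder values, not real elliptic-curve math.
import os

x = int.from_bytes(os.urandom(32), "big")   # placeholder X coordinate
y = int.from_bytes(os.urandom(32), "big")   # placeholder Y coordinate

sec_uncompressed = b"\x04" + x.to_bytes(32, "big") + y.to_bytes(32, "big")
sec_compressed = bytes([0x02 | (y & 1)]) + x.to_bytes(32, "big")

assert int.from_bytes(sec_uncompressed[1:33], "big") == x
assert int.from_bytes(sec_uncompressed[33:65], "big") == y
assert len(sec_compressed) == 33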
2,631
mdickinson/bigfloat
bigfloat/core.py
hypot
def hypot(x, y, context=None):
    """
    Return the Euclidean norm of x and y, i.e., the square root of the sum
    of the squares of x and y.

    """
    return _apply_function_in_current_context(
        BigFloat,
        mpfr.mpfr_hypot,
        (
            BigFloat._implicit_convert(x),
            BigFloat._implicit_convert(y),
        ),
        context,
    )
python
['def', 'hypot', '(', 'x', ',', 'y', ',', 'context', '=', 'None', ')', ':', 'return', '_apply_function_in_current_context', '(', 'BigFloat', ',', 'mpfr', '.', 'mpfr_hypot', ',', '(', 'BigFloat', '.', '_implicit_convert', '(', 'x', ')', ',', 'BigFloat', '.', '_implicit_convert', '(', 'y', ')', ',', ')', ',', 'context', ',', ')']
Return the Euclidean norm of x and y, i.e., the square root of the sum of the squares of x and y.
['Return', 'the', 'Euclidean', 'norm', 'of', 'x', 'and', 'y', 'i', '.', 'e', '.', 'the', 'square', 'root', 'of', 'the', 'sum', 'of', 'the', 'squares', 'of', 'x', 'and', 'y', '.']
train
https://github.com/mdickinson/bigfloat/blob/e5fdd1048615191ed32a2b7460e14b3b3ff24662/bigfloat/core.py#L2226-L2240
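A hedged usage sketch for the record above; it assumes `hypot` and the `precision` context manager are re-exported at the package level (the function itself lives in `bigfloat.core` per the record's path).

# Hedged usage sketch -- top-level re-exports are an assumption.
from bigfloat import hypot, precision

with precision(100):        # 100-bit working precision
    print(hypot(3, 4))      # sqrt(3**2 + 4**2) == 5, as a BigFloat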
2,632
gc3-uzh-ch/elasticluster
elasticluster/providers/azure_provider.py
AzureCloudProvider._init_az_api
def _init_az_api(self):
    """
    Initialise client objects for talking to Azure API.

    This is in a separate function so to be called by ``__init__``
    and ``__setstate__``.
    """
    with self.__lock:
        if self._resource_client is None:
            log.debug("Making Azure `ServicePrincipalcredentials` object"
                      " with tenant=%r, client_id=%r, secret=%r ...",
                      self.tenant_id, self.client_id,
                      ('<redacted>' if self.secret else None))
            credentials = ServicePrincipalCredentials(
                tenant=self.tenant_id,
                client_id=self.client_id,
                secret=self.secret,
            )
            log.debug("Initializing Azure `ComputeManagementclient` ...")
            self._compute_client = ComputeManagementClient(credentials, self.subscription_id)
            log.debug("Initializing Azure `NetworkManagementclient` ...")
            self._network_client = NetworkManagementClient(credentials, self.subscription_id)
            log.debug("Initializing Azure `ResourceManagementclient` ...")
            self._resource_client = ResourceManagementClient(credentials, self.subscription_id)
            log.info("Azure API clients initialized.")
python
['def', '_init_az_api', '(', 'self', ')', ':', 'with', 'self', '.', '__lock', ':', 'if', 'self', '.', '_resource_client', 'is', 'None', ':', 'log', '.', 'debug', '(', '"Making Azure `ServicePrincipalcredentials` object"', '" with tenant=%r, client_id=%r, secret=%r ..."', ',', 'self', '.', 'tenant_id', ',', 'self', '.', 'client_id', ',', '(', "'<redacted>'", 'if', 'self', '.', 'secret', 'else', 'None', ')', ')', 'credentials', '=', 'ServicePrincipalCredentials', '(', 'tenant', '=', 'self', '.', 'tenant_id', ',', 'client_id', '=', 'self', '.', 'client_id', ',', 'secret', '=', 'self', '.', 'secret', ',', ')', 'log', '.', 'debug', '(', '"Initializing Azure `ComputeManagementclient` ..."', ')', 'self', '.', '_compute_client', '=', 'ComputeManagementClient', '(', 'credentials', ',', 'self', '.', 'subscription_id', ')', 'log', '.', 'debug', '(', '"Initializing Azure `NetworkManagementclient` ..."', ')', 'self', '.', '_network_client', '=', 'NetworkManagementClient', '(', 'credentials', ',', 'self', '.', 'subscription_id', ')', 'log', '.', 'debug', '(', '"Initializing Azure `ResourceManagementclient` ..."', ')', 'self', '.', '_resource_client', '=', 'ResourceManagementClient', '(', 'credentials', ',', 'self', '.', 'subscription_id', ')', 'log', '.', 'info', '(', '"Azure API clients initialized."', ')']
Initialise client objects for talking to Azure API. This is in a separate function so to be called by ``__init__`` and ``__setstate__``.
['Initialise', 'client', 'objects', 'for', 'talking', 'to', 'Azure', 'API', '.']
train
https://github.com/gc3-uzh-ch/elasticluster/blob/e6345633308c76de13b889417df572815aabe744/elasticluster/providers/azure_provider.py#L179-L203
2,633
Julian/Filesystems
filesystems/common.py
_exists
def _exists(fs, path):
    """
    Check that the given path exists on the filesystem.

    Note that unlike `os.path.exists`, we *do* propagate file system errors
    other than a non-existent path or non-existent directory component.

    E.g., should EPERM or ELOOP be raised, an exception will bubble up.
    """
    try:
        fs.stat(path)
    except (exceptions.FileNotFound, exceptions.NotADirectory):
        return False
    return True
python
['def', '_exists', '(', 'fs', ',', 'path', ')', ':', 'try', ':', 'fs', '.', 'stat', '(', 'path', ')', 'except', '(', 'exceptions', '.', 'FileNotFound', ',', 'exceptions', '.', 'NotADirectory', ')', ':', 'return', 'False', 'return', 'True']
Check that the given path exists on the filesystem. Note that unlike `os.path.exists`, we *do* propagate file system errors other than a non-existent path or non-existent directory component. E.g., should EPERM or ELOOP be raised, an exception will bubble up.
['Check', 'that', 'the', 'given', 'path', 'exists', 'on', 'the', 'filesystem', '.']
train
https://github.com/Julian/Filesystems/blob/f366e877d6970712bb91d47167209ee2d1e489c5/filesystems/common.py#L157-L170
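A self-contained analogue of the same idea against the real filesystem, using only the standard library: only "does not exist"-style errors map to False, while anything else (permissions, symlink loops, ...) propagates.

# Standalone sketch of the same existence-check semantics with os.stat.
import os

def exists_strict(path):
    try:
        os.stat(path)
    except (FileNotFoundError, NotADirectoryError):
        return False
    return True

print(exists_strict("/tmp"))            # True on most Unix systems
print(exists_strict("/no/such/path"))   # False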
2,634
twisted/mantissa
xmantissa/liveform.py
ListChangeParameter._prepareSubForm
def _prepareSubForm(self, liveForm):
    """
    Utility for turning liveforms into subforms, and compacting them as
    necessary.

    @param liveForm: a liveform.
    @type liveForm: L{LiveForm}

    @return: a sub form.
    @rtype: L{LiveForm}
    """
    liveForm = liveForm.asSubForm(self.name)  # XXX Why did this work???
    # if we are compact, tell the liveform so it can tell its parameters
    # also
    if self._parameterIsCompact:
        liveForm.compact()
    return liveForm
python
['def', '_prepareSubForm', '(', 'self', ',', 'liveForm', ')', ':', 'liveForm', '=', 'liveForm', '.', 'asSubForm', '(', 'self', '.', 'name', ')', '# XXX Why did this work???', '# if we are compact, tell the liveform so it can tell its parameters', '# also', 'if', 'self', '.', '_parameterIsCompact', ':', 'liveForm', '.', 'compact', '(', ')', 'return', 'liveForm']
Utility for turning liveforms into subforms, and compacting them as necessary. @param liveForm: a liveform. @type liveForm: L{LiveForm} @return: a sub form. @rtype: L{LiveForm}
['Utility', 'for', 'turning', 'liveforms', 'into', 'subforms', 'and', 'compacting', 'them', 'as', 'necessary', '.']
train
https://github.com/twisted/mantissa/blob/53e5502aba23ce99be78b27f923a276593033fe8/xmantissa/liveform.py#L334-L350
2,635
eyeseast/propublica-congress
congress/bills.py
BillsClient.upcoming
def upcoming(self, chamber, congress=CURRENT_CONGRESS):
    "Shortcut for upcoming bills"
    path = "bills/upcoming/{chamber}.json".format(chamber=chamber)
    return self.fetch(path)
python
['def', 'upcoming', '(', 'self', ',', 'chamber', ',', 'congress', '=', 'CURRENT_CONGRESS', ')', ':', 'path', '=', '"bills/upcoming/{chamber}.json"', '.', 'format', '(', 'chamber', '=', 'chamber', ')', 'return', 'self', '.', 'fetch', '(', 'path', ')']
Shortcut for upcoming bills
['Shortcut', 'for', 'upcoming', 'bills']
train
https://github.com/eyeseast/propublica-congress/blob/03e519341063c5703080b4723112f1831816c77e/congress/bills.py#L66-L69
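A hedged usage sketch for the client method above; the `Congress` entry point, the `bills` attribute, and the environment-variable name are assumptions about how this package is normally wired up.

# Hedged usage sketch -- entry point and env-var name are assumptions.
import os
from congress import Congress

client = Congress(os.environ["PROPUBLICA_API_KEY"])   # hypothetical env var
print(client.bills.upcoming("house"))                  # upcoming House bills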
2,636
tensorflow/tensor2tensor
tensor2tensor/models/image_transformer_2d.py
img2img_transformer_base_tpu
def img2img_transformer_base_tpu():
    """Hparams for training img2img_transformer on tpu."""
    hparams = img2img_transformer_base()
    update_hparams_for_tpu(hparams)
    hparams.batch_size = 2
    hparams.num_heads = 4   # heads are expensive on tpu
    hparams.num_decoder_layers = 8
    hparams.num_encoder_layers = 4
    hparams.shared_embedding_and_softmax_weights = False
    return hparams
python
['def', 'img2img_transformer_base_tpu', '(', ')', ':', 'hparams', '=', 'img2img_transformer_base', '(', ')', 'update_hparams_for_tpu', '(', 'hparams', ')', 'hparams', '.', 'batch_size', '=', '2', 'hparams', '.', 'num_heads', '=', '4', '# heads are expensive on tpu', 'hparams', '.', 'num_decoder_layers', '=', '8', 'hparams', '.', 'num_encoder_layers', '=', '4', 'hparams', '.', 'shared_embedding_and_softmax_weights', '=', 'False', 'return', 'hparams']
Hparams for training img2img_transformer on tpu.
['Hparams', 'for', 'training', 'img2img_transformer', 'on', 'tpu', '.']
train
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/image_transformer_2d.py#L794-L803
2,637
saltstack/salt
salt/modules/gentoo_service.py
disable
def disable(name, **kwargs):
    '''
    Disable the named service to start at boot

    CLI Example:

    .. code-block:: bash

        salt '*' service.disable <service name> <runlevels=single-runlevel>
        salt '*' service.disable <service name> <runlevels=[runlevel1,runlevel2]>
    '''
    levels = []
    if 'runlevels' in kwargs:
        requested_levels = set(kwargs['runlevels'] if isinstance(kwargs['runlevels'],
                                                                 list) else [kwargs['runlevels']])
        levels = _disable_delta(name, requested_levels)
        if not levels:
            return True
    cmd = _enable_disable_cmd(name, 'delete', levels)
    return not _ret_code(cmd)
python
['def', 'disable', '(', 'name', ',', '*', '*', 'kwargs', ')', ':', 'levels', '=', '[', ']', 'if', "'runlevels'", 'in', 'kwargs', ':', 'requested_levels', '=', 'set', '(', 'kwargs', '[', "'runlevels'", ']', 'if', 'isinstance', '(', 'kwargs', '[', "'runlevels'", ']', ',', 'list', ')', 'else', '[', 'kwargs', '[', "'runlevels'", ']', ']', ')', 'levels', '=', '_disable_delta', '(', 'name', ',', 'requested_levels', ')', 'if', 'not', 'levels', ':', 'return', 'True', 'cmd', '=', '_enable_disable_cmd', '(', 'name', ',', "'delete'", ',', 'levels', ')', 'return', 'not', '_ret_code', '(', 'cmd', ')']
Disable the named service to start at boot CLI Example: .. code-block:: bash salt '*' service.disable <service name> <runlevels=single-runlevel> salt '*' service.disable <service name> <runlevels=[runlevel1,runlevel2]>
['Disable', 'the', 'named', 'service', 'to', 'start', 'at', 'boot']
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/gentoo_service.py#L309-L328
2,638
boriel/zxbasic
arch/zx48k/translator.py
Translator.has_control_chars
def has_control_chars(i):
    """ Returns true if the passed token is an unknown string
    or a constant string having control chars (inverse, etc
    """
    if not hasattr(i, 'type_'):
        return False

    if i.type_ != Type.string:
        return False

    if i.token in ('VAR', 'PARAMDECL'):
        return True  # We don't know what an alphanumeric variable will hold

    if i.token == 'STRING':
        for c in i.value:
            if 15 < ord(c) < 22:  # is it an attr char?
                return True
        return False

    for j in i.children:
        if Translator.has_control_chars(j):
            return True

    return False
python
['def', 'has_control_chars', '(', 'i', ')', ':', 'if', 'not', 'hasattr', '(', 'i', ',', "'type_'", ')', ':', 'return', 'False', 'if', 'i', '.', 'type_', '!=', 'Type', '.', 'string', ':', 'return', 'False', 'if', 'i', '.', 'token', 'in', '(', "'VAR'", ',', "'PARAMDECL'", ')', ':', 'return', 'True', "# We don't know what an alphanumeric variable will hold", 'if', 'i', '.', 'token', '==', "'STRING'", ':', 'for', 'c', 'in', 'i', '.', 'value', ':', 'if', '15', '<', 'ord', '(', 'c', ')', '<', '22', ':', '# is it an attr char?', 'return', 'True', 'return', 'False', 'for', 'j', 'in', 'i', '.', 'children', ':', 'if', 'Translator', '.', 'has_control_chars', '(', 'j', ')', ':', 'return', 'True', 'return', 'False']
Returns true if the passed token is an unknown string or a constant string having control chars (inverse, etc
['Returns', 'true', 'if', 'the', 'passed', 'token', 'is', 'an', 'unknown', 'string', 'or', 'a', 'constant', 'string', 'having', 'control', 'chars', '(', 'inverse', 'etc']
train
https://github.com/boriel/zxbasic/blob/23b28db10e41117805bdb3c0f78543590853b132/arch/zx48k/translator.py#L1291-L1314
2,639
rvswift/EB
EB/builder/utilities/performance.py
get_sort_order
def get_sort_order(molecules):
    """
    Count up the total number of scores whose values are positve and negative. If a
    greater number are negative, then sort in ascending order (e.g. for binding energy estimates)
    Otherwise, sort in descending order (e.g. for similarity values)
    """

    neg_count = 0
    pos_count = 0

    for index in range(len(molecules)):
        scoreList = molecules[index].GetProp('scores')
        for element in scoreList:
            if float(element) > 0:
                pos_count += 1
            elif float(element) < 0:
                neg_count += 1

    if pos_count > neg_count:
        sort_order = 'dsc'
    else:
        sort_order = 'asc'

    return sort_order
python
['def', 'get_sort_order', '(', 'molecules', ')', ':', 'neg_count', '=', '0', 'pos_count', '=', '0', 'for', 'index', 'in', 'range', '(', 'len', '(', 'molecules', ')', ')', ':', 'scoreList', '=', 'molecules', '[', 'index', ']', '.', 'GetProp', '(', "'scores'", ')', 'for', 'element', 'in', 'scoreList', ':', 'if', 'float', '(', 'element', ')', '>', '0', ':', 'pos_count', '+=', '1', 'elif', 'float', '(', 'element', ')', '<', '0', ':', 'neg_count', '+=', '1', 'if', 'pos_count', '>', 'neg_count', ':', 'sort_order', '=', "'dsc'", 'else', ':', 'sort_order', '=', "'asc'", 'return', 'sort_order']
Count up the total number of scores whose values are positve and negative. If a greater number are negative, then sort in ascending order (e.g. for binding energy estimates) Otherwise, sort in descending order (e.g. for similarity values)
['Count', 'up', 'the', 'total', 'number', 'of', 'scores', 'whose', 'values', 'are', 'positve', 'and', 'negative', '.', 'If', 'a', 'greater', 'number', 'are', 'negative', 'then', 'sort', 'in', 'ascending', 'order', '(', 'e', '.', 'g', '.', 'for', 'binding', 'energy', 'estimates', ')', 'Otherwise', 'sort', 'in', 'descending', 'order', '(', 'e', '.', 'g', '.', 'for', 'similarity', 'values', ')']
train
https://github.com/rvswift/EB/blob/341880b79faf8147dc9fa6e90438531cd09fabcc/EB/builder/utilities/performance.py#L5-L29
2,640
pyQode/pyqode.core
pyqode/core/panels/global_checker.py
GlobalCheckerPanel.get_marker_size
def get_marker_size(self):
    """
    Gets the size of a message marker.

    :return: QSize
    """
    h = self.get_marker_height()
    if h < 1:
        h = 1
    return QtCore.QSize(self.sizeHint().width() / 2, h)
python
['def', 'get_marker_size', '(', 'self', ')', ':', 'h', '=', 'self', '.', 'get_marker_height', '(', ')', 'if', 'h', '<', '1', ':', 'h', '=', '1', 'return', 'QtCore', '.', 'QSize', '(', 'self', '.', 'sizeHint', '(', ')', '.', 'width', '(', ')', '/', '2', ',', 'h', ')']
Gets the size of a message marker. :return: QSize
['Gets', 'the', 'size', 'of', 'a', 'message', 'marker', '.', ':', 'return', ':', 'QSize']
train
https://github.com/pyQode/pyqode.core/blob/a99ec6cd22d519394f613309412f8329dc4e90cb/pyqode/core/panels/global_checker.py#L92-L100
2,641
ssalentin/plip
plip/modules/preparation.py
BindingSite.find_hal
def find_hal(self, atoms):
    """Look for halogen bond acceptors (Y-{O|P|N|S}, with Y=C,P,S)"""
    data = namedtuple('hal_acceptor', 'o o_orig_idx y y_orig_idx')
    a_set = []
    # All oxygens, nitrogen, sulfurs with neighboring carbon, phosphor, nitrogen or sulfur
    for a in [at for at in atoms if at.atomicnum in [8, 7, 16]]:
        n_atoms = [na for na in pybel.ob.OBAtomAtomIter(a.OBAtom) if na.GetAtomicNum() in [6, 7, 15, 16]]
        if len(n_atoms) == 1:  # Proximal atom
            o_orig_idx = self.Mapper.mapid(a.idx, mtype=self.mtype, bsid=self.bsid)
            y_orig_idx = self.Mapper.mapid(n_atoms[0].GetIdx(), mtype=self.mtype, bsid=self.bsid)
            a_set.append(data(o=a, o_orig_idx=o_orig_idx, y=pybel.Atom(n_atoms[0]), y_orig_idx=y_orig_idx))
    return a_set
python
['def', 'find_hal', '(', 'self', ',', 'atoms', ')', ':', 'data', '=', 'namedtuple', '(', "'hal_acceptor'", ',', "'o o_orig_idx y y_orig_idx'", ')', 'a_set', '=', '[', ']', '# All oxygens, nitrogen, sulfurs with neighboring carbon, phosphor, nitrogen or sulfur', 'for', 'a', 'in', '[', 'at', 'for', 'at', 'in', 'atoms', 'if', 'at', '.', 'atomicnum', 'in', '[', '8', ',', '7', ',', '16', ']', ']', ':', 'n_atoms', '=', '[', 'na', 'for', 'na', 'in', 'pybel', '.', 'ob', '.', 'OBAtomAtomIter', '(', 'a', '.', 'OBAtom', ')', 'if', 'na', '.', 'GetAtomicNum', '(', ')', 'in', '[', '6', ',', '7', ',', '15', ',', '16', ']', ']', 'if', 'len', '(', 'n_atoms', ')', '==', '1', ':', '# Proximal atom', 'o_orig_idx', '=', 'self', '.', 'Mapper', '.', 'mapid', '(', 'a', '.', 'idx', ',', 'mtype', '=', 'self', '.', 'mtype', ',', 'bsid', '=', 'self', '.', 'bsid', ')', 'y_orig_idx', '=', 'self', '.', 'Mapper', '.', 'mapid', '(', 'n_atoms', '[', '0', ']', '.', 'GetIdx', '(', ')', ',', 'mtype', '=', 'self', '.', 'mtype', ',', 'bsid', '=', 'self', '.', 'bsid', ')', 'a_set', '.', 'append', '(', 'data', '(', 'o', '=', 'a', ',', 'o_orig_idx', '=', 'o_orig_idx', ',', 'y', '=', 'pybel', '.', 'Atom', '(', 'n_atoms', '[', '0', ']', ')', ',', 'y_orig_idx', '=', 'y_orig_idx', ')', ')', 'return', 'a_set']
Look for halogen bond acceptors (Y-{O|P|N|S}, with Y=C,P,S)
['Look', 'for', 'halogen', 'bond', 'acceptors', '(', 'Y', '-', '{', 'O|P|N|S', '}', 'with', 'Y', '=', 'C', 'P', 'S', ')']
train
https://github.com/ssalentin/plip/blob/906c8d36463689779b403f6c2c9ed06174acaf9a/plip/modules/preparation.py#L893-L904
2,642
jkeyes/python-docraptor
docraptor/__init__.py
DocRaptor.status
def status(self, status_id, raise_exception_on_failure=False):
    """Return the status of the generation job."""
    query = {"output": "json", "user_credentials": self.api_key}

    resp = requests.get(
        "%sstatus/%s" % (self._url, status_id), params=query, timeout=self._timeout
    )

    if raise_exception_on_failure and resp.status_code != 200:
        raise DocumentStatusFailure(resp.content, resp.status_code)

    if resp.status_code == 200:
        as_json = json.loads(resp.content)
        if as_json["status"] == "completed":
            as_json["download_key"] = _get_download_key(as_json["download_url"])
        return as_json
    return resp
python
['def', 'status', '(', 'self', ',', 'status_id', ',', 'raise_exception_on_failure', '=', 'False', ')', ':', 'query', '=', '{', '"output"', ':', '"json"', ',', '"user_credentials"', ':', 'self', '.', 'api_key', '}', 'resp', '=', 'requests', '.', 'get', '(', '"%sstatus/%s"', '%', '(', 'self', '.', '_url', ',', 'status_id', ')', ',', 'params', '=', 'query', ',', 'timeout', '=', 'self', '.', '_timeout', ')', 'if', 'raise_exception_on_failure', 'and', 'resp', '.', 'status_code', '!=', '200', ':', 'raise', 'DocumentStatusFailure', '(', 'resp', '.', 'content', ',', 'resp', '.', 'status_code', ')', 'if', 'resp', '.', 'status_code', '==', '200', ':', 'as_json', '=', 'json', '.', 'loads', '(', 'resp', '.', 'content', ')', 'if', 'as_json', '[', '"status"', ']', '==', '"completed"', ':', 'as_json', '[', '"download_key"', ']', '=', '_get_download_key', '(', 'as_json', '[', '"download_url"', ']', ')', 'return', 'as_json', 'return', 'resp']
Return the status of the generation job.
['Return', 'the', 'status', 'of', 'the', 'generation', 'job', '.']
train
https://github.com/jkeyes/python-docraptor/blob/4be5b641f92820539b2c42165fec9251a6603dea/docraptor/__init__.py#L124-L140
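A hedged usage sketch for the record above. Only the `status()` call shape comes from the record; the constructor argument and the status-id value are assumptions about this client library.

# Hedged usage sketch -- constructor signature and status id are assumptions.
from docraptor import DocRaptor

client = DocRaptor("YOUR_API_KEY")        # hypothetical credentials
job = client.status("some-status-id")     # id returned by an async create call
if isinstance(job, dict) and job.get("status") == "completed":
    print("ready, download key:", job["download_key"])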
2,643
wandb/client
wandb/vendor/prompt_toolkit/layout/containers.py
VSplit._divide_widths
def _divide_widths(self, cli, width):
    """
    Return the widths for all columns.
    Or None when there is not enough space.
    """
    if not self.children:
        return []

    # Calculate widths.
    given_dimensions = self.get_dimensions(cli) if self.get_dimensions else None

    def get_dimension_for_child(c, index):
        if given_dimensions and given_dimensions[index] is not None:
            return given_dimensions[index]
        else:
            return c.preferred_width(cli, width)

    dimensions = [get_dimension_for_child(c, index)
                  for index, c in enumerate(self.children)]

    # Sum dimensions
    sum_dimensions = sum_layout_dimensions(dimensions)

    # If there is not enough space for both.
    # Don't do anything.
    if sum_dimensions.min > width:
        return

    # Find optimal sizes. (Start with minimal size, increase until we cover
    # the whole height.)
    sizes = [d.min for d in dimensions]

    child_generator = take_using_weights(
        items=list(range(len(dimensions))),
        weights=[d.weight for d in dimensions])

    i = next(child_generator)

    while sum(sizes) < min(width, sum_dimensions.preferred):
        # Increase until we meet at least the 'preferred' size.
        if sizes[i] < dimensions[i].preferred:
            sizes[i] += 1
        i = next(child_generator)

    while sum(sizes) < min(width, sum_dimensions.max):
        # Increase until we use all the available space.
        if sizes[i] < dimensions[i].max:
            sizes[i] += 1
        i = next(child_generator)

    return sizes
python
def _divide_widths(self, cli, width): """ Return the widths for all columns. Or None when there is not enough space. """ if not self.children: return [] # Calculate widths. given_dimensions = self.get_dimensions(cli) if self.get_dimensions else None def get_dimension_for_child(c, index): if given_dimensions and given_dimensions[index] is not None: return given_dimensions[index] else: return c.preferred_width(cli, width) dimensions = [get_dimension_for_child(c, index) for index, c in enumerate(self.children)] # Sum dimensions sum_dimensions = sum_layout_dimensions(dimensions) # If there is not enough space for both. # Don't do anything. if sum_dimensions.min > width: return # Find optimal sizes. (Start with minimal size, increase until we cover # the whole height.) sizes = [d.min for d in dimensions] child_generator = take_using_weights( items=list(range(len(dimensions))), weights=[d.weight for d in dimensions]) i = next(child_generator) while sum(sizes) < min(width, sum_dimensions.preferred): # Increase until we meet at least the 'preferred' size. if sizes[i] < dimensions[i].preferred: sizes[i] += 1 i = next(child_generator) while sum(sizes) < min(width, sum_dimensions.max): # Increase until we use all the available space. if sizes[i] < dimensions[i].max: sizes[i] += 1 i = next(child_generator) return sizes
['def', '_divide_widths', '(', 'self', ',', 'cli', ',', 'width', ')', ':', 'if', 'not', 'self', '.', 'children', ':', 'return', '[', ']', '# Calculate widths.', 'given_dimensions', '=', 'self', '.', 'get_dimensions', '(', 'cli', ')', 'if', 'self', '.', 'get_dimensions', 'else', 'None', 'def', 'get_dimension_for_child', '(', 'c', ',', 'index', ')', ':', 'if', 'given_dimensions', 'and', 'given_dimensions', '[', 'index', ']', 'is', 'not', 'None', ':', 'return', 'given_dimensions', '[', 'index', ']', 'else', ':', 'return', 'c', '.', 'preferred_width', '(', 'cli', ',', 'width', ')', 'dimensions', '=', '[', 'get_dimension_for_child', '(', 'c', ',', 'index', ')', 'for', 'index', ',', 'c', 'in', 'enumerate', '(', 'self', '.', 'children', ')', ']', '# Sum dimensions', 'sum_dimensions', '=', 'sum_layout_dimensions', '(', 'dimensions', ')', '# If there is not enough space for both.', "# Don't do anything.", 'if', 'sum_dimensions', '.', 'min', '>', 'width', ':', 'return', '# Find optimal sizes. (Start with minimal size, increase until we cover', '# the whole height.)', 'sizes', '=', '[', 'd', '.', 'min', 'for', 'd', 'in', 'dimensions', ']', 'child_generator', '=', 'take_using_weights', '(', 'items', '=', 'list', '(', 'range', '(', 'len', '(', 'dimensions', ')', ')', ')', ',', 'weights', '=', '[', 'd', '.', 'weight', 'for', 'd', 'in', 'dimensions', ']', ')', 'i', '=', 'next', '(', 'child_generator', ')', 'while', 'sum', '(', 'sizes', ')', '<', 'min', '(', 'width', ',', 'sum_dimensions', '.', 'preferred', ')', ':', "# Increase until we meet at least the 'preferred' size.", 'if', 'sizes', '[', 'i', ']', '<', 'dimensions', '[', 'i', ']', '.', 'preferred', ':', 'sizes', '[', 'i', ']', '+=', '1', 'i', '=', 'next', '(', 'child_generator', ')', 'while', 'sum', '(', 'sizes', ')', '<', 'min', '(', 'width', ',', 'sum_dimensions', '.', 'max', ')', ':', '# Increase until we use all the available space.', 'if', 'sizes', '[', 'i', ']', '<', 'dimensions', '[', 'i', ']', '.', 'max', ':', 'sizes', '[', 'i', ']', '+=', '1', 'i', '=', 'next', '(', 'child_generator', ')', 'return', 'sizes']
Return the widths for all columns. Or None when there is not enough space.
['Return', 'the', 'widths', 'for', 'all', 'columns', '.', 'Or', 'None', 'when', 'there', 'is', 'not', 'enough', 'space', '.']
train
https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/vendor/prompt_toolkit/layout/containers.py#L266-L315
2,644
IAMconsortium/pyam
pyam/core.py
IamDataFrame.check_aggregate
def check_aggregate(self, variable, components=None, exclude_on_fail=False,
                    multiplier=1, **kwargs):
    """Check whether a timeseries matches the aggregation of its components

    Parameters
    ----------
    variable: str
        variable to be checked for matching aggregation of sub-categories
    components: list of str, default None
        list of variables, defaults to all sub-categories of `variable`
    exclude_on_fail: boolean, default False
        flag scenarios failing validation as `exclude: True`
    multiplier: number, default 1
        factor when comparing variable and sum of components
    kwargs: passed to `np.isclose()`
    """
    # compute aggregate from components, return None if no components
    df_components = self.aggregate(variable, components)
    if df_components is None:
        return

    # filter and groupby data, use `pd.Series.align` for matching index
    rows = self._apply_filters(variable=variable)
    df_variable, df_components = (
        _aggregate(self.data[rows], 'variable').align(df_components)
    )

    # use `np.isclose` for checking match
    diff = df_variable[~np.isclose(df_variable, multiplier * df_components,
                                   **kwargs)]

    if len(diff):
        msg = '`{}` - {} of {} rows are not aggregates of components'
        logger().info(msg.format(variable, len(diff), len(df_variable)))

        if exclude_on_fail:
            self._exclude_on_fail(diff.index.droplevel([2, 3, 4]))

        return IamDataFrame(diff, variable=variable).timeseries()
python
def check_aggregate(self, variable, components=None, exclude_on_fail=False,
                    multiplier=1, **kwargs):
    """Check whether a timeseries matches the aggregation of its components

    Parameters
    ----------
    variable: str
        variable to be checked for matching aggregation of sub-categories
    components: list of str, default None
        list of variables, defaults to all sub-categories of `variable`
    exclude_on_fail: boolean, default False
        flag scenarios failing validation as `exclude: True`
    multiplier: number, default 1
        factor when comparing variable and sum of components
    kwargs: passed to `np.isclose()`
    """
    # compute aggregate from components, return None if no components
    df_components = self.aggregate(variable, components)
    if df_components is None:
        return

    # filter and groupby data, use `pd.Series.align` for matching index
    rows = self._apply_filters(variable=variable)
    df_variable, df_components = (
        _aggregate(self.data[rows], 'variable').align(df_components)
    )

    # use `np.isclose` for checking match
    diff = df_variable[~np.isclose(df_variable, multiplier * df_components,
                                   **kwargs)]

    if len(diff):
        msg = '`{}` - {} of {} rows are not aggregates of components'
        logger().info(msg.format(variable, len(diff), len(df_variable)))

        if exclude_on_fail:
            self._exclude_on_fail(diff.index.droplevel([2, 3, 4]))

        return IamDataFrame(diff, variable=variable).timeseries()
['def', 'check_aggregate', '(', 'self', ',', 'variable', ',', 'components', '=', 'None', ',', 'exclude_on_fail', '=', 'False', ',', 'multiplier', '=', '1', ',', '*', '*', 'kwargs', ')', ':', '# compute aggregate from components, return None if no components', 'df_components', '=', 'self', '.', 'aggregate', '(', 'variable', ',', 'components', ')', 'if', 'df_components', 'is', 'None', ':', 'return', '# filter and groupby data, use `pd.Series.align` for matching index', 'rows', '=', 'self', '.', '_apply_filters', '(', 'variable', '=', 'variable', ')', 'df_variable', ',', 'df_components', '=', '(', '_aggregate', '(', 'self', '.', 'data', '[', 'rows', ']', ',', "'variable'", ')', '.', 'align', '(', 'df_components', ')', ')', '# use `np.isclose` for checking match', 'diff', '=', 'df_variable', '[', '~', 'np', '.', 'isclose', '(', 'df_variable', ',', 'multiplier', '*', 'df_components', ',', '*', '*', 'kwargs', ')', ']', 'if', 'len', '(', 'diff', ')', ':', 'msg', '=', "'`{}` - {} of {} rows are not aggregates of components'", 'logger', '(', ')', '.', 'info', '(', 'msg', '.', 'format', '(', 'variable', ',', 'len', '(', 'diff', ')', ',', 'len', '(', 'df_variable', ')', ')', ')', 'if', 'exclude_on_fail', ':', 'self', '.', '_exclude_on_fail', '(', 'diff', '.', 'index', '.', 'droplevel', '(', '[', '2', ',', '3', ',', '4', ']', ')', ')', 'return', 'IamDataFrame', '(', 'diff', ',', 'variable', '=', 'variable', ')', '.', 'timeseries', '(', ')']
Check whether a timeseries matches the aggregation of its components

Parameters
----------
variable: str
    variable to be checked for matching aggregation of sub-categories
components: list of str, default None
    list of variables, defaults to all sub-categories of `variable`
exclude_on_fail: boolean, default False
    flag scenarios failing validation as `exclude: True`
multiplier: number, default 1
    factor when comparing variable and sum of components
kwargs: passed to `np.isclose()`
['Check', 'whether', 'a', 'timeseries', 'matches', 'the', 'aggregation', 'of', 'its', 'components']
train
https://github.com/IAMconsortium/pyam/blob/4077929ca6e7be63a0e3ecf882c5f1da97b287bf/pyam/core.py#L709-L747
2,645
briancappello/flask-unchained
flask_unchained/bundles/security/services/security_service.py
SecurityService.send_mail
def send_mail(self, subject, to, template, **template_ctx):
    """
    Utility method to send mail with the `mail` template context.
    """
    if not self.mail:
        from warnings import warn
        warn('Attempting to send mail without the mail bundle installed! '
             'Please install it, or fix your configuration.')
        return

    self.mail.send(subject, to, template, **dict(
        **self.security.run_ctx_processor('mail'),
        **template_ctx))
python
def send_mail(self, subject, to, template, **template_ctx):
    """
    Utility method to send mail with the `mail` template context.
    """
    if not self.mail:
        from warnings import warn
        warn('Attempting to send mail without the mail bundle installed! '
             'Please install it, or fix your configuration.')
        return

    self.mail.send(subject, to, template, **dict(
        **self.security.run_ctx_processor('mail'),
        **template_ctx))
['def', 'send_mail', '(', 'self', ',', 'subject', ',', 'to', ',', 'template', ',', '*', '*', 'template_ctx', ')', ':', 'if', 'not', 'self', '.', 'mail', ':', 'from', 'warnings', 'import', 'warn', 'warn', '(', "'Attempting to send mail without the mail bundle installed! '", "'Please install it, or fix your configuration.'", ')', 'return', 'self', '.', 'mail', '.', 'send', '(', 'subject', ',', 'to', ',', 'template', ',', '*', '*', 'dict', '(', '*', '*', 'self', '.', 'security', '.', 'run_ctx_processor', '(', "'mail'", ')', ',', '*', '*', 'template_ctx', ')', ')']
Utility method to send mail with the `mail` template context.
['Utility', 'method', 'to', 'send', 'mail', 'with', 'the', 'mail', 'template', 'context', '.']
train
https://github.com/briancappello/flask-unchained/blob/4d536cb90e2cc4829c1c05f2c74d3e22901a1399/flask_unchained/bundles/security/services/security_service.py#L265-L277
2,646
mardix/pylot
pylot/component/views.py
error_view
def error_view(template_dir=None):
    """
    Create the Error view
    Must be instantiated

    import error_view
    ErrorView = error_view()

    :param template_dir: The directory containing the view pages
    :return:
    """
    if not template_dir:
        template_dir = "Pylot/Error"
    template_page = "%s/index.html" % template_dir

    class Error(Pylot):
        """
        Error Views
        """
        @classmethod
        def register(cls, app, **kwargs):
            super(cls, cls).register(app, **kwargs)

            @app.errorhandler(400)
            def error_400(error):
                return cls.index(error, 400)

            @app.errorhandler(401)
            def error_401(error):
                return cls.index(error, 401)

            @app.errorhandler(403)
            def error_403(error):
                return cls.index(error, 403)

            @app.errorhandler(404)
            def error_404(error):
                return cls.index(error, 404)

            @app.errorhandler(500)
            def error_500(error):
                return cls.index(error, 500)

            @app.errorhandler(503)
            def error_503(error):
                return cls.index(error, 503)

        @classmethod
        def index(cls, error, code):
            cls.meta_(title="Error %s" % code)
            return cls.render(error=error, view_template=template_page), code

    return Error
python
def error_view(template_dir=None):
    """
    Create the Error view
    Must be instantiated

    import error_view
    ErrorView = error_view()

    :param template_dir: The directory containing the view pages
    :return:
    """
    if not template_dir:
        template_dir = "Pylot/Error"
    template_page = "%s/index.html" % template_dir

    class Error(Pylot):
        """
        Error Views
        """
        @classmethod
        def register(cls, app, **kwargs):
            super(cls, cls).register(app, **kwargs)

            @app.errorhandler(400)
            def error_400(error):
                return cls.index(error, 400)

            @app.errorhandler(401)
            def error_401(error):
                return cls.index(error, 401)

            @app.errorhandler(403)
            def error_403(error):
                return cls.index(error, 403)

            @app.errorhandler(404)
            def error_404(error):
                return cls.index(error, 404)

            @app.errorhandler(500)
            def error_500(error):
                return cls.index(error, 500)

            @app.errorhandler(503)
            def error_503(error):
                return cls.index(error, 503)

        @classmethod
        def index(cls, error, code):
            cls.meta_(title="Error %s" % code)
            return cls.render(error=error, view_template=template_page), code

    return Error
['def', 'error_view', '(', 'template_dir', '=', 'None', ')', ':', 'if', 'not', 'template_dir', ':', 'template_dir', '=', '"Pylot/Error"', 'template_page', '=', '"%s/index.html"', '%', 'template_dir', 'class', 'Error', '(', 'Pylot', ')', ':', '"""\n Error Views\n """', '@', 'classmethod', 'def', 'register', '(', 'cls', ',', 'app', ',', '*', '*', 'kwargs', ')', ':', 'super', '(', 'cls', ',', 'cls', ')', '.', 'register', '(', 'app', ',', '*', '*', 'kwargs', ')', '@', 'app', '.', 'errorhandler', '(', '400', ')', 'def', 'error_400', '(', 'error', ')', ':', 'return', 'cls', '.', 'index', '(', 'error', ',', '400', ')', '@', 'app', '.', 'errorhandler', '(', '401', ')', 'def', 'error_401', '(', 'error', ')', ':', 'return', 'cls', '.', 'index', '(', 'error', ',', '401', ')', '@', 'app', '.', 'errorhandler', '(', '403', ')', 'def', 'error_403', '(', 'error', ')', ':', 'return', 'cls', '.', 'index', '(', 'error', ',', '403', ')', '@', 'app', '.', 'errorhandler', '(', '404', ')', 'def', 'error_404', '(', 'error', ')', ':', 'return', 'cls', '.', 'index', '(', 'error', ',', '404', ')', '@', 'app', '.', 'errorhandler', '(', '500', ')', 'def', 'error_500', '(', 'error', ')', ':', 'return', 'cls', '.', 'index', '(', 'error', ',', '500', ')', '@', 'app', '.', 'errorhandler', '(', '503', ')', 'def', 'error_503', '(', 'error', ')', ':', 'return', 'cls', '.', 'index', '(', 'error', ',', '503', ')', '@', 'classmethod', 'def', 'index', '(', 'cls', ',', 'error', ',', 'code', ')', ':', 'cls', '.', 'meta_', '(', 'title', '=', '"Error %s"', '%', 'code', ')', 'return', 'cls', '.', 'render', '(', 'error', '=', 'error', ',', 'view_template', '=', 'template_page', ')', ',', 'code', 'return', 'Error']
Create the Error view
Must be instantiated

import error_view
ErrorView = error_view()

:param template_dir: The directory containing the view pages
:return:
['Create', 'the', 'Error', 'view', 'Must', 'be', 'instantiated']
train
https://github.com/mardix/pylot/blob/506a33a56ebdfc0925b94015e8cf98ccb16a143c/pylot/component/views.py#L1680-L1733
2,647
assamite/creamas
creamas/image.py
fractal_dimension
def fractal_dimension(image):
    '''Estimates the fractal dimension of an image with box counting.
    Counts pixels with value 0 as empty and everything else as non-empty.
    Input image has to be grayscale.

    See, e.g `Wikipedia <https://en.wikipedia.org/wiki/Fractal_dimension>`_.

    :param image: numpy.ndarray
    :returns: estimation of fractal dimension
    :rtype: float
    '''
    pixels = []
    for i in range(image.shape[0]):
        for j in range(image.shape[1]):
            if image[i, j] > 0:
                pixels.append((i, j))

    lx = image.shape[1]
    ly = image.shape[0]
    pixels = np.array(pixels)
    if len(pixels) < 2:
        return 0
    scales = np.logspace(1, 4, num=20, endpoint=False, base=2)
    Ns = []
    for scale in scales:
        H, edges = np.histogramdd(pixels,
                                  bins=(np.arange(0, lx, scale),
                                        np.arange(0, ly, scale)))
        H_sum = np.sum(H > 0)
        if H_sum == 0:
            H_sum = 1
        Ns.append(H_sum)

    coeffs = np.polyfit(np.log(scales), np.log(Ns), 1)
    hausdorff_dim = -coeffs[0]
    return hausdorff_dim
python
def fractal_dimension(image):
    '''Estimates the fractal dimension of an image with box counting.
    Counts pixels with value 0 as empty and everything else as non-empty.
    Input image has to be grayscale.

    See, e.g `Wikipedia <https://en.wikipedia.org/wiki/Fractal_dimension>`_.

    :param image: numpy.ndarray
    :returns: estimation of fractal dimension
    :rtype: float
    '''
    pixels = []
    for i in range(image.shape[0]):
        for j in range(image.shape[1]):
            if image[i, j] > 0:
                pixels.append((i, j))

    lx = image.shape[1]
    ly = image.shape[0]
    pixels = np.array(pixels)
    if len(pixels) < 2:
        return 0
    scales = np.logspace(1, 4, num=20, endpoint=False, base=2)
    Ns = []
    for scale in scales:
        H, edges = np.histogramdd(pixels,
                                  bins=(np.arange(0, lx, scale),
                                        np.arange(0, ly, scale)))
        H_sum = np.sum(H > 0)
        if H_sum == 0:
            H_sum = 1
        Ns.append(H_sum)

    coeffs = np.polyfit(np.log(scales), np.log(Ns), 1)
    hausdorff_dim = -coeffs[0]
    return hausdorff_dim
['def', 'fractal_dimension', '(', 'image', ')', ':', 'pixels', '=', '[', ']', 'for', 'i', 'in', 'range', '(', 'image', '.', 'shape', '[', '0', ']', ')', ':', 'for', 'j', 'in', 'range', '(', 'image', '.', 'shape', '[', '1', ']', ')', ':', 'if', 'image', '[', 'i', ',', 'j', ']', '>', '0', ':', 'pixels', '.', 'append', '(', '(', 'i', ',', 'j', ')', ')', 'lx', '=', 'image', '.', 'shape', '[', '1', ']', 'ly', '=', 'image', '.', 'shape', '[', '0', ']', 'pixels', '=', 'np', '.', 'array', '(', 'pixels', ')', 'if', 'len', '(', 'pixels', ')', '<', '2', ':', 'return', '0', 'scales', '=', 'np', '.', 'logspace', '(', '1', ',', '4', ',', 'num', '=', '20', ',', 'endpoint', '=', 'False', ',', 'base', '=', '2', ')', 'Ns', '=', '[', ']', 'for', 'scale', 'in', 'scales', ':', 'H', ',', 'edges', '=', 'np', '.', 'histogramdd', '(', 'pixels', ',', 'bins', '=', '(', 'np', '.', 'arange', '(', '0', ',', 'lx', ',', 'scale', ')', ',', 'np', '.', 'arange', '(', '0', ',', 'ly', ',', 'scale', ')', ')', ')', 'H_sum', '=', 'np', '.', 'sum', '(', 'H', '>', '0', ')', 'if', 'H_sum', '==', '0', ':', 'H_sum', '=', '1', 'Ns', '.', 'append', '(', 'H_sum', ')', 'coeffs', '=', 'np', '.', 'polyfit', '(', 'np', '.', 'log', '(', 'scales', ')', ',', 'np', '.', 'log', '(', 'Ns', ')', ',', '1', ')', 'hausdorff_dim', '=', '-', 'coeffs', '[', '0', ']', 'return', 'hausdorff_dim']
Estimates the fractal dimension of an image with box counting. Counts pixels with value 0 as empty and everything else as non-empty. Input image has to be grayscale. See, e.g `Wikipedia <https://en.wikipedia.org/wiki/Fractal_dimension>`_. :param image: numpy.ndarray :returns: estimation of fractal dimension :rtype: float
['Estimates', 'the', 'fractal', 'dimension', 'of', 'an', 'image', 'with', 'box', 'counting', '.', 'Counts', 'pixels', 'with', 'value', '0', 'as', 'empty', 'and', 'everything', 'else', 'as', 'non', '-', 'empty', '.', 'Input', 'image', 'has', 'to', 'be', 'grayscale', '.']
train
https://github.com/assamite/creamas/blob/54dc3e31c97a3f938e58272f8ab80b6bcafeff58/creamas/image.py#L11-L46
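A hedged usage sketch of the function above, assuming it is importable from creamas.image as the path in this record suggests; only NumPy is needed to build the synthetic input.

import numpy as np
from creamas.image import fractal_dimension  # module path taken from this record

# 64x64 grayscale image with a filled square of non-zero pixels.
image = np.zeros((64, 64), dtype=np.uint8)
image[16:48, 16:48] = 255

# A filled 2-D region should give an estimate close to 2.0.
print(fractal_dimension(image))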
2,648
googleapis/google-cloud-python
api_core/google/api_core/page_iterator.py
HTTPIterator._verify_params
def _verify_params(self):
    """Verifies the parameters don't use any reserved parameter.

    Raises:
        ValueError: If a reserved parameter is used.
    """
    reserved_in_use = self._RESERVED_PARAMS.intersection(self.extra_params)
    if reserved_in_use:
        raise ValueError("Using a reserved parameter", reserved_in_use)
python
def _verify_params(self):
    """Verifies the parameters don't use any reserved parameter.

    Raises:
        ValueError: If a reserved parameter is used.
    """
    reserved_in_use = self._RESERVED_PARAMS.intersection(self.extra_params)
    if reserved_in_use:
        raise ValueError("Using a reserved parameter", reserved_in_use)
['def', '_verify_params', '(', 'self', ')', ':', 'reserved_in_use', '=', 'self', '.', '_RESERVED_PARAMS', '.', 'intersection', '(', 'self', '.', 'extra_params', ')', 'if', 'reserved_in_use', ':', 'raise', 'ValueError', '(', '"Using a reserved parameter"', ',', 'reserved_in_use', ')']
Verifies the parameters don't use any reserved parameter. Raises: ValueError: If a reserved parameter is used.
['Verifies', 'the', 'parameters', 'don', 't', 'use', 'any', 'reserved', 'parameter', '.']
train
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/api_core/google/api_core/page_iterator.py#L343-L351
2,649
pytorch/text
torchtext/datasets/sst.py
SST.iters
def iters(cls, batch_size=32, device=0, root='.data', vectors=None, **kwargs):
    """Create iterator objects for splits of the SST dataset.

    Arguments:
        batch_size: Batch_size
        device: Device to create batches on. Use - 1 for CPU and None for
            the currently active GPU device.
        root: The root directory that the dataset's zip archive will be
            expanded into; therefore the directory in whose trees
            subdirectory the data files will be stored.
        vectors: one of the available pretrained vectors or a list with each
            element one of the available pretrained vectors (see Vocab.load_vectors)
        Remaining keyword arguments: Passed to the splits method.
    """
    TEXT = data.Field()
    LABEL = data.Field(sequential=False)

    train, val, test = cls.splits(TEXT, LABEL, root=root, **kwargs)

    TEXT.build_vocab(train, vectors=vectors)
    LABEL.build_vocab(train)

    return data.BucketIterator.splits(
        (train, val, test), batch_size=batch_size, device=device)
python
def iters(cls, batch_size=32, device=0, root='.data', vectors=None, **kwargs):
    """Create iterator objects for splits of the SST dataset.

    Arguments:
        batch_size: Batch_size
        device: Device to create batches on. Use - 1 for CPU and None for
            the currently active GPU device.
        root: The root directory that the dataset's zip archive will be
            expanded into; therefore the directory in whose trees
            subdirectory the data files will be stored.
        vectors: one of the available pretrained vectors or a list with each
            element one of the available pretrained vectors (see Vocab.load_vectors)
        Remaining keyword arguments: Passed to the splits method.
    """
    TEXT = data.Field()
    LABEL = data.Field(sequential=False)

    train, val, test = cls.splits(TEXT, LABEL, root=root, **kwargs)

    TEXT.build_vocab(train, vectors=vectors)
    LABEL.build_vocab(train)

    return data.BucketIterator.splits(
        (train, val, test), batch_size=batch_size, device=device)
['def', 'iters', '(', 'cls', ',', 'batch_size', '=', '32', ',', 'device', '=', '0', ',', 'root', '=', "'.data'", ',', 'vectors', '=', 'None', ',', '*', '*', 'kwargs', ')', ':', 'TEXT', '=', 'data', '.', 'Field', '(', ')', 'LABEL', '=', 'data', '.', 'Field', '(', 'sequential', '=', 'False', ')', 'train', ',', 'val', ',', 'test', '=', 'cls', '.', 'splits', '(', 'TEXT', ',', 'LABEL', ',', 'root', '=', 'root', ',', '*', '*', 'kwargs', ')', 'TEXT', '.', 'build_vocab', '(', 'train', ',', 'vectors', '=', 'vectors', ')', 'LABEL', '.', 'build_vocab', '(', 'train', ')', 'return', 'data', '.', 'BucketIterator', '.', 'splits', '(', '(', 'train', ',', 'val', ',', 'test', ')', ',', 'batch_size', '=', 'batch_size', ',', 'device', '=', 'device', ')']
Create iterator objects for splits of the SST dataset.

Arguments:
    batch_size: Batch_size
    device: Device to create batches on. Use - 1 for CPU and None for
        the currently active GPU device.
    root: The root directory that the dataset's zip archive will be
        expanded into; therefore the directory in whose trees
        subdirectory the data files will be stored.
    vectors: one of the available pretrained vectors or a list with each
        element one of the available pretrained vectors (see Vocab.load_vectors)
    Remaining keyword arguments: Passed to the splits method.
['Create', 'iterator', 'objects', 'for', 'splits', 'of', 'the', 'SST', 'dataset', '.']
train
https://github.com/pytorch/text/blob/26bfce6869dc704f1d86792f9a681d453d7e7bb8/torchtext/datasets/sst.py#L81-L104
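A hedged usage sketch of the classmethod above, assuming a legacy torchtext version that still ships datasets.SST and that the corpus can be downloaded on first use; the batch size and device value are arbitrary.

from torchtext import datasets

# Builds vocabularies on the training split and returns bucketed iterators.
train_iter, val_iter, test_iter = datasets.SST.iters(batch_size=64, device=-1)

for batch in train_iter:
    print(batch.text.shape, batch.label.shape)
    break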
2,650
SatelliteQE/nailgun
nailgun/entity_mixins.py
EntityReadMixin.read_raw
def read_raw(self, params=None):
    """Get information about the current entity.

    Make an HTTP GET call to ``self.path('self')``. Return the response.

    :return: A ``requests.response`` object.

    """
    path_type = self._meta.get('read_type', 'self')
    return client.get(
        self.path(path_type),
        params=params,
        **self._server_config.get_client_kwargs()
    )
python
def read_raw(self, params=None):
    """Get information about the current entity.

    Make an HTTP GET call to ``self.path('self')``. Return the response.

    :return: A ``requests.response`` object.

    """
    path_type = self._meta.get('read_type', 'self')
    return client.get(
        self.path(path_type),
        params=params,
        **self._server_config.get_client_kwargs()
    )
['def', 'read_raw', '(', 'self', ',', 'params', '=', 'None', ')', ':', 'path_type', '=', 'self', '.', '_meta', '.', 'get', '(', "'read_type'", ',', "'self'", ')', 'return', 'client', '.', 'get', '(', 'self', '.', 'path', '(', 'path_type', ')', ',', 'params', '=', 'params', ',', '*', '*', 'self', '.', '_server_config', '.', 'get_client_kwargs', '(', ')', ')']
Get information about the current entity. Make an HTTP GET call to ``self.path('self')``. Return the response. :return: A ``requests.response`` object.
['Get', 'information', 'about', 'the', 'current', 'entity', '.']
train
https://github.com/SatelliteQE/nailgun/blob/c36d8c20862e87bf6975bd48ac1ca40a9e634eaa/nailgun/entity_mixins.py#L719-L733
2,651
gem/oq-engine
openquake/baselib/hdf5.py
File.save_vlen
def save_vlen(self, key, data):
    """
    Save a sequence of variable-length arrays

    :param key: name of the dataset
    :param data: data to store as a list of arrays
    """
    shape = (None,) + data[0].shape[:-1]
    try:
        dset = self[key]
    except KeyError:
        vdt = h5py.special_dtype(vlen=data[0].dtype)
        dset = create(self, key, vdt, shape, fillvalue=None)
    nbytes = dset.attrs.get('nbytes', 0)
    totlen = dset.attrs.get('totlen', 0)
    for i, val in enumerate(data):
        nbytes += val.nbytes
        totlen += len(val)
    length = len(dset)
    dset.resize((length + len(data),) + shape[1:])
    for i, arr in enumerate(data):
        dset[length + i] = arr
    dset.attrs['nbytes'] = nbytes
    dset.attrs['totlen'] = totlen
python
def save_vlen(self, key, data):
    """
    Save a sequence of variable-length arrays

    :param key: name of the dataset
    :param data: data to store as a list of arrays
    """
    shape = (None,) + data[0].shape[:-1]
    try:
        dset = self[key]
    except KeyError:
        vdt = h5py.special_dtype(vlen=data[0].dtype)
        dset = create(self, key, vdt, shape, fillvalue=None)
    nbytes = dset.attrs.get('nbytes', 0)
    totlen = dset.attrs.get('totlen', 0)
    for i, val in enumerate(data):
        nbytes += val.nbytes
        totlen += len(val)
    length = len(dset)
    dset.resize((length + len(data),) + shape[1:])
    for i, arr in enumerate(data):
        dset[length + i] = arr
    dset.attrs['nbytes'] = nbytes
    dset.attrs['totlen'] = totlen
['def', 'save_vlen', '(', 'self', ',', 'key', ',', 'data', ')', ':', 'shape', '=', '(', 'None', ',', ')', '+', 'data', '[', '0', ']', '.', 'shape', '[', ':', '-', '1', ']', 'try', ':', 'dset', '=', 'self', '[', 'key', ']', 'except', 'KeyError', ':', 'vdt', '=', 'h5py', '.', 'special_dtype', '(', 'vlen', '=', 'data', '[', '0', ']', '.', 'dtype', ')', 'dset', '=', 'create', '(', 'self', ',', 'key', ',', 'vdt', ',', 'shape', ',', 'fillvalue', '=', 'None', ')', 'nbytes', '=', 'dset', '.', 'attrs', '.', 'get', '(', "'nbytes'", ',', '0', ')', 'totlen', '=', 'dset', '.', 'attrs', '.', 'get', '(', "'totlen'", ',', '0', ')', 'for', 'i', ',', 'val', 'in', 'enumerate', '(', 'data', ')', ':', 'nbytes', '+=', 'val', '.', 'nbytes', 'totlen', '+=', 'len', '(', 'val', ')', 'length', '=', 'len', '(', 'dset', ')', 'dset', '.', 'resize', '(', '(', 'length', '+', 'len', '(', 'data', ')', ',', ')', '+', 'shape', '[', '1', ':', ']', ')', 'for', 'i', ',', 'arr', 'in', 'enumerate', '(', 'data', ')', ':', 'dset', '[', 'length', '+', 'i', ']', '=', 'arr', 'dset', '.', 'attrs', '[', "'nbytes'", ']', '=', 'nbytes', 'dset', '.', 'attrs', '[', "'totlen'", ']', '=', 'totlen']
Save a sequence of variable-length arrays :param key: name of the dataset :param data: data to store as a list of arrays
['Save', 'a', 'sequence', 'of', 'variable', '-', 'length', 'arrays']
train
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/baselib/hdf5.py#L292-L315
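A hedged usage sketch of the method above, assuming openquake's File class behaves like an h5py.File (context manager, mode argument); the file path and dataset key are placeholders.

import numpy
from openquake.baselib.hdf5 import File  # module path taken from this record

data = [numpy.array([1, 2, 3]), numpy.array([4, 5])]
# Placeholder path and key; File is assumed to open like h5py.File.
with File('/tmp/demo.hdf5', 'w') as f:
    f.save_vlen('varlen/data', data)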
2,652
galaxy-genome-annotation/python-apollo
arrow/commands/annotations/set_boundaries.py
cli
def cli(ctx, feature_id, start, end, organism="", sequence=""):
    """Set the boundaries of a genomic feature

    Output:

        A standard apollo feature dictionary ({"features": [{...}]})
    """
    return ctx.gi.annotations.set_boundaries(feature_id, start, end, organism=organism, sequence=sequence)
python
def cli(ctx, feature_id, start, end, organism="", sequence=""):
    """Set the boundaries of a genomic feature

    Output:

        A standard apollo feature dictionary ({"features": [{...}]})
    """
    return ctx.gi.annotations.set_boundaries(feature_id, start, end, organism=organism, sequence=sequence)
['def', 'cli', '(', 'ctx', ',', 'feature_id', ',', 'start', ',', 'end', ',', 'organism', '=', '""', ',', 'sequence', '=', '""', ')', ':', 'return', 'ctx', '.', 'gi', '.', 'annotations', '.', 'set_boundaries', '(', 'feature_id', ',', 'start', ',', 'end', ',', 'organism', '=', 'organism', ',', 'sequence', '=', 'sequence', ')']
Set the boundaries of a genomic feature Output: A standard apollo feature dictionary ({"features": [{...}]})
['Set', 'the', 'boundaries', 'of', 'a', 'genomic', 'feature']
train
https://github.com/galaxy-genome-annotation/python-apollo/blob/2bc9991302abe4402ec2885dcaac35915475b387/arrow/commands/annotations/set_boundaries.py#L23-L30
2,653
blockcypher/blockcypher-python
blockcypher/utils.py
get_txn_outputs
def get_txn_outputs(raw_tx_hex, output_addr_list, coin_symbol):
    '''
    Used to verify a transaction hex does what's expected of it.

    Must supply a list of output addresses so that the library can try to
    convert from script to address using both pubkey and script.

    Returns a list of the following form:
        [{'value': 12345, 'address': '1abc...'}, ...]

    Uses @vbuterin's decoding methods.
    '''
    # Defensive checks:
    err_msg = 'Library not able to parse %s transactions' % coin_symbol
    assert lib_can_deserialize_cs(coin_symbol), err_msg
    assert isinstance(output_addr_list, (list, tuple))
    for output_addr in output_addr_list:
        assert is_valid_address(output_addr), output_addr

    output_addr_set = set(output_addr_list)  # speed optimization
    outputs = []
    deserialized_tx = deserialize(str(raw_tx_hex))
    for out in deserialized_tx.get('outs', []):
        output = {'value': out['value']}
        # determine if the address is a pubkey address, script address, or op_return
        pubkey_addr = script_to_address(out['script'],
                                        vbyte=COIN_SYMBOL_MAPPINGS[coin_symbol]['vbyte_pubkey'])
        script_addr = script_to_address(out['script'],
                                        vbyte=COIN_SYMBOL_MAPPINGS[coin_symbol]['vbyte_script'])
        nulldata = out['script'] if out['script'][0:2] == '6a' else None
        if pubkey_addr in output_addr_set:
            address = pubkey_addr
            output['address'] = address
        elif script_addr in output_addr_set:
            address = script_addr
            output['address'] = address
        elif nulldata:
            output['script'] = nulldata
            output['script_type'] = 'null-data'
        else:
            raise Exception('Script %s Does Not Contain a Valid Output Address: %s' % (
                out['script'],
                output_addr_set,
            ))
        outputs.append(output)
    return outputs
python
def get_txn_outputs(raw_tx_hex, output_addr_list, coin_symbol):
    '''
    Used to verify a transaction hex does what's expected of it.

    Must supply a list of output addresses so that the library can try to
    convert from script to address using both pubkey and script.

    Returns a list of the following form:
        [{'value': 12345, 'address': '1abc...'}, ...]

    Uses @vbuterin's decoding methods.
    '''
    # Defensive checks:
    err_msg = 'Library not able to parse %s transactions' % coin_symbol
    assert lib_can_deserialize_cs(coin_symbol), err_msg
    assert isinstance(output_addr_list, (list, tuple))
    for output_addr in output_addr_list:
        assert is_valid_address(output_addr), output_addr

    output_addr_set = set(output_addr_list)  # speed optimization
    outputs = []
    deserialized_tx = deserialize(str(raw_tx_hex))
    for out in deserialized_tx.get('outs', []):
        output = {'value': out['value']}
        # determine if the address is a pubkey address, script address, or op_return
        pubkey_addr = script_to_address(out['script'],
                                        vbyte=COIN_SYMBOL_MAPPINGS[coin_symbol]['vbyte_pubkey'])
        script_addr = script_to_address(out['script'],
                                        vbyte=COIN_SYMBOL_MAPPINGS[coin_symbol]['vbyte_script'])
        nulldata = out['script'] if out['script'][0:2] == '6a' else None
        if pubkey_addr in output_addr_set:
            address = pubkey_addr
            output['address'] = address
        elif script_addr in output_addr_set:
            address = script_addr
            output['address'] = address
        elif nulldata:
            output['script'] = nulldata
            output['script_type'] = 'null-data'
        else:
            raise Exception('Script %s Does Not Contain a Valid Output Address: %s' % (
                out['script'],
                output_addr_set,
            ))
        outputs.append(output)
    return outputs
['def', 'get_txn_outputs', '(', 'raw_tx_hex', ',', 'output_addr_list', ',', 'coin_symbol', ')', ':', '# Defensive checks:', 'err_msg', '=', "'Library not able to parse %s transactions'", '%', 'coin_symbol', 'assert', 'lib_can_deserialize_cs', '(', 'coin_symbol', ')', ',', 'err_msg', 'assert', 'isinstance', '(', 'output_addr_list', ',', '(', 'list', ',', 'tuple', ')', ')', 'for', 'output_addr', 'in', 'output_addr_list', ':', 'assert', 'is_valid_address', '(', 'output_addr', ')', ',', 'output_addr', 'output_addr_set', '=', 'set', '(', 'output_addr_list', ')', '# speed optimization', 'outputs', '=', '[', ']', 'deserialized_tx', '=', 'deserialize', '(', 'str', '(', 'raw_tx_hex', ')', ')', 'for', 'out', 'in', 'deserialized_tx', '.', 'get', '(', "'outs'", ',', '[', ']', ')', ':', 'output', '=', '{', "'value'", ':', 'out', '[', "'value'", ']', '}', '# determine if the address is a pubkey address, script address, or op_return', 'pubkey_addr', '=', 'script_to_address', '(', 'out', '[', "'script'", ']', ',', 'vbyte', '=', 'COIN_SYMBOL_MAPPINGS', '[', 'coin_symbol', ']', '[', "'vbyte_pubkey'", ']', ')', 'script_addr', '=', 'script_to_address', '(', 'out', '[', "'script'", ']', ',', 'vbyte', '=', 'COIN_SYMBOL_MAPPINGS', '[', 'coin_symbol', ']', '[', "'vbyte_script'", ']', ')', 'nulldata', '=', 'out', '[', "'script'", ']', 'if', 'out', '[', "'script'", ']', '[', '0', ':', '2', ']', '==', "'6a'", 'else', 'None', 'if', 'pubkey_addr', 'in', 'output_addr_set', ':', 'address', '=', 'pubkey_addr', 'output', '[', "'address'", ']', '=', 'address', 'elif', 'script_addr', 'in', 'output_addr_set', ':', 'address', '=', 'script_addr', 'output', '[', "'address'", ']', '=', 'address', 'elif', 'nulldata', ':', 'output', '[', "'script'", ']', '=', 'nulldata', 'output', '[', "'script_type'", ']', '=', "'null-data'", 'else', ':', 'raise', 'Exception', '(', "'Script %s Does Not Contain a Valid Output Address: %s'", '%', '(', 'out', '[', "'script'", ']', ',', 'output_addr_set', ',', ')', ')', 'outputs', '.', 'append', '(', 'output', ')', 'return', 'outputs']
Used to verify a transaction hex does what's expected of it. Must supply a list of output addresses so that the library can try to convert from script to address using both pubkey and script. Returns a list of the following form: [{'value': 12345, 'address': '1abc...'}, ...] Uses @vbuterin's decoding methods.
['Used', 'to', 'verify', 'a', 'transaction', 'hex', 'does', 'what', 's', 'expected', 'of', 'it', '.']
train
https://github.com/blockcypher/blockcypher-python/blob/7601ea21916957ff279384fd699527ff9c28a56e/blockcypher/utils.py#L153-L201
2,654
log2timeline/plaso
plaso/engine/extractors.py
EventExtractor._GetSignatureMatchParserNames
def _GetSignatureMatchParserNames(self, file_object):
    """Determines if a file-like object matches one of the known signatures.

    Args:
        file_object (file): file-like object whose contents will be checked
            for known signatures.

    Returns:
        list[str]: parser names for which the contents of the file-like
            object matches their known signatures.
    """
    parser_names = []
    scan_state = pysigscan.scan_state()
    self._file_scanner.scan_file_object(scan_state, file_object)

    for scan_result in iter(scan_state.scan_results):
        format_specification = (
            self._formats_with_signatures.GetSpecificationBySignature(
                scan_result.identifier))

        if format_specification.identifier not in parser_names:
            parser_names.append(format_specification.identifier)

    return parser_names
python
def _GetSignatureMatchParserNames(self, file_object):
    """Determines if a file-like object matches one of the known signatures.

    Args:
        file_object (file): file-like object whose contents will be checked
            for known signatures.

    Returns:
        list[str]: parser names for which the contents of the file-like
            object matches their known signatures.
    """
    parser_names = []
    scan_state = pysigscan.scan_state()
    self._file_scanner.scan_file_object(scan_state, file_object)

    for scan_result in iter(scan_state.scan_results):
        format_specification = (
            self._formats_with_signatures.GetSpecificationBySignature(
                scan_result.identifier))

        if format_specification.identifier not in parser_names:
            parser_names.append(format_specification.identifier)

    return parser_names
['def', '_GetSignatureMatchParserNames', '(', 'self', ',', 'file_object', ')', ':', 'parser_names', '=', '[', ']', 'scan_state', '=', 'pysigscan', '.', 'scan_state', '(', ')', 'self', '.', '_file_scanner', '.', 'scan_file_object', '(', 'scan_state', ',', 'file_object', ')', 'for', 'scan_result', 'in', 'iter', '(', 'scan_state', '.', 'scan_results', ')', ':', 'format_specification', '=', '(', 'self', '.', '_formats_with_signatures', '.', 'GetSpecificationBySignature', '(', 'scan_result', '.', 'identifier', ')', ')', 'if', 'format_specification', '.', 'identifier', 'not', 'in', 'parser_names', ':', 'parser_names', '.', 'append', '(', 'format_specification', '.', 'identifier', ')', 'return', 'parser_names']
Determines if a file-like object matches one of the known signatures.

Args:
    file_object (file): file-like object whose contents will be checked
        for known signatures.

Returns:
    list[str]: parser names for which the contents of the file-like
        object matches their known signatures.
['Determines', 'if', 'a', 'file', '-', 'like', 'object', 'matches', 'one', 'of', 'the', 'known', 'signatures', '.']
train
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/engine/extractors.py#L80-L103
2,655
ethereum/web3.py
web3/_utils/decorators.py
deprecated_for
def deprecated_for(replace_message):
    """
    Decorate a deprecated function, with info about what to use instead, like:

    @deprecated_for("toBytes()")
    def toAscii(arg):
        ...
    """
    def decorator(to_wrap):
        @functools.wraps(to_wrap)
        def wrapper(*args, **kwargs):
            warnings.warn(
                "%s is deprecated in favor of %s" % (to_wrap.__name__, replace_message),
                category=DeprecationWarning,
                stacklevel=2)
            return to_wrap(*args, **kwargs)
        return wrapper
    return decorator
python
def deprecated_for(replace_message):
    """
    Decorate a deprecated function, with info about what to use instead, like:

    @deprecated_for("toBytes()")
    def toAscii(arg):
        ...
    """
    def decorator(to_wrap):
        @functools.wraps(to_wrap)
        def wrapper(*args, **kwargs):
            warnings.warn(
                "%s is deprecated in favor of %s" % (to_wrap.__name__, replace_message),
                category=DeprecationWarning,
                stacklevel=2)
            return to_wrap(*args, **kwargs)
        return wrapper
    return decorator
['def', 'deprecated_for', '(', 'replace_message', ')', ':', 'def', 'decorator', '(', 'to_wrap', ')', ':', '@', 'functools', '.', 'wraps', '(', 'to_wrap', ')', 'def', 'wrapper', '(', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'warnings', '.', 'warn', '(', '"%s is deprecated in favor of %s"', '%', '(', 'to_wrap', '.', '__name__', ',', 'replace_message', ')', ',', 'category', '=', 'DeprecationWarning', ',', 'stacklevel', '=', '2', ')', 'return', 'to_wrap', '(', '*', 'args', ',', '*', '*', 'kwargs', ')', 'return', 'wrapper', 'return', 'decorator']
Decorate a deprecated function, with info about what to use instead, like:

@deprecated_for("toBytes()")
def toAscii(arg):
    ...
['Decorate', 'a', 'deprecated', 'function', 'with', 'info', 'about', 'what', 'to', 'use', 'instead', 'like', ':']
train
https://github.com/ethereum/web3.py/blob/71b8bf03dc6d332dd97d8902a38ffab6f8b5a5ab/web3/_utils/decorators.py#L42-L59
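A minimal sketch of applying the decorator above; the toAscii/to_text names are illustrative rather than taken from the library, and the import path is the one shown in this record.

import warnings
from web3._utils.decorators import deprecated_for  # path taken from this record

@deprecated_for("to_text()")
def toAscii(arg):
    return arg.decode("ascii")

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    toAscii(b"hello")
    print(caught[0].message)  # toAscii is deprecated in favor of to_text()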
2,656
kplindegaard/smbus2
smbus2/smbus2.py
SMBus.read_word_data
def read_word_data(self, i2c_addr, register, force=None):
    """
    Read a single word (2 bytes) from a given register.

    :param i2c_addr: i2c address
    :type i2c_addr: int
    :param register: Register to read
    :type register: int
    :param force:
    :type force: Boolean
    :return: 2-byte word
    :rtype: int
    """
    self._set_address(i2c_addr, force=force)
    msg = i2c_smbus_ioctl_data.create(
        read_write=I2C_SMBUS_READ, command=register, size=I2C_SMBUS_WORD_DATA
    )
    ioctl(self.fd, I2C_SMBUS, msg)
    return msg.data.contents.word
python
def read_word_data(self, i2c_addr, register, force=None):
    """
    Read a single word (2 bytes) from a given register.

    :param i2c_addr: i2c address
    :type i2c_addr: int
    :param register: Register to read
    :type register: int
    :param force:
    :type force: Boolean
    :return: 2-byte word
    :rtype: int
    """
    self._set_address(i2c_addr, force=force)
    msg = i2c_smbus_ioctl_data.create(
        read_write=I2C_SMBUS_READ, command=register, size=I2C_SMBUS_WORD_DATA
    )
    ioctl(self.fd, I2C_SMBUS, msg)
    return msg.data.contents.word
['def', 'read_word_data', '(', 'self', ',', 'i2c_addr', ',', 'register', ',', 'force', '=', 'None', ')', ':', 'self', '.', '_set_address', '(', 'i2c_addr', ',', 'force', '=', 'force', ')', 'msg', '=', 'i2c_smbus_ioctl_data', '.', 'create', '(', 'read_write', '=', 'I2C_SMBUS_READ', ',', 'command', '=', 'register', ',', 'size', '=', 'I2C_SMBUS_WORD_DATA', ')', 'ioctl', '(', 'self', '.', 'fd', ',', 'I2C_SMBUS', ',', 'msg', ')', 'return', 'msg', '.', 'data', '.', 'contents', '.', 'word']
Read a single word (2 bytes) from a given register.

:param i2c_addr: i2c address
:type i2c_addr: int
:param register: Register to read
:type register: int
:param force:
:type force: Boolean
:return: 2-byte word
:rtype: int
['Read', 'a', 'single', 'word', '(', '2', 'bytes', ')', 'from', 'a', 'given', 'register', '.']
train
https://github.com/kplindegaard/smbus2/blob/a1088a03438dba84c266b73ad61b0c06750d0961/smbus2/smbus2.py#L409-L427
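A hedged usage sketch of the method above: reading a 16-bit register with smbus2. The bus number (1, typical on a Raspberry Pi) and device address (0x48) are placeholders for whatever hardware is actually attached.

from smbus2 import SMBus

with SMBus(1) as bus:
    # Read the 2-byte word at register 0x00 of the device at address 0x48.
    word = bus.read_word_data(0x48, 0x00)
    print("register 0x00 = 0x%04x" % word)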
2,657
andrewramsay/sk8-drivers
pysk8/calibration/sk8_calibration_gui.py
SK8Calibration.device_selected
def device_selected(self, index):
    """Handler for selecting a device from the list in the UI"""
    device = self.devicelist_model.itemFromIndex(index)
    print(device.device.addr)
    self.btnConnect.setEnabled(True)
python
def device_selected(self, index):
    """Handler for selecting a device from the list in the UI"""
    device = self.devicelist_model.itemFromIndex(index)
    print(device.device.addr)
    self.btnConnect.setEnabled(True)
['def', 'device_selected', '(', 'self', ',', 'index', ')', ':', 'device', '=', 'self', '.', 'devicelist_model', '.', 'itemFromIndex', '(', 'index', ')', 'print', '(', 'device', '.', 'device', '.', 'addr', ')', 'self', '.', 'btnConnect', '.', 'setEnabled', '(', 'True', ')']
Handler for selecting a device from the list in the UI
['Handler', 'for', 'selecting', 'a', 'device', 'from', 'the', 'list', 'in', 'the', 'UI']
train
https://github.com/andrewramsay/sk8-drivers/blob/67347a71762fb421f5ae65a595def5c7879e8b0c/pysk8/calibration/sk8_calibration_gui.py#L475-L479
2,658
HEPData/hepdata-converter
hepdata_converter/writers/array_writer.py
ArrayWriter.process_error_labels
def process_error_labels(value):
    """
    Process the error labels of a dependent variable 'value' to ensure uniqueness.
    """
    observed_error_labels = {}
    for error in value.get('errors', []):
        label = error.get('label', 'error')

        if label not in observed_error_labels:
            observed_error_labels[label] = 0
        observed_error_labels[label] += 1

        if observed_error_labels[label] > 1:
            error['label'] = label + '_' + str(observed_error_labels[label])

            # append "_1" to first error label that has a duplicate
            if observed_error_labels[label] == 2:
                for error1 in value.get('errors', []):
                    error1_label = error1.get('label', 'error')
                    if error1_label == label:
                        error1['label'] = label + "_1"
                        break
python
def process_error_labels(value):
    """
    Process the error labels of a dependent variable 'value' to ensure uniqueness.
    """
    observed_error_labels = {}
    for error in value.get('errors', []):
        label = error.get('label', 'error')

        if label not in observed_error_labels:
            observed_error_labels[label] = 0
        observed_error_labels[label] += 1

        if observed_error_labels[label] > 1:
            error['label'] = label + '_' + str(observed_error_labels[label])

            # append "_1" to first error label that has a duplicate
            if observed_error_labels[label] == 2:
                for error1 in value.get('errors', []):
                    error1_label = error1.get('label', 'error')
                    if error1_label == label:
                        error1['label'] = label + "_1"
                        break
['def', 'process_error_labels', '(', 'value', ')', ':', 'observed_error_labels', '=', '{', '}', 'for', 'error', 'in', 'value', '.', 'get', '(', "'errors'", ',', '[', ']', ')', ':', 'label', '=', 'error', '.', 'get', '(', "'label'", ',', "'error'", ')', 'if', 'label', 'not', 'in', 'observed_error_labels', ':', 'observed_error_labels', '[', 'label', ']', '=', '0', 'observed_error_labels', '[', 'label', ']', '+=', '1', 'if', 'observed_error_labels', '[', 'label', ']', '>', '1', ':', 'error', '[', "'label'", ']', '=', 'label', '+', "'_'", '+', 'str', '(', 'observed_error_labels', '[', 'label', ']', ')', '# append "_1" to first error label that has a duplicate', 'if', 'observed_error_labels', '[', 'label', ']', '==', '2', ':', 'for', 'error1', 'in', 'value', '.', 'get', '(', "'errors'", ',', '[', ']', ')', ':', 'error1_label', '=', 'error1', '.', 'get', '(', "'label'", ',', "'error'", ')', 'if', 'error1_label', '==', 'label', ':', 'error1', '[', "'label'", ']', '=', 'label', '+', '"_1"', 'break']
Process the error labels of a dependent variable 'value' to ensure uniqueness.
['Process', 'the', 'error', 'labels', 'of', 'a', 'dependent', 'variable', 'value', 'to', 'ensure', 'uniqueness', '.']
train
https://github.com/HEPData/hepdata-converter/blob/354271448980efba86f2f3d27b99d818e75fd90d/hepdata_converter/writers/array_writer.py#L140-L160
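A hedged sketch of the deduplication behaviour above, assuming the method can be called as a static method on ArrayWriter (imported from the path shown in this record); the input dictionary is made up.

from hepdata_converter.writers.array_writer import ArrayWriter  # path from this record

value = {'value': 10, 'errors': [{'label': 'sys'}, {'label': 'sys'}, {'label': 'stat'}]}
ArrayWriter.process_error_labels(value)  # assumed callable without an instance
print([e['label'] for e in value['errors']])  # ['sys_1', 'sys_2', 'stat']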
2,659
thorgate/django-esteid
esteid/generic.py
GenericDigitalSignViewMixin.destroy_digidoc_session
def destroy_digidoc_session(self):
    """ Closes DigiDocService session and clears request.session[I{DIGIDOC_SESSION_KEY}]
    """
    # cleanup data too
    self.destroy_digidoc_session_data()

    try:
        session = self.request.session[self.DIGIDOC_SESSION_KEY]

        if session:
            try:
                service = self.flat_service()
                service.session_code = session

                service.close_session()
            except DigiDocError:
                pass

        del self.request.session[self.DIGIDOC_SESSION_KEY]
    except KeyError:
        pass
python
def destroy_digidoc_session(self):
    """ Closes DigiDocService session and clears request.session[I{DIGIDOC_SESSION_KEY}]
    """
    # cleanup data too
    self.destroy_digidoc_session_data()

    try:
        session = self.request.session[self.DIGIDOC_SESSION_KEY]

        if session:
            try:
                service = self.flat_service()
                service.session_code = session

                service.close_session()
            except DigiDocError:
                pass

        del self.request.session[self.DIGIDOC_SESSION_KEY]
    except KeyError:
        pass
['def', 'destroy_digidoc_session', '(', 'self', ')', ':', '# cleanup data too', 'self', '.', 'destroy_digidoc_session_data', '(', ')', 'try', ':', 'session', '=', 'self', '.', 'request', '.', 'session', '[', 'self', '.', 'DIGIDOC_SESSION_KEY', ']', 'if', 'session', ':', 'try', ':', 'service', '=', 'self', '.', 'flat_service', '(', ')', 'service', '.', 'session_code', '=', 'session', 'service', '.', 'close_session', '(', ')', 'except', 'DigiDocError', ':', 'pass', 'del', 'self', '.', 'request', '.', 'session', '[', 'self', '.', 'DIGIDOC_SESSION_KEY', ']', 'except', 'KeyError', ':', 'pass']
Closes DigiDocService session and clears request.session[I{DIGIDOC_SESSION_KEY}]
['Closes', 'DigiDocService', 'session', 'and', 'clears', 'request', '.', 'session', '[', 'I', '{', 'DIGIDOC_SESSION_KEY', '}', ']']
train
https://github.com/thorgate/django-esteid/blob/407ae513e357fedea0e3e42198df8eb9d9ff0646/esteid/generic.py#L47-L69
2,660
vertexproject/synapse
synapse/common.py
listdir
def listdir(*paths, glob=None):
    '''
    List the (optionally glob filtered) full paths from a dir.

    Args:
        *paths ([str,...]): A list of path elements
        glob (str): An optional fnmatch glob str
    '''
    path = genpath(*paths)
    names = os.listdir(path)
    if glob is not None:
        names = fnmatch.filter(names, glob)

    retn = [os.path.join(path, name) for name in names]
    return retn
python
def listdir(*paths, glob=None):
    '''
    List the (optionally glob filtered) full paths from a dir.

    Args:
        *paths ([str,...]): A list of path elements
        glob (str): An optional fnmatch glob str
    '''
    path = genpath(*paths)
    names = os.listdir(path)
    if glob is not None:
        names = fnmatch.filter(names, glob)

    retn = [os.path.join(path, name) for name in names]
    return retn
['def', 'listdir', '(', '*', 'paths', ',', 'glob', '=', 'None', ')', ':', 'path', '=', 'genpath', '(', '*', 'paths', ')', 'names', '=', 'os', '.', 'listdir', '(', 'path', ')', 'if', 'glob', 'is', 'not', 'None', ':', 'names', '=', 'fnmatch', '.', 'filter', '(', 'names', ',', 'glob', ')', 'retn', '=', '[', 'os', '.', 'path', '.', 'join', '(', 'path', ',', 'name', ')', 'for', 'name', 'in', 'names', ']', 'return', 'retn']
List the (optionally glob filtered) full paths from a dir.

Args:
    *paths ([str,...]): A list of path elements
    glob (str): An optional fnmatch glob str
['List', 'the', '(', 'optionally', 'glob', 'filtered', ')', 'full', 'paths', 'from', 'a', 'dir', '.']
train
https://github.com/vertexproject/synapse/blob/22e67c5a8f6d7caddbcf34b39ab1bd2d6c4a6e0b/synapse/common.py#L255-L270
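A short usage sketch of the function above, assuming it is imported from synapse.common (the module path shown in this record); the directory and glob pattern are arbitrary examples.

import synapse.common as s_common

# Full paths of every .txt file directly under /tmp (example directory).
for path in s_common.listdir('/tmp', glob='*.txt'):
    print(path)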
2,661
benpryke/PyNewtonMath
pynewtonmath/core.py
expose_endpoints
def expose_endpoints (module, *args):
    """
    Expose methods to the given module for each API endpoint
    """
    for op in args:
        # Capture the closure state
        def create_method (o):
            return lambda exp: send_request(o, exp)

        setattr(sys.modules[__name__], op, create_method(op))
        setattr(module, op, getattr(sys.modules[__name__], op))
python
def expose_endpoints (module, *args):
    """
    Expose methods to the given module for each API endpoint
    """
    for op in args:
        # Capture the closure state
        def create_method (o):
            return lambda exp: send_request(o, exp)

        setattr(sys.modules[__name__], op, create_method(op))
        setattr(module, op, getattr(sys.modules[__name__], op))
['def', 'expose_endpoints', '(', 'module', ',', '*', 'args', ')', ':', 'for', 'op', 'in', 'args', ':', '# Capture the closure state', 'def', 'create_method', '(', 'o', ')', ':', 'return', 'lambda', 'exp', ':', 'send_request', '(', 'o', ',', 'exp', ')', 'setattr', '(', 'sys', '.', 'modules', '[', '__name__', ']', ',', 'op', ',', 'create_method', '(', 'op', ')', ')', 'setattr', '(', 'module', ',', 'op', ',', 'getattr', '(', 'sys', '.', 'modules', '[', '__name__', ']', ',', 'op', ')', ')']
Expose methods to the given module for each API endpoint
['Expose', 'methods', 'to', 'the', 'given', 'module', 'for', 'each', 'API', 'endpoint']
train
https://github.com/benpryke/PyNewtonMath/blob/9ef7de1fcaa5fe9be66dbf517715defe7a8a8abd/pynewtonmath/core.py#L48-L59
2,662
shidenggui/easyquotation
easyquotation/basequotation.py
BaseQuotation._fetch_stock_data
def _fetch_stock_data(self, stock_list):
    """获取股票信息"""
    pool = multiprocessing.pool.ThreadPool(len(stock_list))
    try:
        res = pool.map(self.get_stocks_by_range, stock_list)
    finally:
        pool.close()
    return [d for d in res if d is not None]
python
def _fetch_stock_data(self, stock_list):
    """获取股票信息"""
    pool = multiprocessing.pool.ThreadPool(len(stock_list))
    try:
        res = pool.map(self.get_stocks_by_range, stock_list)
    finally:
        pool.close()
    return [d for d in res if d is not None]
['def', '_fetch_stock_data', '(', 'self', ',', 'stock_list', ')', ':', 'pool', '=', 'multiprocessing', '.', 'pool', '.', 'ThreadPool', '(', 'len', '(', 'stock_list', ')', ')', 'try', ':', 'res', '=', 'pool', '.', 'map', '(', 'self', '.', 'get_stocks_by_range', ',', 'stock_list', ')', 'finally', ':', 'pool', '.', 'close', '(', ')', 'return', '[', 'd', 'for', 'd', 'in', 'res', 'if', 'd', 'is', 'not', 'None', ']']
获取股票信息
['获取股票信息']
train
https://github.com/shidenggui/easyquotation/blob/a75820db4f05f5386e1c1024d05b0bfc1de6cbda/easyquotation/basequotation.py#L114-L121
2,663
mrcagney/make_gtfs
make_gtfs/validators.py
check_stops
def check_stops(pfeed, *, as_df=False, include_warnings=False):
    """
    Analog of :func:`check_frequencies` for ``pfeed.stops``
    """
    # Use gtfstk's stop validator
    if pfeed.stops is not None:
        stop_times = pd.DataFrame(columns=['stop_id'])
        feed = gt.Feed(stops=pfeed.stops, stop_times=stop_times,
                       dist_units='km')
        return gt.check_stops(feed, as_df=as_df, include_warnings=False)
python
def check_stops(pfeed, *, as_df=False, include_warnings=False):
    """
    Analog of :func:`check_frequencies` for ``pfeed.stops``
    """
    # Use gtfstk's stop validator
    if pfeed.stops is not None:
        stop_times = pd.DataFrame(columns=['stop_id'])
        feed = gt.Feed(stops=pfeed.stops, stop_times=stop_times,
                       dist_units='km')
        return gt.check_stops(feed, as_df=as_df, include_warnings=False)
['def', 'check_stops', '(', 'pfeed', ',', '*', ',', 'as_df', '=', 'False', ',', 'include_warnings', '=', 'False', ')', ':', "# Use gtfstk's stop validator", 'if', 'pfeed', '.', 'stops', 'is', 'not', 'None', ':', 'stop_times', '=', 'pd', '.', 'DataFrame', '(', 'columns', '=', '[', "'stop_id'", ']', ')', 'feed', '=', 'gt', '.', 'Feed', '(', 'stops', '=', 'pfeed', '.', 'stops', ',', 'stop_times', '=', 'stop_times', ',', 'dist_units', '=', "'km'", ')', 'return', 'gt', '.', 'check_stops', '(', 'feed', ',', 'as_df', '=', 'as_df', ',', 'include_warnings', '=', 'False', ')']
Analog of :func:`check_frequencies` for ``pfeed.stops``
['Analog', 'of', ':', 'func', ':', 'check_frequencies', 'for', 'pfeed', '.', 'stops']
train
https://github.com/mrcagney/make_gtfs/blob/37b6f88e03bac708c2e85d6f4b6d48a0c92e4a59/make_gtfs/validators.py#L275-L284
2,664
secnot/rectpack
rectpack/packer.py
PackerOnline._new_open_bin
def _new_open_bin(self, width=None, height=None, rid=None):
    """
    Extract the next empty bin and append it to open bins

    Returns:
        PackingAlgorithm: Initialized empty packing bin.
        None: No bin big enough for the rectangle was found
    """
    factories_to_delete = set()  #
    new_bin = None

    for key, binfac in self._empty_bins.items():

        # Only return the new bin if the rect fits.
        # (If width or height is None, caller doesn't know the size.)
        if not binfac.fits_inside(width, height):
            continue

        # Create bin and add to open_bins
        new_bin = binfac.new_bin()
        if new_bin is None:
            continue
        self._open_bins.append(new_bin)

        # If the factory was depleted mark for deletion
        if binfac.is_empty():
            factories_to_delete.add(key)

        break

    # Delete marked factories
    for f in factories_to_delete:
        del self._empty_bins[f]

    return new_bin
python
def _new_open_bin(self, width=None, height=None, rid=None):
    """
    Extract the next empty bin and append it to open bins

    Returns:
        PackingAlgorithm: Initialized empty packing bin.
        None: No bin big enough for the rectangle was found
    """
    factories_to_delete = set()  #
    new_bin = None

    for key, binfac in self._empty_bins.items():

        # Only return the new bin if the rect fits.
        # (If width or height is None, caller doesn't know the size.)
        if not binfac.fits_inside(width, height):
            continue

        # Create bin and add to open_bins
        new_bin = binfac.new_bin()
        if new_bin is None:
            continue
        self._open_bins.append(new_bin)

        # If the factory was depleted mark for deletion
        if binfac.is_empty():
            factories_to_delete.add(key)

        break

    # Delete marked factories
    for f in factories_to_delete:
        del self._empty_bins[f]

    return new_bin
['def', '_new_open_bin', '(', 'self', ',', 'width', '=', 'None', ',', 'height', '=', 'None', ',', 'rid', '=', 'None', ')', ':', 'factories_to_delete', '=', 'set', '(', ')', '#', 'new_bin', '=', 'None', 'for', 'key', ',', 'binfac', 'in', 'self', '.', '_empty_bins', '.', 'items', '(', ')', ':', '# Only return the new bin if the rect fits.', "# (If width or height is None, caller doesn't know the size.)", 'if', 'not', 'binfac', '.', 'fits_inside', '(', 'width', ',', 'height', ')', ':', 'continue', '# Create bin and add to open_bins', 'new_bin', '=', 'binfac', '.', 'new_bin', '(', ')', 'if', 'new_bin', 'is', 'None', ':', 'continue', 'self', '.', '_open_bins', '.', 'append', '(', 'new_bin', ')', '# If the factory was depleted mark for deletion', 'if', 'binfac', '.', 'is_empty', '(', ')', ':', 'factories_to_delete', '.', 'add', '(', 'key', ')', 'break', '# Delete marked factories', 'for', 'f', 'in', 'factories_to_delete', ':', 'del', 'self', '.', '_empty_bins', '[', 'f', ']', 'return', 'new_bin']
Extract the next empty bin and append it to open bins

Returns:
    PackingAlgorithm: Initialized empty packing bin.
    None: No bin big enough for the rectangle was found
['Extract', 'the', 'next', 'empty', 'bin', 'and', 'append', 'it', 'to', 'open', 'bins']
train
https://github.com/secnot/rectpack/blob/21d46be48fd453500ea49de699bc9eabc427bdf7/rectpack/packer.py#L227-L261
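For context, a hedged sketch of the public rectpack API that drives this private helper; the bin and rectangle sizes are made-up examples.

from rectpack import newPacker

packer = newPacker()

# Rectangles to place and a single bin to place them into.
for w, h in [(30, 40), (50, 30), (10, 20)]:
    packer.add_rect(w, h)
packer.add_bin(100, 100)

packer.pack()

# Each entry: (bin_index, x, y, width, height, rid)
for rect in packer.rect_list():
    print(rect)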
2,665
tomplus/kubernetes_asyncio
kubernetes_asyncio/client/api/core_v1_api.py
CoreV1Api.replace_namespaced_config_map
def replace_namespaced_config_map(self, name, namespace, body, **kwargs):  # noqa: E501
    """replace_namespaced_config_map  # noqa: E501

    replace the specified ConfigMap  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.replace_namespaced_config_map(name, namespace, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str name: name of the ConfigMap (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param V1ConfigMap body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
    :return: V1ConfigMap
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        return self.replace_namespaced_config_map_with_http_info(name, namespace, body, **kwargs)  # noqa: E501
    else:
        (data) = self.replace_namespaced_config_map_with_http_info(name, namespace, body, **kwargs)  # noqa: E501
        return data
python
def replace_namespaced_config_map(self, name, namespace, body, **kwargs):  # noqa: E501
    """replace_namespaced_config_map  # noqa: E501

    replace the specified ConfigMap  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.replace_namespaced_config_map(name, namespace, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str name: name of the ConfigMap (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param V1ConfigMap body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
    :return: V1ConfigMap
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        return self.replace_namespaced_config_map_with_http_info(name, namespace, body, **kwargs)  # noqa: E501
    else:
        (data) = self.replace_namespaced_config_map_with_http_info(name, namespace, body, **kwargs)  # noqa: E501
        return data
['def', 'replace_namespaced_config_map', '(', 'self', ',', 'name', ',', 'namespace', ',', 'body', ',', '*', '*', 'kwargs', ')', ':', '# noqa: E501', 'kwargs', '[', "'_return_http_data_only'", ']', '=', 'True', 'if', 'kwargs', '.', 'get', '(', "'async_req'", ')', ':', 'return', 'self', '.', 'replace_namespaced_config_map_with_http_info', '(', 'name', ',', 'namespace', ',', 'body', ',', '*', '*', 'kwargs', ')', '# noqa: E501', 'else', ':', '(', 'data', ')', '=', 'self', '.', 'replace_namespaced_config_map_with_http_info', '(', 'name', ',', 'namespace', ',', 'body', ',', '*', '*', 'kwargs', ')', '# noqa: E501', 'return', 'data']
replace_namespaced_config_map # noqa: E501 replace the specified ConfigMap # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.replace_namespaced_config_map(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the ConfigMap (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param V1ConfigMap body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1ConfigMap If the method is called asynchronously, returns the request thread.
['replace_namespaced_config_map', '#', 'noqa', ':', 'E501']
train
https://github.com/tomplus/kubernetes_asyncio/blob/f9ab15317ec921409714c7afef11aeb0f579985d/kubernetes_asyncio/client/api/core_v1_api.py#L20843-L20867
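A hedged usage sketch for the method above. It assumes the kubernetes_asyncio client, a reachable cluster configured in a local kubeconfig, and a ConfigMap named "app-config" that already exists in the "default" namespace; in this asyncio client the API call is awaited rather than dispatched to a thread.

import asyncio
from kubernetes_asyncio import client, config

async def main():
    await config.load_kube_config()
    async with client.ApiClient() as api_client:
        v1 = client.CoreV1Api(api_client)
        body = client.V1ConfigMap(
            metadata=client.V1ObjectMeta(name="app-config"),
            data={"LOG_LEVEL": "debug"},
        )
        # Replace the whole object; unlike patch, omitted keys are dropped.
        replaced = await v1.replace_namespaced_config_map("app-config", "default", body)
        print(replaced.metadata.resource_version)

asyncio.run(main())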
2,666
googledatalab/pydatalab
datalab/bigquery/_sampling.py
Sampling.sampling_query
def sampling_query(sql, fields=None, count=5, sampling=None): """Returns a sampling query for the SQL object. Args: sql: the SQL object to sample fields: an optional list of field names to retrieve. count: an optional count of rows to retrieve which is used if a specific sampling is not specified. sampling: an optional sampling strategy to apply to the table. Returns: A SQL query string for sampling the input sql. """ if sampling is None: sampling = Sampling.default(count=count, fields=fields) return sampling(sql)
python
def sampling_query(sql, fields=None, count=5, sampling=None): """Returns a sampling query for the SQL object. Args: sql: the SQL object to sample fields: an optional list of field names to retrieve. count: an optional count of rows to retrieve which is used if a specific sampling is not specified. sampling: an optional sampling strategy to apply to the table. Returns: A SQL query string for sampling the input sql. """ if sampling is None: sampling = Sampling.default(count=count, fields=fields) return sampling(sql)
['def', 'sampling_query', '(', 'sql', ',', 'fields', '=', 'None', ',', 'count', '=', '5', ',', 'sampling', '=', 'None', ')', ':', 'if', 'sampling', 'is', 'None', ':', 'sampling', '=', 'Sampling', '.', 'default', '(', 'count', '=', 'count', ',', 'fields', '=', 'fields', ')', 'return', 'sampling', '(', 'sql', ')']
Returns a sampling query for the SQL object. Args: sql: the SQL object to sample fields: an optional list of field names to retrieve. count: an optional count of rows to retrieve which is used if a specific sampling is not specified. sampling: an optional sampling strategy to apply to the table. Returns: A SQL query string for sampling the input sql.
['Returns', 'a', 'sampling', 'query', 'for', 'the', 'SQL', 'object', '.']
train
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/bigquery/_sampling.py#L74-L88
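A hedged usage sketch based on the signature above; the import path is inferred from the file location in this record, and the table name is a placeholder.

from datalab.bigquery._sampling import Sampling

sql = 'SELECT * FROM `project.dataset.events`'
# With no explicit sampling strategy, the default strategy built from
# `count` and `fields` is applied to the query.
query = Sampling.sampling_query(sql, fields=['user_id', 'ts'], count=10)
print(query)  # a SQL string that selects the requested fields, limited to ~10 rows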
2,667
theislab/scanpy
scanpy/readwrite.py
read
def read(filename, backed=False, sheet=None, ext=None, delimiter=None, first_column_names=False, backup_url=None, cache=False, **kwargs) -> AnnData: """Read file and return :class:`~anndata.AnnData` object. To speed up reading, consider passing `cache=True`, which creates an hdf5 cache file. Parameters ---------- filename : `str` If the filename has no file extension, it is interpreted as a key for generating a filename via `sc.settings.writedir + filename + sc.settings.file_format_data`. This is the same behavior as in `sc.read(filename, ...)`. backed : {`False`, `True`, 'r', 'r+'}, optional (default: `False`) Load :class:`~anndata.AnnData` in `backed` mode instead of fully loading it into memory (`memory` mode). Only applies to `.h5ad` files. `True` and 'r' are equivalent. If you want to modify backed attributes of the AnnData object, you need to choose 'r+'. sheet : `str`, optional (default: `None`) Name of sheet/table in hdf5 or Excel file. cache : `bool`, optional (default: `False`) If `False`, read from source, if `True`, read from fast 'h5ad' cache. ext : `str`, optional (default: `None`) Extension that indicates the file type. If `None`, uses extension of filename. delimiter : `str`, optional (default: `None`) Delimiter that separates data within text file. If `None`, will split at arbitrary number of white spaces, which is different from enforcing splitting at any single white space ' '. first_column_names : `bool`, optional (default: `False`) Assume the first column stores row names. This is only necessary if these are not strings: strings in the first column are automatically assumed to be row names. backup_url : `str`, optional (default: `None`) Retrieve the file from an URL if not present on disk. Returns ------- An :class:`~anndata.AnnData` object """ filename = str(filename) # allow passing pathlib.Path objects if is_valid_filename(filename): return _read(filename, backed=backed, sheet=sheet, ext=ext, delimiter=delimiter, first_column_names=first_column_names, backup_url=backup_url, cache=cache, **kwargs) # generate filename and read to dict filekey = filename filename = settings.writedir + filekey + '.' + settings.file_format_data if not Path(filename).exists(): raise ValueError('Reading with filekey "{}" failed, the ' 'inferred filename "{}" does not exist. ' 'If you intended to provide a filename, either ' 'use a filename ending on one of the available extensions {} ' 'or pass the parameter `ext`.' .format(filekey, filename, avail_exts)) return read_h5ad(filename, backed=backed)
python
def read(filename, backed=False, sheet=None, ext=None, delimiter=None, first_column_names=False, backup_url=None, cache=False, **kwargs) -> AnnData: """Read file and return :class:`~anndata.AnnData` object. To speed up reading, consider passing `cache=True`, which creates an hdf5 cache file. Parameters ---------- filename : `str` If the filename has no file extension, it is interpreted as a key for generating a filename via `sc.settings.writedir + filename + sc.settings.file_format_data`. This is the same behavior as in `sc.read(filename, ...)`. backed : {`False`, `True`, 'r', 'r+'}, optional (default: `False`) Load :class:`~anndata.AnnData` in `backed` mode instead of fully loading it into memory (`memory` mode). Only applies to `.h5ad` files. `True` and 'r' are equivalent. If you want to modify backed attributes of the AnnData object, you need to choose 'r+'. sheet : `str`, optional (default: `None`) Name of sheet/table in hdf5 or Excel file. cache : `bool`, optional (default: `False`) If `False`, read from source, if `True`, read from fast 'h5ad' cache. ext : `str`, optional (default: `None`) Extension that indicates the file type. If `None`, uses extension of filename. delimiter : `str`, optional (default: `None`) Delimiter that separates data within text file. If `None`, will split at arbitrary number of white spaces, which is different from enforcing splitting at any single white space ' '. first_column_names : `bool`, optional (default: `False`) Assume the first column stores row names. This is only necessary if these are not strings: strings in the first column are automatically assumed to be row names. backup_url : `str`, optional (default: `None`) Retrieve the file from an URL if not present on disk. Returns ------- An :class:`~anndata.AnnData` object """ filename = str(filename) # allow passing pathlib.Path objects if is_valid_filename(filename): return _read(filename, backed=backed, sheet=sheet, ext=ext, delimiter=delimiter, first_column_names=first_column_names, backup_url=backup_url, cache=cache, **kwargs) # generate filename and read to dict filekey = filename filename = settings.writedir + filekey + '.' + settings.file_format_data if not Path(filename).exists(): raise ValueError('Reading with filekey "{}" failed, the ' 'inferred filename "{}" does not exist. ' 'If you intended to provide a filename, either ' 'use a filename ending on one of the available extensions {} ' 'or pass the parameter `ext`.' .format(filekey, filename, avail_exts)) return read_h5ad(filename, backed=backed)
['def', 'read', '(', 'filename', ',', 'backed', '=', 'False', ',', 'sheet', '=', 'None', ',', 'ext', '=', 'None', ',', 'delimiter', '=', 'None', ',', 'first_column_names', '=', 'False', ',', 'backup_url', '=', 'None', ',', 'cache', '=', 'False', ',', '*', '*', 'kwargs', ')', '->', 'AnnData', ':', 'filename', '=', 'str', '(', 'filename', ')', '# allow passing pathlib.Path objects', 'if', 'is_valid_filename', '(', 'filename', ')', ':', 'return', '_read', '(', 'filename', ',', 'backed', '=', 'backed', ',', 'sheet', '=', 'sheet', ',', 'ext', '=', 'ext', ',', 'delimiter', '=', 'delimiter', ',', 'first_column_names', '=', 'first_column_names', ',', 'backup_url', '=', 'backup_url', ',', 'cache', '=', 'cache', ',', '*', '*', 'kwargs', ')', '# generate filename and read to dict', 'filekey', '=', 'filename', 'filename', '=', 'settings', '.', 'writedir', '+', 'filekey', '+', "'.'", '+', 'settings', '.', 'file_format_data', 'if', 'not', 'Path', '(', 'filename', ')', '.', 'exists', '(', ')', ':', 'raise', 'ValueError', '(', '\'Reading with filekey "{}" failed, the \'', '\'inferred filename "{}" does not exist. \'', "'If you intended to provide a filename, either '", "'use a filename ending on one of the available extensions {} '", "'or pass the parameter `ext`.'", '.', 'format', '(', 'filekey', ',', 'filename', ',', 'avail_exts', ')', ')', 'return', 'read_h5ad', '(', 'filename', ',', 'backed', '=', 'backed', ')']
Read file and return :class:`~anndata.AnnData` object. To speed up reading, consider passing `cache=True`, which creates an hdf5 cache file. Parameters ---------- filename : `str` If the filename has no file extension, it is interpreted as a key for generating a filename via `sc.settings.writedir + filename + sc.settings.file_format_data`. This is the same behavior as in `sc.read(filename, ...)`. backed : {`False`, `True`, 'r', 'r+'}, optional (default: `False`) Load :class:`~anndata.AnnData` in `backed` mode instead of fully loading it into memory (`memory` mode). Only applies to `.h5ad` files. `True` and 'r' are equivalent. If you want to modify backed attributes of the AnnData object, you need to choose 'r+'. sheet : `str`, optional (default: `None`) Name of sheet/table in hdf5 or Excel file. cache : `bool`, optional (default: `False`) If `False`, read from source, if `True`, read from fast 'h5ad' cache. ext : `str`, optional (default: `None`) Extension that indicates the file type. If `None`, uses extension of filename. delimiter : `str`, optional (default: `None`) Delimiter that separates data within text file. If `None`, will split at arbitrary number of white spaces, which is different from enforcing splitting at any single white space ' '. first_column_names : `bool`, optional (default: `False`) Assume the first column stores row names. This is only necessary if these are not strings: strings in the first column are automatically assumed to be row names. backup_url : `str`, optional (default: `None`) Retrieve the file from an URL if not present on disk. Returns ------- An :class:`~anndata.AnnData` object
['Read', 'file', 'and', 'return', ':', 'class', ':', '~anndata', '.', 'AnnData', 'object', '.']
train
https://github.com/theislab/scanpy/blob/9e4e5ee02e04cf618872d9b098e24f0542e8b227/scanpy/readwrite.py#L34-L90
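A hedged usage sketch of the reader above; the file names are placeholders.

import scanpy as sc

# Read a delimited matrix; cache=True writes an .h5ad cache for faster re-reads.
adata = sc.read('counts.csv', delimiter=',', first_column_names=True, cache=True)

# .h5ad files can instead be opened in backed mode to avoid loading everything
# into memory.
adata_backed = sc.read('pbmc.h5ad', backed='r')
print(adata.shape, adata_backed.isbacked)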
2,668
blue-yonder/tsfresh
tsfresh/utilities/distribution.py
DistributorBaseClass.calculate_best_chunk_size
def calculate_best_chunk_size(self, data_length): """ Calculates the best chunk size for a list of length data_length. The current implemented formula is more or less an empirical result for multiprocessing case on one machine. :param data_length: A length which defines how many calculations there need to be. :type data_length: int :return: the calculated chunk size :rtype: int TODO: Investigate which is the best chunk size for different settings. """ chunk_size, extra = divmod(data_length, self.n_workers * 5) if extra: chunk_size += 1 return chunk_size
python
def calculate_best_chunk_size(self, data_length): """ Calculates the best chunk size for a list of length data_length. The current implemented formula is more or less an empirical result for multiprocessing case on one machine. :param data_length: A length which defines how many calculations there need to be. :type data_length: int :return: the calculated chunk size :rtype: int TODO: Investigate which is the best chunk size for different settings. """ chunk_size, extra = divmod(data_length, self.n_workers * 5) if extra: chunk_size += 1 return chunk_size
['def', 'calculate_best_chunk_size', '(', 'self', ',', 'data_length', ')', ':', 'chunk_size', ',', 'extra', '=', 'divmod', '(', 'data_length', ',', 'self', '.', 'n_workers', '*', '5', ')', 'if', 'extra', ':', 'chunk_size', '+=', '1', 'return', 'chunk_size']
Calculates the best chunk size for a list of length data_length. The current implemented formula is more or less an empirical result for multiprocessing case on one machine. :param data_length: A length which defines how many calculations there need to be. :type data_length: int :return: the calculated chunk size :rtype: int TODO: Investigate which is the best chunk size for different settings.
['Calculates', 'the', 'best', 'chunk', 'size', 'for', 'a', 'list', 'of', 'length', 'data_length', '.', 'The', 'current', 'implemented', 'formula', 'is', 'more', 'or', 'less', 'an', 'empirical', 'result', 'for', 'multiprocessing', 'case', 'on', 'one', 'machine', '.', ':', 'param', 'data_length', ':', 'A', 'length', 'which', 'defines', 'how', 'many', 'calculations', 'there', 'need', 'to', 'be', '.', ':', 'type', 'data_length', ':', 'int', ':', 'return', ':', 'the', 'calculated', 'chunk', 'size', ':', 'rtype', ':', 'int']
train
https://github.com/blue-yonder/tsfresh/blob/c72c9c574371cf7dd7d54e00a466792792e5d202/tsfresh/utilities/distribution.py#L85-L100
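A standalone worked example of the divmod arithmetic above (no tsfresh import needed): with 4 workers the target is n_workers * 5 = 20 chunks, and any remainder bumps the chunk size by one.

data_length, n_workers = 104, 4
chunk_size, extra = divmod(data_length, n_workers * 5)  # divmod(104, 20) -> (5, 4)
if extra:
    chunk_size += 1
print(chunk_size)  # 6, i.e. ceil(104 / 20)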
2,669
IwoHerka/sexpr
sexpr/types/sequence.py
Sequence.pop
def pop(self, sexp): ''' Notes: Sequence works a bit different than other nodes. This method (like others) expectes a list. However, sequence matches against the list, whereas other nodes try to match against elements of the list. ''' for t in self.terms: sexp = t.pop(sexp) return sexp
python
def pop(self, sexp): ''' Notes: Sequence works a bit different than other nodes. This method (like others) expectes a list. However, sequence matches against the list, whereas other nodes try to match against elements of the list. ''' for t in self.terms: sexp = t.pop(sexp) return sexp
['def', 'pop', '(', 'self', ',', 'sexp', ')', ':', 'for', 't', 'in', 'self', '.', 'terms', ':', 'sexp', '=', 't', '.', 'pop', '(', 'sexp', ')', 'return', 'sexp']
Notes: Sequence works a bit differently than other nodes. This method (like others) expects a list. However, sequence matches against the list as a whole, whereas other nodes try to match against elements of the list.
['Notes', ':', 'Sequence', 'works', 'a', 'bit', 'different', 'than', 'other', 'nodes', '.', 'This', 'method', '(', 'like', 'others', ')', 'expectes', 'a', 'list', '.', 'However', 'sequence', 'matches', 'against', 'the', 'list', 'whereas', 'other', 'nodes', 'try', 'to', 'match', 'against', 'elements', 'of', 'the', 'list', '.']
train
https://github.com/IwoHerka/sexpr/blob/28e32f543a127bbbf832b2dba7cb93f9e57db3b6/sexpr/types/sequence.py#L11-L20
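A minimal sketch of the sequence-matching idea: each term consumes the front of the list and returns the remainder, so the whole sequence matches the list itself. The Atom class below is a stand-in for illustration, not the library's real node type.

class Atom:
    def __init__(self, expected):
        self.expected = expected
    def pop(self, sexp):
        if not sexp or sexp[0] != self.expected:
            raise ValueError('no match for %r' % self.expected)
        return sexp[1:]

class Sequence:
    def __init__(self, *terms):
        self.terms = terms
    def pop(self, sexp):
        # Each term pops from the front; the leftover list is passed on.
        for t in self.terms:
            sexp = t.pop(sexp)
        return sexp

rest = Sequence(Atom('a'), Atom('b')).pop(['a', 'b', 'c'])
print(rest)  # ['c']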
2,670
ArduPilot/MAVProxy
MAVProxy/modules/mavproxy_firmware.py
FirmwareModule.manifests_parse
def manifests_parse(self): '''parse manifests present on system''' self.manifests = [] for manifest_path in self.find_manifests(): if self.manifest_path_is_old(manifest_path): print("fw: Manifest (%s) is old; consider 'manifest download'" % (manifest_path)) manifest = self.manifest_parse(manifest_path) if self.semver_major(manifest["format-version"]) != 1: print("fw: Manifest (%s) has major version %d; MAVProxy only understands version 1" % (manifest_path,manifest["format-version"])) continue self.manifests.append(manifest)
python
def manifests_parse(self): '''parse manifests present on system''' self.manifests = [] for manifest_path in self.find_manifests(): if self.manifest_path_is_old(manifest_path): print("fw: Manifest (%s) is old; consider 'manifest download'" % (manifest_path)) manifest = self.manifest_parse(manifest_path) if self.semver_major(manifest["format-version"]) != 1: print("fw: Manifest (%s) has major version %d; MAVProxy only understands version 1" % (manifest_path,manifest["format-version"])) continue self.manifests.append(manifest)
['def', 'manifests_parse', '(', 'self', ')', ':', 'self', '.', 'manifests', '=', '[', ']', 'for', 'manifest_path', 'in', 'self', '.', 'find_manifests', '(', ')', ':', 'if', 'self', '.', 'manifest_path_is_old', '(', 'manifest_path', ')', ':', 'print', '(', '"fw: Manifest (%s) is old; consider \'manifest download\'"', '%', '(', 'manifest_path', ')', ')', 'manifest', '=', 'self', '.', 'manifest_parse', '(', 'manifest_path', ')', 'if', 'self', '.', 'semver_major', '(', 'manifest', '[', '"format-version"', ']', ')', '!=', '1', ':', 'print', '(', '"fw: Manifest (%s) has major version %d; MAVProxy only understands version 1"', '%', '(', 'manifest_path', ',', 'manifest', '[', '"format-version"', ']', ')', ')', 'continue', 'self', '.', 'manifests', '.', 'append', '(', 'manifest', ')']
parse manifests present on system
['parse', 'manifests', 'present', 'on', 'system']
train
https://github.com/ArduPilot/MAVProxy/blob/f50bdeff33064876f7dc8dc4683d278ff47f75d5/MAVProxy/modules/mavproxy_firmware.py#L294-L304
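A standalone sketch of the version filter applied above; the manifests here are in-memory dicts rather than files on disk, and semver_major is a simplified helper.

def semver_major(version):
    return int(str(version).split('.')[0])

candidates = [
    {"format-version": "1.0.0", "firmware": []},
    {"format-version": "2.0.0", "firmware": []},
]
# Only manifests whose major format version is 1 are kept.
accepted = [m for m in candidates if semver_major(m["format-version"]) == 1]
print(len(accepted))  # 1 -- the format-version 2.0.0 manifest is skipped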
2,671
alejandrobll/py-sphviewer
sphviewer/Scene.py
Scene.plot
def plot(self,axis=None,**kargs): """ - plot(axis=None, **kwarg): Finally, sphviewer.Scene class has its own plotting method. It shows the scene as seen by the camera. It is to say, it plots the particles according to their aparent coordinates; axis makes a reference to an existing axis. In case axis is None, the plot is made on the current axis. The kwargs are :class:`~matplotlib.lines.Line2D` properties: agg_filter: unknown alpha: float (0.0 transparent through 1.0 opaque) animated: [True | False] antialiased or aa: [True | False] axes: an :class:`~matplotlib.axes.Axes` instance clip_box: a :class:`matplotlib.transforms.Bbox` instance clip_on: [True | False] clip_path: [ (:class:`~matplotlib.path.Path`, :class:`~matplotlib.transforms.Transform`) | :class:`~matplotlib.patches.Patch` | None ] color or c: any matplotlib color contains: a callable function dash_capstyle: ['butt' | 'round' | 'projecting'] dash_joinstyle: ['miter' | 'round' | 'bevel'] dashes: sequence of on/off ink in points data: 2D array (rows are x, y) or two 1D arrays drawstyle: [ 'default' | 'steps' | 'steps-pre' | 'steps-mid' | 'steps-post' ] figure: a :class:`matplotlib.figure.Figure` instance fillstyle: ['full' | 'left' | 'right' | 'bottom' | 'top'] gid: an id string label: any string linestyle or ls: [ ``'-'`` | ``'--'`` | ``'-.'`` | ``':'`` | ``'None'`` | ``' '`` | ``''`` ] and any drawstyle in combination with a linestyle, e.g. ``'steps--'``. linewidth or lw: float value in points lod: [True | False] marker: [ ``7`` | ``4`` | ``5`` | ``6`` | ``'o'`` | ``'D'`` | ``'h'`` | ``'H'`` | ``'_'`` | ``''`` | ``'None'`` | ``' '`` | ``None`` | ``'8'`` | ``'p'`` | ``','`` | ``'+'`` | ``'.'`` | ``'s'`` | ``'*'`` | ``'d'`` | ``3`` | ``0`` | ``1`` | ``2`` | ``'1'`` | ``'3'`` | ``'4'`` | ``'2'`` | ``'v'`` | ``'<'`` | ``'>'`` | ``'^'`` | ``'|'`` | ``'x'`` | ``'$...$'`` | *tuple* | *Nx2 array* ] markeredgecolor or mec: any matplotlib color markeredgewidth or mew: float value in points markerfacecolor or mfc: any matplotlib color markerfacecoloralt or mfcalt: any matplotlib color markersize or ms: float markevery: None | integer | (startind, stride) picker: float distance in points or callable pick function ``fn(artist, event)`` pickradius: float distance in points rasterized: [True | False | None] snap: unknown solid_capstyle: ['butt' | 'round' | 'projecting'] solid_joinstyle: ['miter' | 'round' | 'bevel'] transform: a :class:`matplotlib.transforms.Transform` instance url: a url string visible: [True | False] xdata: 1D array ydata: 1D array zorder: any number kwargs *scalex* and *scaley*, if defined, are passed on to :meth:`~matplotlib.axes.Axes.autoscale_view` to determine whether the *x* and *y* axes are autoscaled; the default is *True*. Additional kwargs: hold = [True|False] overrides default hold state """ if(axis == None): axis = plt.gca() axis.plot(self.__x, self.__y, 'k.', **kargs)
python
def plot(self,axis=None,**kargs): """ - plot(axis=None, **kwarg): Finally, sphviewer.Scene class has its own plotting method. It shows the scene as seen by the camera. It is to say, it plots the particles according to their aparent coordinates; axis makes a reference to an existing axis. In case axis is None, the plot is made on the current axis. The kwargs are :class:`~matplotlib.lines.Line2D` properties: agg_filter: unknown alpha: float (0.0 transparent through 1.0 opaque) animated: [True | False] antialiased or aa: [True | False] axes: an :class:`~matplotlib.axes.Axes` instance clip_box: a :class:`matplotlib.transforms.Bbox` instance clip_on: [True | False] clip_path: [ (:class:`~matplotlib.path.Path`, :class:`~matplotlib.transforms.Transform`) | :class:`~matplotlib.patches.Patch` | None ] color or c: any matplotlib color contains: a callable function dash_capstyle: ['butt' | 'round' | 'projecting'] dash_joinstyle: ['miter' | 'round' | 'bevel'] dashes: sequence of on/off ink in points data: 2D array (rows are x, y) or two 1D arrays drawstyle: [ 'default' | 'steps' | 'steps-pre' | 'steps-mid' | 'steps-post' ] figure: a :class:`matplotlib.figure.Figure` instance fillstyle: ['full' | 'left' | 'right' | 'bottom' | 'top'] gid: an id string label: any string linestyle or ls: [ ``'-'`` | ``'--'`` | ``'-.'`` | ``':'`` | ``'None'`` | ``' '`` | ``''`` ] and any drawstyle in combination with a linestyle, e.g. ``'steps--'``. linewidth or lw: float value in points lod: [True | False] marker: [ ``7`` | ``4`` | ``5`` | ``6`` | ``'o'`` | ``'D'`` | ``'h'`` | ``'H'`` | ``'_'`` | ``''`` | ``'None'`` | ``' '`` | ``None`` | ``'8'`` | ``'p'`` | ``','`` | ``'+'`` | ``'.'`` | ``'s'`` | ``'*'`` | ``'d'`` | ``3`` | ``0`` | ``1`` | ``2`` | ``'1'`` | ``'3'`` | ``'4'`` | ``'2'`` | ``'v'`` | ``'<'`` | ``'>'`` | ``'^'`` | ``'|'`` | ``'x'`` | ``'$...$'`` | *tuple* | *Nx2 array* ] markeredgecolor or mec: any matplotlib color markeredgewidth or mew: float value in points markerfacecolor or mfc: any matplotlib color markerfacecoloralt or mfcalt: any matplotlib color markersize or ms: float markevery: None | integer | (startind, stride) picker: float distance in points or callable pick function ``fn(artist, event)`` pickradius: float distance in points rasterized: [True | False | None] snap: unknown solid_capstyle: ['butt' | 'round' | 'projecting'] solid_joinstyle: ['miter' | 'round' | 'bevel'] transform: a :class:`matplotlib.transforms.Transform` instance url: a url string visible: [True | False] xdata: 1D array ydata: 1D array zorder: any number kwargs *scalex* and *scaley*, if defined, are passed on to :meth:`~matplotlib.axes.Axes.autoscale_view` to determine whether the *x* and *y* axes are autoscaled; the default is *True*. Additional kwargs: hold = [True|False] overrides default hold state """ if(axis == None): axis = plt.gca() axis.plot(self.__x, self.__y, 'k.', **kargs)
['def', 'plot', '(', 'self', ',', 'axis', '=', 'None', ',', '*', '*', 'kargs', ')', ':', 'if', '(', 'axis', '==', 'None', ')', ':', 'axis', '=', 'plt', '.', 'gca', '(', ')', 'axis', '.', 'plot', '(', 'self', '.', '__x', ',', 'self', '.', '__y', ',', "'k.'", ',', '*', '*', 'kargs', ')']
- plot(axis=None, **kwarg): Finally, sphviewer.Scene class has its own plotting method. It shows the scene as seen by the camera. It is to say, it plots the particles according to their aparent coordinates; axis makes a reference to an existing axis. In case axis is None, the plot is made on the current axis. The kwargs are :class:`~matplotlib.lines.Line2D` properties: agg_filter: unknown alpha: float (0.0 transparent through 1.0 opaque) animated: [True | False] antialiased or aa: [True | False] axes: an :class:`~matplotlib.axes.Axes` instance clip_box: a :class:`matplotlib.transforms.Bbox` instance clip_on: [True | False] clip_path: [ (:class:`~matplotlib.path.Path`, :class:`~matplotlib.transforms.Transform`) | :class:`~matplotlib.patches.Patch` | None ] color or c: any matplotlib color contains: a callable function dash_capstyle: ['butt' | 'round' | 'projecting'] dash_joinstyle: ['miter' | 'round' | 'bevel'] dashes: sequence of on/off ink in points data: 2D array (rows are x, y) or two 1D arrays drawstyle: [ 'default' | 'steps' | 'steps-pre' | 'steps-mid' | 'steps-post' ] figure: a :class:`matplotlib.figure.Figure` instance fillstyle: ['full' | 'left' | 'right' | 'bottom' | 'top'] gid: an id string label: any string linestyle or ls: [ ``'-'`` | ``'--'`` | ``'-.'`` | ``':'`` | ``'None'`` | ``' '`` | ``''`` ] and any drawstyle in combination with a linestyle, e.g. ``'steps--'``. linewidth or lw: float value in points lod: [True | False] marker: [ ``7`` | ``4`` | ``5`` | ``6`` | ``'o'`` | ``'D'`` | ``'h'`` | ``'H'`` | ``'_'`` | ``''`` | ``'None'`` | ``' '`` | ``None`` | ``'8'`` | ``'p'`` | ``','`` | ``'+'`` | ``'.'`` | ``'s'`` | ``'*'`` | ``'d'`` | ``3`` | ``0`` | ``1`` | ``2`` | ``'1'`` | ``'3'`` | ``'4'`` | ``'2'`` | ``'v'`` | ``'<'`` | ``'>'`` | ``'^'`` | ``'|'`` | ``'x'`` | ``'$...$'`` | *tuple* | *Nx2 array* ] markeredgecolor or mec: any matplotlib color markeredgewidth or mew: float value in points markerfacecolor or mfc: any matplotlib color markerfacecoloralt or mfcalt: any matplotlib color markersize or ms: float markevery: None | integer | (startind, stride) picker: float distance in points or callable pick function ``fn(artist, event)`` pickradius: float distance in points rasterized: [True | False | None] snap: unknown solid_capstyle: ['butt' | 'round' | 'projecting'] solid_joinstyle: ['miter' | 'round' | 'bevel'] transform: a :class:`matplotlib.transforms.Transform` instance url: a url string visible: [True | False] xdata: 1D array ydata: 1D array zorder: any number kwargs *scalex* and *scaley*, if defined, are passed on to :meth:`~matplotlib.axes.Axes.autoscale_view` to determine whether the *x* and *y* axes are autoscaled; the default is *True*. Additional kwargs: hold = [True|False] overrides default hold state
['-', 'plot', '(', 'axis', '=', 'None', '**', 'kwarg', ')', ':', 'Finally', 'sphviewer', '.', 'Scene', 'class', 'has', 'its', 'own', 'plotting', 'method', '.', 'It', 'shows', 'the', 'scene', 'as', 'seen', 'by', 'the', 'camera', '.', 'It', 'is', 'to', 'say', 'it', 'plots', 'the', 'particles', 'according', 'to', 'their', 'aparent', 'coordinates', ';', 'axis', 'makes', 'a', 'reference', 'to', 'an', 'existing', 'axis', '.', 'In', 'case', 'axis', 'is', 'None', 'the', 'plot', 'is', 'made', 'on', 'the', 'current', 'axis', '.']
train
https://github.com/alejandrobll/py-sphviewer/blob/f198bd9ed5adfb58ebdf66d169206e609fd46e42/sphviewer/Scene.py#L299-L359
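A hedged usage sketch; it assumes `scene` is an existing sphviewer.Scene instance built elsewhere. The keyword arguments are ordinary matplotlib Line2D properties, as listed in the docstring above.

import matplotlib.pyplot as plt

fig, ax = plt.subplots()
# Plot the particles' apparent coordinates on an explicit axis.
scene.plot(axis=ax, ms=0.5, alpha=0.3)
ax.set_aspect('equal')
plt.show()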
2,672
skelsec/minikerberos
minikerberos/common.py
KerberosCredential.from_connection_string
def from_connection_string(s): """ Credential input format: <domain>/<username>/<secret_type>:<secret>@<dc_ip_or_hostname> """ cred = KerberosCredential() cred.domain, t = s.split('/', 1) cred.username, t = t.split('/', 1) secret_type, t = t.split(':', 1) secret, target = t.rsplit('@', 1) st = KerberosSecretType(secret_type.upper()) if st == KerberosSecretType.PASSWORD or st == KerberosSecretType.PW or st == KerberosSecretType.PASS: cred.password = secret elif st == KerberosSecretType.NT or st == KerberosSecretType.RC4: cred.nt_hash = secret cred.kerberos_key_rc4 = secret elif st == KerberosSecretType.AES: cred.kerberos_key_aes_256 = secret cred.kerberos_key_aes_128 = secret elif st == KerberosSecretType.DES: cred.kerberos_key_des = secret elif st == KerberosSecretType.DES3 or st == KerberosSecretType.TDES: cred.kerberos_key_des3 = secret elif st == KerberosSecretType.CCACHE: cred.ccache = CCACHE.from_file(secret) return cred
python
def from_connection_string(s): """ Credential input format: <domain>/<username>/<secret_type>:<secret>@<dc_ip_or_hostname> """ cred = KerberosCredential() cred.domain, t = s.split('/', 1) cred.username, t = t.split('/', 1) secret_type, t = t.split(':', 1) secret, target = t.rsplit('@', 1) st = KerberosSecretType(secret_type.upper()) if st == KerberosSecretType.PASSWORD or st == KerberosSecretType.PW or st == KerberosSecretType.PASS: cred.password = secret elif st == KerberosSecretType.NT or st == KerberosSecretType.RC4: cred.nt_hash = secret cred.kerberos_key_rc4 = secret elif st == KerberosSecretType.AES: cred.kerberos_key_aes_256 = secret cred.kerberos_key_aes_128 = secret elif st == KerberosSecretType.DES: cred.kerberos_key_des = secret elif st == KerberosSecretType.DES3 or st == KerberosSecretType.TDES: cred.kerberos_key_des3 = secret elif st == KerberosSecretType.CCACHE: cred.ccache = CCACHE.from_file(secret) return cred
['def', 'from_connection_string', '(', 's', ')', ':', 'cred', '=', 'KerberosCredential', '(', ')', 'cred', '.', 'domain', ',', 't', '=', 's', '.', 'split', '(', "'/'", ',', '1', ')', 'cred', '.', 'username', ',', 't', '=', 't', '.', 'split', '(', "'/'", ',', '1', ')', 'secret_type', ',', 't', '=', 't', '.', 'split', '(', "':'", ',', '1', ')', 'secret', ',', 'target', '=', 't', '.', 'rsplit', '(', "'@'", ',', '1', ')', 'st', '=', 'KerberosSecretType', '(', 'secret_type', '.', 'upper', '(', ')', ')', 'if', 'st', '==', 'KerberosSecretType', '.', 'PASSWORD', 'or', 'st', '==', 'KerberosSecretType', '.', 'PW', 'or', 'st', '==', 'KerberosSecretType', '.', 'PASS', ':', 'cred', '.', 'password', '=', 'secret', 'elif', 'st', '==', 'KerberosSecretType', '.', 'NT', 'or', 'st', '==', 'KerberosSecretType', '.', 'RC4', ':', 'cred', '.', 'nt_hash', '=', 'secret', 'cred', '.', 'kerberos_key_rc4', '=', 'secret', 'elif', 'st', '==', 'KerberosSecretType', '.', 'AES', ':', 'cred', '.', 'kerberos_key_aes_256', '=', 'secret', 'cred', '.', 'kerberos_key_aes_128', '=', 'secret', 'elif', 'st', '==', 'KerberosSecretType', '.', 'DES', ':', 'cred', '.', 'kerberos_key_des', '=', 'secret', 'elif', 'st', '==', 'KerberosSecretType', '.', 'DES3', 'or', 'st', '==', 'KerberosSecretType', '.', 'TDES', ':', 'cred', '.', 'kerberos_key_des3', '=', 'secret', 'elif', 'st', '==', 'KerberosSecretType', '.', 'CCACHE', ':', 'cred', '.', 'ccache', '=', 'CCACHE', '.', 'from_file', '(', 'secret', ')', 'return', 'cred']
Credential input format: <domain>/<username>/<secret_type>:<secret>@<dc_ip_or_hostname>
['Credential', 'input', 'format', ':', '<domain', '>', '/', '<username', '>', '/', '<secret_type', '>', ':', '<secret', '>']
train
https://github.com/skelsec/minikerberos/blob/caf14c1d0132119d6e8a8f05120efb7d0824b2c6/minikerberos/common.py#L174-L208
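A hedged usage sketch following the documented format <domain>/<username>/<secret_type>:<secret>@<dc_ip_or_hostname>; all domains, names, hosts, and secrets below are placeholders.

# A plain password secret.
cred = KerberosCredential.from_connection_string(
    'CONTOSO/alice/password:S3cretValue@10.0.0.10')
print(cred.domain, cred.username)   # CONTOSO alice

# An RC4/NT-hash secret populates both nt_hash and kerberos_key_rc4.
cred2 = KerberosCredential.from_connection_string(
    'CONTOSO/svc_http/rc4:8846f7eaee8fb117ad06bdd830b7586c@dc01.contoso.local')
print(cred2.kerberos_key_rc4 == cred2.nt_hash)  # True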
2,673
koheimiya/extheano
extheano/jit.py
FuncInfo._get_keys_defdict
def _get_keys_defdict(self): '''Get the keys and the default dictionary of the given function's arguments ''' # inspect argspecs argspec = inspect.getargspec(self.func) keys, defvals = argspec.args, argspec.defaults # convert to (list_of_argkeys, dict_of_default_keys) if defvals is None: return keys, None else: defvals = list(defvals) keys.reverse() defvals.reverse() defdict = dict(zip(keys, defvals)) keys.reverse() return keys, defdict
python
def _get_keys_defdict(self): '''Get the keys and the default dictionary of the given function's arguments ''' # inspect argspecs argspec = inspect.getargspec(self.func) keys, defvals = argspec.args, argspec.defaults # convert to (list_of_argkeys, dict_of_default_keys) if defvals is None: return keys, None else: defvals = list(defvals) keys.reverse() defvals.reverse() defdict = dict(zip(keys, defvals)) keys.reverse() return keys, defdict
['def', '_get_keys_defdict', '(', 'self', ')', ':', '# inspect argspecs', 'argspec', '=', 'inspect', '.', 'getargspec', '(', 'self', '.', 'func', ')', 'keys', ',', 'defvals', '=', 'argspec', '.', 'args', ',', 'argspec', '.', 'defaults', '# convert to (list_of_argkeys, dict_of_default_keys)', 'if', 'defvals', 'is', 'None', ':', 'return', 'keys', ',', 'None', 'else', ':', 'defvals', '=', 'list', '(', 'defvals', ')', 'keys', '.', 'reverse', '(', ')', 'defvals', '.', 'reverse', '(', ')', 'defdict', '=', 'dict', '(', 'zip', '(', 'keys', ',', 'defvals', ')', ')', 'keys', '.', 'reverse', '(', ')', 'return', 'keys', ',', 'defdict']
Get the keys and the default dictionary of the given function's arguments
['Get', 'the', 'keys', 'and', 'the', 'default', 'dictionary', 'of', 'the', 'given', 'function', 's', 'arguments']
train
https://github.com/koheimiya/extheano/blob/ea099a6395ca8772660b2c715fb26cde12738181/extheano/jit.py#L156-L173
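A standalone illustration of what the method computes, using a throwaway function. Note that inspect.getargspec was removed in Python 3.11; inspect.getfullargspec gives the same args/defaults for this purpose.

import inspect

def f(a, b, c=3, d=4):
    return a + b + c + d

spec = inspect.getfullargspec(f)
keys, defvals = spec.args, spec.defaults        # ['a', 'b', 'c', 'd'], (3, 4)
# Zip from the right so defaults line up with the trailing arguments.
defdict = dict(zip(reversed(keys), reversed(defvals)))
print(keys, defdict)                            # ['a', 'b', 'c', 'd'] {'d': 4, 'c': 3}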
2,674
BD2KOnFHIR/i2b2model
i2b2model/shared/i2b2core.py
I2B2CoreWithUploadId._delete_upload_id
def _delete_upload_id(conn: Connection, table: Table, upload_id: int) -> int: """Remove all table records with the supplied upload_id :param conn: sql connection :param table: table to modify :param upload_id: target upload_id :return: number of records removed """ return conn.execute(delete(table).where(table.c.upload_id == upload_id)).rowcount if upload_id else 0
python
def _delete_upload_id(conn: Connection, table: Table, upload_id: int) -> int: """Remove all table records with the supplied upload_id :param conn: sql connection :param table: table to modify :param upload_id: target upload_id :return: number of records removed """ return conn.execute(delete(table).where(table.c.upload_id == upload_id)).rowcount if upload_id else 0
['def', '_delete_upload_id', '(', 'conn', ':', 'Connection', ',', 'table', ':', 'Table', ',', 'upload_id', ':', 'int', ')', '->', 'int', ':', 'return', 'conn', '.', 'execute', '(', 'delete', '(', 'table', ')', '.', 'where', '(', 'table', '.', 'c', '.', 'upload_id', '==', 'upload_id', ')', ')', '.', 'rowcount', 'if', 'upload_id', 'else', '0']
Remove all table records with the supplied upload_id :param conn: sql connection :param table: table to modify :param upload_id: target upload_id :return: number of records removed
['Remove', 'all', 'table', 'records', 'with', 'the', 'supplied', 'upload_id']
train
https://github.com/BD2KOnFHIR/i2b2model/blob/9d49bb53b0733dd83ab5b716014865e270a3c903/i2b2model/shared/i2b2core.py#L43-L51
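A hedged sketch of the same delete pattern against an in-memory SQLite table; the table and column layout below is made up for the example, not the real i2b2 schema.

from sqlalchemy import Column, Integer, MetaData, Table, create_engine, delete

engine = create_engine('sqlite://')
metadata = MetaData()
observations = Table('observation_fact', metadata,
                     Column('id', Integer, primary_key=True),
                     Column('upload_id', Integer))
metadata.create_all(engine)

with engine.connect() as conn:
    conn.execute(observations.insert(), [{'upload_id': 7}, {'upload_id': 8}])
    # Same pattern as above: delete rows matching the upload_id and count them.
    removed = conn.execute(
        delete(observations).where(observations.c.upload_id == 7)).rowcount
    print(removed)  # 1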
2,675
pricingassistant/mrq
mrq/worker.py
Worker.refresh_queues
def refresh_queues(self, fatal=False): """ Updates the list of currently known queues and subqueues """ try: queues = [] prefixes = [q for q in self.config["queues"] if q.endswith("/")] known_subqueues = Queue.all_known(prefixes=prefixes) for q in self.config["queues"]: queues.append(Queue(q)) if q.endswith("/"): for subqueue in known_subqueues: if subqueue.startswith(q): queues.append(Queue(subqueue)) self.queues = queues except Exception as e: # pylint: disable=broad-except self.log.error("When refreshing subqueues: %s", e) if fatal: raise
python
def refresh_queues(self, fatal=False): """ Updates the list of currently known queues and subqueues """ try: queues = [] prefixes = [q for q in self.config["queues"] if q.endswith("/")] known_subqueues = Queue.all_known(prefixes=prefixes) for q in self.config["queues"]: queues.append(Queue(q)) if q.endswith("/"): for subqueue in known_subqueues: if subqueue.startswith(q): queues.append(Queue(subqueue)) self.queues = queues except Exception as e: # pylint: disable=broad-except self.log.error("When refreshing subqueues: %s", e) if fatal: raise
['def', 'refresh_queues', '(', 'self', ',', 'fatal', '=', 'False', ')', ':', 'try', ':', 'queues', '=', '[', ']', 'prefixes', '=', '[', 'q', 'for', 'q', 'in', 'self', '.', 'config', '[', '"queues"', ']', 'if', 'q', '.', 'endswith', '(', '"/"', ')', ']', 'known_subqueues', '=', 'Queue', '.', 'all_known', '(', 'prefixes', '=', 'prefixes', ')', 'for', 'q', 'in', 'self', '.', 'config', '[', '"queues"', ']', ':', 'queues', '.', 'append', '(', 'Queue', '(', 'q', ')', ')', 'if', 'q', '.', 'endswith', '(', '"/"', ')', ':', 'for', 'subqueue', 'in', 'known_subqueues', ':', 'if', 'subqueue', '.', 'startswith', '(', 'q', ')', ':', 'queues', '.', 'append', '(', 'Queue', '(', 'subqueue', ')', ')', 'self', '.', 'queues', '=', 'queues', 'except', 'Exception', 'as', 'e', ':', '# pylint: disable=broad-except', 'self', '.', 'log', '.', 'error', '(', '"When refreshing subqueues: %s"', ',', 'e', ')', 'if', 'fatal', ':', 'raise']
Updates the list of currently known queues and subqueues
['Updates', 'the', 'list', 'of', 'currently', 'known', 'queues', 'and', 'subqueues']
train
https://github.com/pricingassistant/mrq/blob/d0a5a34de9cba38afa94fb7c9e17f9b570b79a50/mrq/worker.py#L190-L210
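A standalone sketch of the prefix-expansion logic: configured queues ending in "/" act as prefixes and are expanded with the matching subqueues known to the broker. Queue names below are placeholders and plain strings stand in for Queue objects.

configured = ["default", "emails/"]
known_subqueues = ["emails/high", "emails/low", "reports/nightly"]

queues = []
for q in configured:
    queues.append(q)
    if q.endswith("/"):
        queues.extend(s for s in known_subqueues if s.startswith(q))
print(queues)  # ['default', 'emails/', 'emails/high', 'emails/low']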
2,676
learningequality/ricecooker
ricecooker/utils/metadata_provider.py
CsvMetadataProvider.get_thumbnail_paths
def get_thumbnail_paths(self): """ Helper function used to avoid processing thumbnail files during `os.walk`. """ thumbnail_path_tuples = [] # channel thumbnail channel_info = self.get_channel_info() chthumbnail_path = channel_info.get('thumbnail_chan_path', None) if chthumbnail_path: chthumbnail_path_tuple = path_to_tuple(chthumbnail_path, windows=self.winpaths) thumbnail_path_tuples.append(chthumbnail_path_tuple) # content thumbnails for content_file_path_tuple, row in self.contentcache.items(): thumbnail_path = row.get('thumbnail_chan_path', None) if thumbnail_path: thumbnail_path_tuple = path_to_tuple(thumbnail_path, windows=self.winpaths) thumbnail_path_tuples.append(thumbnail_path_tuple) return thumbnail_path_tuples
python
def get_thumbnail_paths(self): """ Helper function used to avoid processing thumbnail files during `os.walk`. """ thumbnail_path_tuples = [] # channel thumbnail channel_info = self.get_channel_info() chthumbnail_path = channel_info.get('thumbnail_chan_path', None) if chthumbnail_path: chthumbnail_path_tuple = path_to_tuple(chthumbnail_path, windows=self.winpaths) thumbnail_path_tuples.append(chthumbnail_path_tuple) # content thumbnails for content_file_path_tuple, row in self.contentcache.items(): thumbnail_path = row.get('thumbnail_chan_path', None) if thumbnail_path: thumbnail_path_tuple = path_to_tuple(thumbnail_path, windows=self.winpaths) thumbnail_path_tuples.append(thumbnail_path_tuple) return thumbnail_path_tuples
['def', 'get_thumbnail_paths', '(', 'self', ')', ':', 'thumbnail_path_tuples', '=', '[', ']', '# channel thumbnail', 'channel_info', '=', 'self', '.', 'get_channel_info', '(', ')', 'chthumbnail_path', '=', 'channel_info', '.', 'get', '(', "'thumbnail_chan_path'", ',', 'None', ')', 'if', 'chthumbnail_path', ':', 'chthumbnail_path_tuple', '=', 'path_to_tuple', '(', 'chthumbnail_path', ',', 'windows', '=', 'self', '.', 'winpaths', ')', 'thumbnail_path_tuples', '.', 'append', '(', 'chthumbnail_path_tuple', ')', '# content thumbnails', 'for', 'content_file_path_tuple', ',', 'row', 'in', 'self', '.', 'contentcache', '.', 'items', '(', ')', ':', 'thumbnail_path', '=', 'row', '.', 'get', '(', "'thumbnail_chan_path'", ',', 'None', ')', 'if', 'thumbnail_path', ':', 'thumbnail_path_tuple', '=', 'path_to_tuple', '(', 'thumbnail_path', ',', 'windows', '=', 'self', '.', 'winpaths', ')', 'thumbnail_path_tuples', '.', 'append', '(', 'thumbnail_path_tuple', ')', 'return', 'thumbnail_path_tuples']
Helper function used to avoid processing thumbnail files during `os.walk`.
['Helper', 'function', 'used', 'to', 'avoid', 'processing', 'thumbnail', 'files', 'during', 'os', '.', 'walk', '.']
train
https://github.com/learningequality/ricecooker/blob/2f0385282500cb77ef2894646c6f9ce11bd7a853/ricecooker/utils/metadata_provider.py#L270-L287
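A standalone sketch of the collection logic above; path_to_tuple is simplified to a plain split on "/" rather than ricecooker's real helper, and the cache contents are made up.

def path_to_tuple(path):
    return tuple(path.split('/'))

channel_info = {'thumbnail_chan_path': 'channel/thumbnail.png'}
contentcache = {
    ('channel', 'lesson1.mp4'): {'thumbnail_chan_path': 'channel/lesson1.png'},
    ('channel', 'lesson2.mp4'): {},
}

thumbs = []
if channel_info.get('thumbnail_chan_path'):
    thumbs.append(path_to_tuple(channel_info['thumbnail_chan_path']))
for _, row in contentcache.items():
    if row.get('thumbnail_chan_path'):
        thumbs.append(path_to_tuple(row['thumbnail_chan_path']))
print(thumbs)  # [('channel', 'thumbnail.png'), ('channel', 'lesson1.png')]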
2,677
joanvila/aioredlock
aioredlock/redis.py
Instance.set_lock
async def set_lock(self, resource, lock_identifier, lock_timeout): """ Lock this instance and set lock expiration time to lock_timeout :param resource: redis key to set :param lock_identifier: uniquie id of lock :param lock_timeout: timeout for lock in seconds :raises: LockError if lock is not acquired """ lock_timeout_ms = int(lock_timeout * 1000) try: with await self.connect() as redis: await redis.eval( self.set_lock_script, keys=[resource], args=[lock_identifier, lock_timeout_ms] ) except aioredis.errors.ReplyError as exc: # script fault self.log.debug('Can not set lock "%s" on %s', resource, repr(self)) raise LockError('Can not set lock') from exc except (aioredis.errors.RedisError, OSError) as exc: self.log.error('Can not set lock "%s" on %s: %s', resource, repr(self), repr(exc)) raise LockError('Can not set lock') from exc except asyncio.CancelledError: self.log.debug('Lock "%s" is cancelled on %s', resource, repr(self)) raise except Exception as exc: self.log.exception('Can not set lock "%s" on %s', resource, repr(self)) raise else: self.log.debug('Lock "%s" is set on %s', resource, repr(self))
python
async def set_lock(self, resource, lock_identifier, lock_timeout): """ Lock this instance and set lock expiration time to lock_timeout :param resource: redis key to set :param lock_identifier: uniquie id of lock :param lock_timeout: timeout for lock in seconds :raises: LockError if lock is not acquired """ lock_timeout_ms = int(lock_timeout * 1000) try: with await self.connect() as redis: await redis.eval( self.set_lock_script, keys=[resource], args=[lock_identifier, lock_timeout_ms] ) except aioredis.errors.ReplyError as exc: # script fault self.log.debug('Can not set lock "%s" on %s', resource, repr(self)) raise LockError('Can not set lock') from exc except (aioredis.errors.RedisError, OSError) as exc: self.log.error('Can not set lock "%s" on %s: %s', resource, repr(self), repr(exc)) raise LockError('Can not set lock') from exc except asyncio.CancelledError: self.log.debug('Lock "%s" is cancelled on %s', resource, repr(self)) raise except Exception as exc: self.log.exception('Can not set lock "%s" on %s', resource, repr(self)) raise else: self.log.debug('Lock "%s" is set on %s', resource, repr(self))
['async', 'def', 'set_lock', '(', 'self', ',', 'resource', ',', 'lock_identifier', ',', 'lock_timeout', ')', ':', 'lock_timeout_ms', '=', 'int', '(', 'lock_timeout', '*', '1000', ')', 'try', ':', 'with', 'await', 'self', '.', 'connect', '(', ')', 'as', 'redis', ':', 'await', 'redis', '.', 'eval', '(', 'self', '.', 'set_lock_script', ',', 'keys', '=', '[', 'resource', ']', ',', 'args', '=', '[', 'lock_identifier', ',', 'lock_timeout_ms', ']', ')', 'except', 'aioredis', '.', 'errors', '.', 'ReplyError', 'as', 'exc', ':', '# script fault', 'self', '.', 'log', '.', 'debug', '(', '\'Can not set lock "%s" on %s\'', ',', 'resource', ',', 'repr', '(', 'self', ')', ')', 'raise', 'LockError', '(', "'Can not set lock'", ')', 'from', 'exc', 'except', '(', 'aioredis', '.', 'errors', '.', 'RedisError', ',', 'OSError', ')', 'as', 'exc', ':', 'self', '.', 'log', '.', 'error', '(', '\'Can not set lock "%s" on %s: %s\'', ',', 'resource', ',', 'repr', '(', 'self', ')', ',', 'repr', '(', 'exc', ')', ')', 'raise', 'LockError', '(', "'Can not set lock'", ')', 'from', 'exc', 'except', 'asyncio', '.', 'CancelledError', ':', 'self', '.', 'log', '.', 'debug', '(', '\'Lock "%s" is cancelled on %s\'', ',', 'resource', ',', 'repr', '(', 'self', ')', ')', 'raise', 'except', 'Exception', 'as', 'exc', ':', 'self', '.', 'log', '.', 'exception', '(', '\'Can not set lock "%s" on %s\'', ',', 'resource', ',', 'repr', '(', 'self', ')', ')', 'raise', 'else', ':', 'self', '.', 'log', '.', 'debug', '(', '\'Lock "%s" is set on %s\'', ',', 'resource', ',', 'repr', '(', 'self', ')', ')']
Lock this instance and set lock expiration time to lock_timeout :param resource: redis key to set :param lock_identifier: unique id of lock :param lock_timeout: timeout for lock in seconds :raises: LockError if lock is not acquired
['Lock', 'this', 'instance', 'and', 'set', 'lock', 'expiration', 'time', 'to', 'lock_timeout', ':', 'param', 'resource', ':', 'redis', 'key', 'to', 'set', ':', 'param', 'lock_identifier', ':', 'uniquie', 'id', 'of', 'lock', ':', 'param', 'lock_timeout', ':', 'timeout', 'for', 'lock', 'in', 'seconds', ':', 'raises', ':', 'LockError', 'if', 'lock', 'is', 'not', 'acquired']
train
https://github.com/joanvila/aioredlock/blob/6c62f0895c93b26b87ca8e3fe36bc024c81be421/aioredlock/redis.py#L127-L162
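A hedged sketch of the public aioredlock API that sits on top of this internal method; it assumes a Redis server on localhost:6379 and uses a placeholder resource name.

import asyncio
from aioredlock import Aioredlock, LockError

async def main():
    lock_manager = Aioredlock([{'host': 'localhost', 'port': 6379}])
    try:
        lock = await lock_manager.lock('jobs:nightly-report')
        # ... critical section protected by the lock ...
        await lock_manager.unlock(lock)
    except LockError:
        print('could not acquire the lock')
    finally:
        await lock_manager.destroy()

asyncio.run(main())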
2,678
Autodesk/aomi
aomi/helpers.py
load_word_file
def load_word_file(filename): """Loads a words file as a list of lines""" words_file = resource_filename(__name__, "words/%s" % filename) handle = open(words_file, 'r') words = handle.readlines() handle.close() return words
python
def load_word_file(filename): """Loads a words file as a list of lines""" words_file = resource_filename(__name__, "words/%s" % filename) handle = open(words_file, 'r') words = handle.readlines() handle.close() return words
['def', 'load_word_file', '(', 'filename', ')', ':', 'words_file', '=', 'resource_filename', '(', '__name__', ',', '"words/%s"', '%', 'filename', ')', 'handle', '=', 'open', '(', 'words_file', ',', "'r'", ')', 'words', '=', 'handle', '.', 'readlines', '(', ')', 'handle', '.', 'close', '(', ')', 'return', 'words']
Loads a words file as a list of lines
['Loads', 'a', 'words', 'file', 'as', 'a', 'list', 'of', 'lines']
train
https://github.com/Autodesk/aomi/blob/84da2dfb0424837adf9c4ddc1aa352e942bb7a4a/aomi/helpers.py#L165-L171
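A hedged usage sketch; the words file name below is a placeholder, since the actual files shipped under aomi's words/ directory are not listed in this record.

from aomi.helpers import load_word_file

words = load_word_file('animals.txt')  # hypothetical file name
print(len(words), words[0].strip())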
2,679
vanheeringen-lab/gimmemotifs
gimmemotifs/motif.py
Motif.pwm_scan
def pwm_scan(self, fa, cutoff=0.9, nreport=50, scan_rc=True): """Scan sequences with this motif. Scan sequences from a FASTA object with this motif. Less efficient than using a Scanner object. By setting the cutoff to 0.0 and nreport to 1, the best match for every sequence will be returned. Only the position of the matches is returned. Parameters ---------- fa : Fasta object Fasta object to scan. cutoff : float , optional Cutoff to use for motif scanning. This cutoff is not specifically optimized and the strictness will vary a lot with motif lengh. nreport : int , optional Maximum number of matches to report. scan_rc : bool , optional Scan the reverse complement. True by default. Returns ------- matches : dict Dictionary with motif matches. Only the position of the matches is returned. """ c = self.pwm_min_score() + ( self.pwm_max_score() - self.pwm_min_score()) * cutoff pwm = self.pwm matches = {} for name, seq in fa.items(): matches[name] = [] result = pfmscan(seq.upper(), pwm, c, nreport, scan_rc) for _,pos,_ in result: matches[name].append(pos) return matches
python
def pwm_scan(self, fa, cutoff=0.9, nreport=50, scan_rc=True): """Scan sequences with this motif. Scan sequences from a FASTA object with this motif. Less efficient than using a Scanner object. By setting the cutoff to 0.0 and nreport to 1, the best match for every sequence will be returned. Only the position of the matches is returned. Parameters ---------- fa : Fasta object Fasta object to scan. cutoff : float , optional Cutoff to use for motif scanning. This cutoff is not specifically optimized and the strictness will vary a lot with motif lengh. nreport : int , optional Maximum number of matches to report. scan_rc : bool , optional Scan the reverse complement. True by default. Returns ------- matches : dict Dictionary with motif matches. Only the position of the matches is returned. """ c = self.pwm_min_score() + ( self.pwm_max_score() - self.pwm_min_score()) * cutoff pwm = self.pwm matches = {} for name, seq in fa.items(): matches[name] = [] result = pfmscan(seq.upper(), pwm, c, nreport, scan_rc) for _,pos,_ in result: matches[name].append(pos) return matches
['def', 'pwm_scan', '(', 'self', ',', 'fa', ',', 'cutoff', '=', '0.9', ',', 'nreport', '=', '50', ',', 'scan_rc', '=', 'True', ')', ':', 'c', '=', 'self', '.', 'pwm_min_score', '(', ')', '+', '(', 'self', '.', 'pwm_max_score', '(', ')', '-', 'self', '.', 'pwm_min_score', '(', ')', ')', '*', 'cutoff', 'pwm', '=', 'self', '.', 'pwm', 'matches', '=', '{', '}', 'for', 'name', ',', 'seq', 'in', 'fa', '.', 'items', '(', ')', ':', 'matches', '[', 'name', ']', '=', '[', ']', 'result', '=', 'pfmscan', '(', 'seq', '.', 'upper', '(', ')', ',', 'pwm', ',', 'c', ',', 'nreport', ',', 'scan_rc', ')', 'for', '_', ',', 'pos', ',', '_', 'in', 'result', ':', 'matches', '[', 'name', ']', '.', 'append', '(', 'pos', ')', 'return', 'matches']
Scan sequences with this motif. Scan sequences from a FASTA object with this motif. Less efficient than using a Scanner object. By setting the cutoff to 0.0 and nreport to 1, the best match for every sequence will be returned. Only the position of the matches is returned. Parameters ---------- fa : Fasta object Fasta object to scan. cutoff : float , optional Cutoff to use for motif scanning. This cutoff is not specifically optimized and the strictness will vary a lot with motif length. nreport : int , optional Maximum number of matches to report. scan_rc : bool , optional Scan the reverse complement. True by default. Returns ------- matches : dict Dictionary with motif matches. Only the position of the matches is returned.
['Scan', 'sequences', 'with', 'this', 'motif', '.']
train
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/motif.py#L408-L443
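A hedged usage sketch; the import paths and the Motif/Fasta constructors are assumptions based on this module, the frequency matrix is a toy example, and the FASTA file name is a placeholder.

from gimmemotifs.motif import Motif
from gimmemotifs.fasta import Fasta

motif = Motif([[0.90, 0.03, 0.04, 0.03],
               [0.05, 0.05, 0.05, 0.85],
               [0.10, 0.10, 0.70, 0.10]])   # A, C, G, T frequencies per position
fa = Fasta('promoters.fa')                  # placeholder FASTA file
matches = motif.pwm_scan(fa, cutoff=0.8, nreport=5)
for name, positions in matches.items():
    print(name, positions)                  # match start positions per sequence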
2,680
ContextLab/quail
quail/distance.py
correlation
def correlation(a, b): "Returns correlation distance between a and b" if isinstance(a, list): a = np.array(a) if isinstance(b, list): b = np.array(b) a = a.reshape(1, -1) b = b.reshape(1, -1) return cdist(a, b, 'correlation')
python
def correlation(a, b): "Returns correlation distance between a and b" if isinstance(a, list): a = np.array(a) if isinstance(b, list): b = np.array(b) a = a.reshape(1, -1) b = b.reshape(1, -1) return cdist(a, b, 'correlation')
['def', 'correlation', '(', 'a', ',', 'b', ')', ':', 'if', 'isinstance', '(', 'a', ',', 'list', ')', ':', 'a', '=', 'np', '.', 'array', '(', 'a', ')', 'if', 'isinstance', '(', 'b', ',', 'list', ')', ':', 'b', '=', 'np', '.', 'array', '(', 'b', ')', 'a', '=', 'a', '.', 'reshape', '(', '1', ',', '-', '1', ')', 'b', '=', 'b', '.', 'reshape', '(', '1', ',', '-', '1', ')', 'return', 'cdist', '(', 'a', ',', 'b', ',', "'correlation'", ')']
Returns correlation distance between a and b
['Returns', 'correlation', 'distance', 'between', 'a', 'and', 'b']
train
https://github.com/ContextLab/quail/blob/71dd53c792dd915dc84879d8237e3582dd68b7a4/quail/distance.py#L9-L17
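A standalone check of the same computation using scipy's cdist directly, with toy vectors.

import numpy as np
from scipy.spatial.distance import cdist

a = np.array([1.0, 2.0, 3.0]).reshape(1, -1)
b = np.array([2.0, 4.0, 6.0]).reshape(1, -1)
# Correlation distance is 1 - Pearson correlation, so perfectly correlated
# vectors give a distance of ~0.
print(cdist(a, b, 'correlation'))  # ~[[0.]]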
2,681
Azure/azure-cli-extensions
src/sqlvm-preview/azext_sqlvm_preview/_format.py
format_additional_features_server_configurations
def format_additional_features_server_configurations(result): ''' Formats the AdditionalFeaturesServerConfigurations object removing arguments that are empty ''' from collections import OrderedDict # Only display parameters that have content order_dict = OrderedDict() if result.is_rservices_enabled is not None: order_dict['isRServicesEnabled'] = result.is_rservices_enabled if result.backup_permissions_for_azure_backup_svc is not None: order_dict['backupPermissionsForAzureBackupSvc'] = result.backup_permissions_for_azure_backup_svc return order_dict
python
def format_additional_features_server_configurations(result): ''' Formats the AdditionalFeaturesServerConfigurations object removing arguments that are empty ''' from collections import OrderedDict # Only display parameters that have content order_dict = OrderedDict() if result.is_rservices_enabled is not None: order_dict['isRServicesEnabled'] = result.is_rservices_enabled if result.backup_permissions_for_azure_backup_svc is not None: order_dict['backupPermissionsForAzureBackupSvc'] = result.backup_permissions_for_azure_backup_svc return order_dict
['def', 'format_additional_features_server_configurations', '(', 'result', ')', ':', 'from', 'collections', 'import', 'OrderedDict', '# Only display parameters that have content', 'order_dict', '=', 'OrderedDict', '(', ')', 'if', 'result', '.', 'is_rservices_enabled', 'is', 'not', 'None', ':', 'order_dict', '[', "'isRServicesEnabled'", ']', '=', 'result', '.', 'is_rservices_enabled', 'if', 'result', '.', 'backup_permissions_for_azure_backup_svc', 'is', 'not', 'None', ':', 'order_dict', '[', "'backupPermissionsForAzureBackupSvc'", ']', '=', 'result', '.', 'backup_permissions_for_azure_backup_svc', 'return', 'order_dict']
Formats the AdditionalFeaturesServerConfigurations object removing arguments that are empty
['Formats', 'the', 'AdditionalFeaturesServerConfigurations', 'object', 'removing', 'arguments', 'that', 'are', 'empty']
train
https://github.com/Azure/azure-cli-extensions/blob/3d4854205b0f0d882f688cfa12383d14506c2e35/src/sqlvm-preview/azext_sqlvm_preview/_format.py#L135-L147
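A standalone sketch with a stand-in result object (the real one comes from the Azure SDK model classes), assuming the function above is in scope; only the non-None fields end up in the formatted output.

from collections import namedtuple

Config = namedtuple('Config', ['is_rservices_enabled',
                               'backup_permissions_for_azure_backup_svc'])
result = Config(is_rservices_enabled=True,
                backup_permissions_for_azure_backup_svc=None)

formatted = format_additional_features_server_configurations(result)
print(formatted)  # OrderedDict([('isRServicesEnabled', True)])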
2,682
schapman1974/tinymongo
setup.py
parse_md_to_rst
def parse_md_to_rst(file): """Read Markdown file and convert to ReStructured Text.""" try: from m2r import parse_from_file return parse_from_file(file).replace( "artwork/", "http://198.27.119.65/" ) except ImportError: # m2r may not be installed in user environment return read(file)
python
def parse_md_to_rst(file): """Read Markdown file and convert to ReStructured Text.""" try: from m2r import parse_from_file return parse_from_file(file).replace( "artwork/", "http://198.27.119.65/" ) except ImportError: # m2r may not be installed in user environment return read(file)
['def', 'parse_md_to_rst', '(', 'file', ')', ':', 'try', ':', 'from', 'm2r', 'import', 'parse_from_file', 'return', 'parse_from_file', '(', 'file', ')', '.', 'replace', '(', '"artwork/"', ',', '"http://198.27.119.65/"', ')', 'except', 'ImportError', ':', '# m2r may not be installed in user environment', 'return', 'read', '(', 'file', ')']
Read Markdown file and convert to ReStructured Text.
['Read', 'Markdown', 'file', 'and', 'convert', 'to', 'ReStructured', 'Text', '.']
train
https://github.com/schapman1974/tinymongo/blob/993048059dc0aa789d879b69feb79a0f237a60b3/setup.py#L14-L23
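A hedged usage sketch; README.md is a placeholder path, and if m2r is not installed the helper simply falls back to returning the raw Markdown text.

long_description = parse_md_to_rst('README.md')
print(long_description[:80])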
2,683
oasis-open/cti-stix-validator
stix2validator/v21/shoulds.py
relationships_strict
def relationships_strict(instance): """Ensure that only the relationship types defined in the specification are used. """ # Don't check objects that aren't relationships or that are custom objects if (instance['type'] != 'relationship' or instance['type'] not in enums.TYPES): return if ('relationship_type' not in instance or 'source_ref' not in instance or 'target_ref' not in instance): # Since these fields are required, schemas will already catch the error return r_type = instance['relationship_type'] try: r_source = re.search(r"(.+)\-\-", instance['source_ref']).group(1) r_target = re.search(r"(.+)\-\-", instance['target_ref']).group(1) except (AttributeError, TypeError): # Schemas already catch errors of these properties not being strings or # not containing the string '--'. return if (r_type in enums.COMMON_RELATIONSHIPS or r_source in enums.NON_SDOS or r_target in enums.NON_SDOS): # If all objects can have this relationship type, no more checks needed # Schemas already catch if source/target type cannot have relationship return if r_source not in enums.RELATIONSHIPS: return JSONError("'%s' is not a suggested relationship source object " "for the '%s' relationship." % (r_source, r_type), instance['id'], 'relationship-types') if r_type not in enums.RELATIONSHIPS[r_source]: return JSONError("'%s' is not a suggested relationship type for '%s' " "objects." % (r_type, r_source), instance['id'], 'relationship-types') if r_target not in enums.RELATIONSHIPS[r_source][r_type]: return JSONError("'%s' is not a suggested relationship target object " "for '%s' objects with the '%s' relationship." % (r_target, r_source, r_type), instance['id'], 'relationship-types')
python
def relationships_strict(instance): """Ensure that only the relationship types defined in the specification are used. """ # Don't check objects that aren't relationships or that are custom objects if (instance['type'] != 'relationship' or instance['type'] not in enums.TYPES): return if ('relationship_type' not in instance or 'source_ref' not in instance or 'target_ref' not in instance): # Since these fields are required, schemas will already catch the error return r_type = instance['relationship_type'] try: r_source = re.search(r"(.+)\-\-", instance['source_ref']).group(1) r_target = re.search(r"(.+)\-\-", instance['target_ref']).group(1) except (AttributeError, TypeError): # Schemas already catch errors of these properties not being strings or # not containing the string '--'. return if (r_type in enums.COMMON_RELATIONSHIPS or r_source in enums.NON_SDOS or r_target in enums.NON_SDOS): # If all objects can have this relationship type, no more checks needed # Schemas already catch if source/target type cannot have relationship return if r_source not in enums.RELATIONSHIPS: return JSONError("'%s' is not a suggested relationship source object " "for the '%s' relationship." % (r_source, r_type), instance['id'], 'relationship-types') if r_type not in enums.RELATIONSHIPS[r_source]: return JSONError("'%s' is not a suggested relationship type for '%s' " "objects." % (r_type, r_source), instance['id'], 'relationship-types') if r_target not in enums.RELATIONSHIPS[r_source][r_type]: return JSONError("'%s' is not a suggested relationship target object " "for '%s' objects with the '%s' relationship." % (r_target, r_source, r_type), instance['id'], 'relationship-types')
['def', 'relationships_strict', '(', 'instance', ')', ':', "# Don't check objects that aren't relationships or that are custom objects", 'if', '(', 'instance', '[', "'type'", ']', '!=', "'relationship'", 'or', 'instance', '[', "'type'", ']', 'not', 'in', 'enums', '.', 'TYPES', ')', ':', 'return', 'if', '(', "'relationship_type'", 'not', 'in', 'instance', 'or', "'source_ref'", 'not', 'in', 'instance', 'or', "'target_ref'", 'not', 'in', 'instance', ')', ':', '# Since these fields are required, schemas will already catch the error', 'return', 'r_type', '=', 'instance', '[', "'relationship_type'", ']', 'try', ':', 'r_source', '=', 're', '.', 'search', '(', 'r"(.+)\\-\\-"', ',', 'instance', '[', "'source_ref'", ']', ')', '.', 'group', '(', '1', ')', 'r_target', '=', 're', '.', 'search', '(', 'r"(.+)\\-\\-"', ',', 'instance', '[', "'target_ref'", ']', ')', '.', 'group', '(', '1', ')', 'except', '(', 'AttributeError', ',', 'TypeError', ')', ':', '# Schemas already catch errors of these properties not being strings or', "# not containing the string '--'.", 'return', 'if', '(', 'r_type', 'in', 'enums', '.', 'COMMON_RELATIONSHIPS', 'or', 'r_source', 'in', 'enums', '.', 'NON_SDOS', 'or', 'r_target', 'in', 'enums', '.', 'NON_SDOS', ')', ':', '# If all objects can have this relationship type, no more checks needed', '# Schemas already catch if source/target type cannot have relationship', 'return', 'if', 'r_source', 'not', 'in', 'enums', '.', 'RELATIONSHIPS', ':', 'return', 'JSONError', '(', '"\'%s\' is not a suggested relationship source object "', '"for the \'%s\' relationship."', '%', '(', 'r_source', ',', 'r_type', ')', ',', 'instance', '[', "'id'", ']', ',', "'relationship-types'", ')', 'if', 'r_type', 'not', 'in', 'enums', '.', 'RELATIONSHIPS', '[', 'r_source', ']', ':', 'return', 'JSONError', '(', '"\'%s\' is not a suggested relationship type for \'%s\' "', '"objects."', '%', '(', 'r_type', ',', 'r_source', ')', ',', 'instance', '[', "'id'", ']', ',', "'relationship-types'", ')', 'if', 'r_target', 'not', 'in', 'enums', '.', 'RELATIONSHIPS', '[', 'r_source', ']', '[', 'r_type', ']', ':', 'return', 'JSONError', '(', '"\'%s\' is not a suggested relationship target object "', '"for \'%s\' objects with the \'%s\' relationship."', '%', '(', 'r_target', ',', 'r_source', ',', 'r_type', ')', ',', 'instance', '[', "'id'", ']', ',', "'relationship-types'", ')']
Ensure that only the relationship types defined in the specification are used.
['Ensure', 'that', 'only', 'the', 'relationship', 'types', 'defined', 'in', 'the', 'specification', 'are', 'used', '.']
train
https://github.com/oasis-open/cti-stix-validator/blob/a607014e3fa500a7678f8b61b278456ca581f9d0/stix2validator/v21/shoulds.py#L271-L315
2,684
IrvKalb/pygwidgets
pygwidgets/pygwidgets.py
Dragger.handleEvent
def handleEvent(self, eventObj): """This method should be called every time through the main loop. It handles all of the dragging Parameters: | eventObj - the event object obtained by calling pygame.event.get() Returns: | False most of the time | True when the user finishes dragging by lifting up on the mouse. """ if not self.isEnabled: return False if eventObj.type not in (MOUSEMOTION, MOUSEBUTTONUP, MOUSEBUTTONDOWN) : # The dragger only cares about mouse-related events return False clicked = False if eventObj.type == MOUSEBUTTONDOWN: if self.rect.collidepoint(eventObj.pos): self.dragging = True self.deltaX = eventObj.pos[0] - self.rect.left self.deltaY = eventObj.pos[1] - self.rect.top self.startDraggingX = self.rect.left self.startDraggingY = self.rect.top elif eventObj.type == MOUSEBUTTONUP: if self.dragging: self.dragging = False clicked = True self.mouseUpLoc = (eventObj.pos[0], eventObj.pos[1]) self.rect.left = eventObj.pos[0] - self.deltaX self.rect.top = eventObj.pos[1] - self.deltaY self.setLoc((self.rect.left, self.rect.top)) elif eventObj.type == MOUSEMOTION: if self.dragging: self.rect.left = eventObj.pos[0] - self.deltaX self.rect.top = eventObj.pos[1] - self.deltaY else: self.mouseOver = self.rect.collidepoint(eventObj.pos) if clicked: if self.callBack is not None: self.callBack(self.nickname) return clicked
python
def handleEvent(self, eventObj): """This method should be called every time through the main loop. It handles all of the dragging Parameters: | eventObj - the event object obtained by calling pygame.event.get() Returns: | False most of the time | True when the user finishes dragging by lifting up on the mouse. """ if not self.isEnabled: return False if eventObj.type not in (MOUSEMOTION, MOUSEBUTTONUP, MOUSEBUTTONDOWN) : # The dragger only cares about mouse-related events return False clicked = False if eventObj.type == MOUSEBUTTONDOWN: if self.rect.collidepoint(eventObj.pos): self.dragging = True self.deltaX = eventObj.pos[0] - self.rect.left self.deltaY = eventObj.pos[1] - self.rect.top self.startDraggingX = self.rect.left self.startDraggingY = self.rect.top elif eventObj.type == MOUSEBUTTONUP: if self.dragging: self.dragging = False clicked = True self.mouseUpLoc = (eventObj.pos[0], eventObj.pos[1]) self.rect.left = eventObj.pos[0] - self.deltaX self.rect.top = eventObj.pos[1] - self.deltaY self.setLoc((self.rect.left, self.rect.top)) elif eventObj.type == MOUSEMOTION: if self.dragging: self.rect.left = eventObj.pos[0] - self.deltaX self.rect.top = eventObj.pos[1] - self.deltaY else: self.mouseOver = self.rect.collidepoint(eventObj.pos) if clicked: if self.callBack is not None: self.callBack(self.nickname) return clicked
['def', 'handleEvent', '(', 'self', ',', 'eventObj', ')', ':', 'if', 'not', 'self', '.', 'isEnabled', ':', 'return', 'False', 'if', 'eventObj', '.', 'type', 'not', 'in', '(', 'MOUSEMOTION', ',', 'MOUSEBUTTONUP', ',', 'MOUSEBUTTONDOWN', ')', ':', '# The dragger only cares about mouse-related events\r', 'return', 'False', 'clicked', '=', 'False', 'if', 'eventObj', '.', 'type', '==', 'MOUSEBUTTONDOWN', ':', 'if', 'self', '.', 'rect', '.', 'collidepoint', '(', 'eventObj', '.', 'pos', ')', ':', 'self', '.', 'dragging', '=', 'True', 'self', '.', 'deltaX', '=', 'eventObj', '.', 'pos', '[', '0', ']', '-', 'self', '.', 'rect', '.', 'left', 'self', '.', 'deltaY', '=', 'eventObj', '.', 'pos', '[', '1', ']', '-', 'self', '.', 'rect', '.', 'top', 'self', '.', 'startDraggingX', '=', 'self', '.', 'rect', '.', 'left', 'self', '.', 'startDraggingY', '=', 'self', '.', 'rect', '.', 'top', 'elif', 'eventObj', '.', 'type', '==', 'MOUSEBUTTONUP', ':', 'if', 'self', '.', 'dragging', ':', 'self', '.', 'dragging', '=', 'False', 'clicked', '=', 'True', 'self', '.', 'mouseUpLoc', '=', '(', 'eventObj', '.', 'pos', '[', '0', ']', ',', 'eventObj', '.', 'pos', '[', '1', ']', ')', 'self', '.', 'rect', '.', 'left', '=', 'eventObj', '.', 'pos', '[', '0', ']', '-', 'self', '.', 'deltaX', 'self', '.', 'rect', '.', 'top', '=', 'eventObj', '.', 'pos', '[', '1', ']', '-', 'self', '.', 'deltaY', 'self', '.', 'setLoc', '(', '(', 'self', '.', 'rect', '.', 'left', ',', 'self', '.', 'rect', '.', 'top', ')', ')', 'elif', 'eventObj', '.', 'type', '==', 'MOUSEMOTION', ':', 'if', 'self', '.', 'dragging', ':', 'self', '.', 'rect', '.', 'left', '=', 'eventObj', '.', 'pos', '[', '0', ']', '-', 'self', '.', 'deltaX', 'self', '.', 'rect', '.', 'top', '=', 'eventObj', '.', 'pos', '[', '1', ']', '-', 'self', '.', 'deltaY', 'else', ':', 'self', '.', 'mouseOver', '=', 'self', '.', 'rect', '.', 'collidepoint', '(', 'eventObj', '.', 'pos', ')', 'if', 'clicked', ':', 'if', 'self', '.', 'callBack', 'is', 'not', 'None', ':', 'self', '.', 'callBack', '(', 'self', '.', 'nickname', ')', 'return', 'clicked']
This method should be called every time through the main loop. It handles all of the dragging Parameters: | eventObj - the event object obtained by calling pygame.event.get() Returns: | False most of the time | True when the user finishes dragging by lifting up on the mouse.
['This', 'method', 'should', 'be', 'called', 'every', 'time', 'through', 'the', 'main', 'loop', '.', 'It', 'handles', 'all', 'of', 'the', 'dragging', 'Parameters', ':', '|', 'eventObj', '-', 'the', 'event', 'object', 'obtained', 'by', 'calling', 'pygame', '.', 'event', '.', 'get', '()', 'Returns', ':', '|', 'False', 'most', 'of', 'the', 'time', '|', 'True', 'when', 'the', 'user', 'finishes', 'dragging', 'by', 'lifting', 'up', 'on', 'the', 'mouse', '.']
train
https://github.com/IrvKalb/pygwidgets/blob/a830d8885d4d209e471cb53816277d30db56273c/pygwidgets/pygwidgets.py#L2068-L2118
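A sketch of the main-loop pattern the handleEvent docstring describes, assuming a pygwidgets Dragger has been constructed; the constructor arguments and image path below are assumptions about the widget API, not taken from the record.

import pygame
from pygame.locals import QUIT
import pygwidgets

pygame.init()
window = pygame.display.set_mode((640, 480))
dragger = pygwidgets.Dragger(window, (100, 100), 'images/token.png')  # assumed signature

while True:
    for event in pygame.event.get():
        if event.type == QUIT:
            pygame.quit()
            raise SystemExit
        if dragger.handleEvent(event):  # True only when the user releases the mouse button
            print('drag finished')
    window.fill((0, 0, 0))
    dragger.draw()
    pygame.display.update()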
2,685
gwastro/pycbc-glue
pycbc_glue/ligolw/dbtables.py
TimeSlideTable.get_time_slide_id
def get_time_slide_id(self, offsetdict, create_new = None, superset_ok = False, nonunique_ok = False): """ Return the time_slide_id corresponding to the offset vector described by offsetdict, a dictionary of instrument/offset pairs. If the optional create_new argument is None (the default), then the table must contain a matching offset vector. The return value is the ID of that vector. If the table does not contain a matching offset vector then KeyError is raised. If the optional create_new argument is set to a Process object (or any other object with a process_id attribute), then if the table does not contain a matching offset vector a new one will be added to the table and marked as having been created by the given process. The return value is the ID of the (possibly newly created) matching offset vector. If the optional superset_ok argument is False (the default) then an offset vector in the table is considered to "match" the requested offset vector only if they contain the exact same set of instruments. If the superset_ok argument is True, then an offset vector in the table is considered to match the requested offset vector as long as it provides the same offsets for the same instruments as the requested vector, even if it provides offsets for other instruments as well. More than one offset vector in the table might match the requested vector. If the optional nonunique_ok argument is False (the default), then KeyError will be raised if more than one offset vector in the table is found to match the requested vector. If the optional nonunique_ok is True then the return value is the ID of one of the matching offset vectors selected at random. """ # look for matching offset vectors if superset_ok: ids = [id for id, slide in self.as_dict().items() if offsetdict == dict((instrument, offset) for instrument, offset in slide.items() if instrument in offsetdict)] else: ids = [id for id, slide in self.as_dict().items() if offsetdict == slide] if len(ids) > 1: # found more than one if nonunique_ok: # and that's OK return ids[0] # and that's not OK raise KeyError(offsetdict) if len(ids) == 1: # found one return ids[0] # offset vector not found in table if create_new is None: # and that's not OK raise KeyError(offsetdict) # that's OK, create new vector id = self.get_next_id() for instrument, offset in offsetdict.items(): row = self.RowType() row.process_id = create_new.process_id row.time_slide_id = id row.instrument = instrument row.offset = offset self.append(row) # return new ID return id
python
def get_time_slide_id(self, offsetdict, create_new = None, superset_ok = False, nonunique_ok = False): """ Return the time_slide_id corresponding to the offset vector described by offsetdict, a dictionary of instrument/offset pairs. If the optional create_new argument is None (the default), then the table must contain a matching offset vector. The return value is the ID of that vector. If the table does not contain a matching offset vector then KeyError is raised. If the optional create_new argument is set to a Process object (or any other object with a process_id attribute), then if the table does not contain a matching offset vector a new one will be added to the table and marked as having been created by the given process. The return value is the ID of the (possibly newly created) matching offset vector. If the optional superset_ok argument is False (the default) then an offset vector in the table is considered to "match" the requested offset vector only if they contain the exact same set of instruments. If the superset_ok argument is True, then an offset vector in the table is considered to match the requested offset vector as long as it provides the same offsets for the same instruments as the requested vector, even if it provides offsets for other instruments as well. More than one offset vector in the table might match the requested vector. If the optional nonunique_ok argument is False (the default), then KeyError will be raised if more than one offset vector in the table is found to match the requested vector. If the optional nonunique_ok is True then the return value is the ID of one of the matching offset vectors selected at random. """ # look for matching offset vectors if superset_ok: ids = [id for id, slide in self.as_dict().items() if offsetdict == dict((instrument, offset) for instrument, offset in slide.items() if instrument in offsetdict)] else: ids = [id for id, slide in self.as_dict().items() if offsetdict == slide] if len(ids) > 1: # found more than one if nonunique_ok: # and that's OK return ids[0] # and that's not OK raise KeyError(offsetdict) if len(ids) == 1: # found one return ids[0] # offset vector not found in table if create_new is None: # and that's not OK raise KeyError(offsetdict) # that's OK, create new vector id = self.get_next_id() for instrument, offset in offsetdict.items(): row = self.RowType() row.process_id = create_new.process_id row.time_slide_id = id row.instrument = instrument row.offset = offset self.append(row) # return new ID return id
['def', 'get_time_slide_id', '(', 'self', ',', 'offsetdict', ',', 'create_new', '=', 'None', ',', 'superset_ok', '=', 'False', ',', 'nonunique_ok', '=', 'False', ')', ':', '# look for matching offset vectors', 'if', 'superset_ok', ':', 'ids', '=', '[', 'id', 'for', 'id', ',', 'slide', 'in', 'self', '.', 'as_dict', '(', ')', '.', 'items', '(', ')', 'if', 'offsetdict', '==', 'dict', '(', '(', 'instrument', ',', 'offset', ')', 'for', 'instrument', ',', 'offset', 'in', 'slide', '.', 'items', '(', ')', 'if', 'instrument', 'in', 'offsetdict', ')', ']', 'else', ':', 'ids', '=', '[', 'id', 'for', 'id', ',', 'slide', 'in', 'self', '.', 'as_dict', '(', ')', '.', 'items', '(', ')', 'if', 'offsetdict', '==', 'slide', ']', 'if', 'len', '(', 'ids', ')', '>', '1', ':', '# found more than one', 'if', 'nonunique_ok', ':', "# and that's OK", 'return', 'ids', '[', '0', ']', "# and that's not OK", 'raise', 'KeyError', '(', 'offsetdict', ')', 'if', 'len', '(', 'ids', ')', '==', '1', ':', '# found one', 'return', 'ids', '[', '0', ']', '# offset vector not found in table', 'if', 'create_new', 'is', 'None', ':', "# and that's not OK", 'raise', 'KeyError', '(', 'offsetdict', ')', "# that's OK, create new vector", 'id', '=', 'self', '.', 'get_next_id', '(', ')', 'for', 'instrument', ',', 'offset', 'in', 'offsetdict', '.', 'items', '(', ')', ':', 'row', '=', 'self', '.', 'RowType', '(', ')', 'row', '.', 'process_id', '=', 'create_new', '.', 'process_id', 'row', '.', 'time_slide_id', '=', 'id', 'row', '.', 'instrument', '=', 'instrument', 'row', '.', 'offset', '=', 'offset', 'self', '.', 'append', '(', 'row', ')', '# return new ID', 'return', 'id']
Return the time_slide_id corresponding to the offset vector described by offsetdict, a dictionary of instrument/offset pairs. If the optional create_new argument is None (the default), then the table must contain a matching offset vector. The return value is the ID of that vector. If the table does not contain a matching offset vector then KeyError is raised. If the optional create_new argument is set to a Process object (or any other object with a process_id attribute), then if the table does not contain a matching offset vector a new one will be added to the table and marked as having been created by the given process. The return value is the ID of the (possibly newly created) matching offset vector. If the optional superset_ok argument is False (the default) then an offset vector in the table is considered to "match" the requested offset vector only if they contain the exact same set of instruments. If the superset_ok argument is True, then an offset vector in the table is considered to match the requested offset vector as long as it provides the same offsets for the same instruments as the requested vector, even if it provides offsets for other instruments as well. More than one offset vector in the table might match the requested vector. If the optional nonunique_ok argument is False (the default), then KeyError will be raised if more than one offset vector in the table is found to match the requested vector. If the optional nonunique_ok is True then the return value is the ID of one of the matching offset vectors selected at random.
['Return', 'the', 'time_slide_id', 'corresponding', 'to', 'the', 'offset', 'vector', 'described', 'by', 'offsetdict', 'a', 'dictionary', 'of', 'instrument', '/', 'offset', 'pairs', '.']
train
https://github.com/gwastro/pycbc-glue/blob/a3e906bae59fbfd707c3ff82e5d008d939ec5e24/pycbc_glue/ligolw/dbtables.py#L875-L942
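A hedged sketch of the lookup-or-create behaviour described in the get_time_slide_id docstring; time_slide_table and process are assumed to come from an already-open LIGO_LW document and are not defined here.

offsets = {'H1': 0.0, 'L1': 5.0}
try:
    tsid = time_slide_table.get_time_slide_id(offsets)  # lookup only, raises KeyError on miss
except KeyError:
    # create_new marks the newly inserted vector as produced by the given process
    tsid = time_slide_table.get_time_slide_id(offsets, create_new=process)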
2,686
phaethon/kamene
kamene/layers/ipsec.py
CryptAlgo.decrypt
def decrypt(self, esp, key, icv_size=None): """ Decrypt an ESP packet @param esp: an encrypted ESP packet @param key: the secret key used for encryption @param icv_size: the length of the icv used for integrity check @return: a valid ESP packet encrypted with this algorithm @raise IPSecIntegrityError: if the integrity check fails with an AEAD algorithm """ if icv_size is None: icv_size = self.icv_size if self.is_aead else 0 iv = esp.data[:self.iv_size] data = esp.data[self.iv_size:len(esp.data) - icv_size] icv = esp.data[len(esp.data) - icv_size:] if self.cipher: cipher = self.new_cipher(key, iv, icv) decryptor = cipher.decryptor() if self.is_aead: # Tag value check is done during the finalize method decryptor.authenticate_additional_data( struct.pack('!LL', esp.spi, esp.seq) ) try: data = decryptor.update(data) + decryptor.finalize() except InvalidTag as err: raise IPSecIntegrityError(err) # extract padlen and nh padlen = (data[-2]) nh = data[-1] # then use padlen to determine data and padding data = data[:len(data) - padlen - 2] padding = data[len(data) - padlen - 2: len(data) - 2] return _ESPPlain(spi=esp.spi, seq=esp.seq, iv=iv, data=data, padding=padding, padlen=padlen, nh=nh, icv=icv)
python
def decrypt(self, esp, key, icv_size=None): """ Decrypt an ESP packet @param esp: an encrypted ESP packet @param key: the secret key used for encryption @param icv_size: the length of the icv used for integrity check @return: a valid ESP packet encrypted with this algorithm @raise IPSecIntegrityError: if the integrity check fails with an AEAD algorithm """ if icv_size is None: icv_size = self.icv_size if self.is_aead else 0 iv = esp.data[:self.iv_size] data = esp.data[self.iv_size:len(esp.data) - icv_size] icv = esp.data[len(esp.data) - icv_size:] if self.cipher: cipher = self.new_cipher(key, iv, icv) decryptor = cipher.decryptor() if self.is_aead: # Tag value check is done during the finalize method decryptor.authenticate_additional_data( struct.pack('!LL', esp.spi, esp.seq) ) try: data = decryptor.update(data) + decryptor.finalize() except InvalidTag as err: raise IPSecIntegrityError(err) # extract padlen and nh padlen = (data[-2]) nh = data[-1] # then use padlen to determine data and padding data = data[:len(data) - padlen - 2] padding = data[len(data) - padlen - 2: len(data) - 2] return _ESPPlain(spi=esp.spi, seq=esp.seq, iv=iv, data=data, padding=padding, padlen=padlen, nh=nh, icv=icv)
['def', 'decrypt', '(', 'self', ',', 'esp', ',', 'key', ',', 'icv_size', '=', 'None', ')', ':', 'if', 'icv_size', 'is', 'None', ':', 'icv_size', '=', 'self', '.', 'icv_size', 'if', 'self', '.', 'is_aead', 'else', '0', 'iv', '=', 'esp', '.', 'data', '[', ':', 'self', '.', 'iv_size', ']', 'data', '=', 'esp', '.', 'data', '[', 'self', '.', 'iv_size', ':', 'len', '(', 'esp', '.', 'data', ')', '-', 'icv_size', ']', 'icv', '=', 'esp', '.', 'data', '[', 'len', '(', 'esp', '.', 'data', ')', '-', 'icv_size', ':', ']', 'if', 'self', '.', 'cipher', ':', 'cipher', '=', 'self', '.', 'new_cipher', '(', 'key', ',', 'iv', ',', 'icv', ')', 'decryptor', '=', 'cipher', '.', 'decryptor', '(', ')', 'if', 'self', '.', 'is_aead', ':', '# Tag value check is done during the finalize method', 'decryptor', '.', 'authenticate_additional_data', '(', 'struct', '.', 'pack', '(', "'!LL'", ',', 'esp', '.', 'spi', ',', 'esp', '.', 'seq', ')', ')', 'try', ':', 'data', '=', 'decryptor', '.', 'update', '(', 'data', ')', '+', 'decryptor', '.', 'finalize', '(', ')', 'except', 'InvalidTag', 'as', 'err', ':', 'raise', 'IPSecIntegrityError', '(', 'err', ')', '# extract padlen and nh', 'padlen', '=', '(', 'data', '[', '-', '2', ']', ')', 'nh', '=', 'data', '[', '-', '1', ']', '# then use padlen to determine data and padding', 'data', '=', 'data', '[', ':', 'len', '(', 'data', ')', '-', 'padlen', '-', '2', ']', 'padding', '=', 'data', '[', 'len', '(', 'data', ')', '-', 'padlen', '-', '2', ':', 'len', '(', 'data', ')', '-', '2', ']', 'return', '_ESPPlain', '(', 'spi', '=', 'esp', '.', 'spi', ',', 'seq', '=', 'esp', '.', 'seq', ',', 'iv', '=', 'iv', ',', 'data', '=', 'data', ',', 'padding', '=', 'padding', ',', 'padlen', '=', 'padlen', ',', 'nh', '=', 'nh', ',', 'icv', '=', 'icv', ')']
Decrypt an ESP packet @param esp: an encrypted ESP packet @param key: the secret key used for encryption @param icv_size: the length of the icv used for integrity check @return: a valid ESP packet encrypted with this algorithm @raise IPSecIntegrityError: if the integrity check fails with an AEAD algorithm
['Decrypt', 'an', 'ESP', 'packet']
train
https://github.com/phaethon/kamene/blob/11d4064844f4f68ac5d7546f5633ac7d02082914/kamene/layers/ipsec.py#L338-L387
2,687
arcturial/clickatell-python
clickatell/http/__init__.py
Http.request
def request(self, action, data={}, headers={}, method='GET'): """ Append the user authentication details to every incoming request """ data = self.merge(data, {'user': self.username, 'password': self.password, 'api_id': self.apiId}) return Transport.request(self, action, data, headers, method)
python
def request(self, action, data={}, headers={}, method='GET'): """ Append the user authentication details to every incoming request """ data = self.merge(data, {'user': self.username, 'password': self.password, 'api_id': self.apiId}) return Transport.request(self, action, data, headers, method)
['def', 'request', '(', 'self', ',', 'action', ',', 'data', '=', '{', '}', ',', 'headers', '=', '{', '}', ',', 'method', '=', "'GET'", ')', ':', 'data', '=', 'self', '.', 'merge', '(', 'data', ',', '{', "'user'", ':', 'self', '.', 'username', ',', "'password'", ':', 'self', '.', 'password', ',', "'api_id'", ':', 'self', '.', 'apiId', '}', ')', 'return', 'Transport', '.', 'request', '(', 'self', ',', 'action', ',', 'data', ',', 'headers', ',', 'method', ')']
Append the user authentication details to every incoming request
['Append', 'the', 'user', 'authentication', 'details', 'to', 'every', 'incoming', 'request']
train
https://github.com/arcturial/clickatell-python/blob/4a554c28edaf2e5d0d9e81b4c9415241bfd61d00/clickatell/http/__init__.py#L24-L29
2,688
henzk/ape
ape/container_mode/validators/feature_order_validator.py
FeatureOrderValidator._check_position
def _check_position(self, feature, info): """ Takes the featur and the info dict and checks for the forced position :param feature: :param info: :return: """ pos = info.get('position') if pos is not None: feature_pos = self.get_feature_position(feature) if feature_pos is not None: if feature_pos != pos: message = '{feature} has a forced position on ({pos}) but is on position {feature_pos}.'.format( feature=feature, pos=pos, feature_pos=feature_pos ) self.violations.append((feature, message))
python
def _check_position(self, feature, info): """ Takes the featur and the info dict and checks for the forced position :param feature: :param info: :return: """ pos = info.get('position') if pos is not None: feature_pos = self.get_feature_position(feature) if feature_pos is not None: if feature_pos != pos: message = '{feature} has a forced position on ({pos}) but is on position {feature_pos}.'.format( feature=feature, pos=pos, feature_pos=feature_pos ) self.violations.append((feature, message))
['def', '_check_position', '(', 'self', ',', 'feature', ',', 'info', ')', ':', 'pos', '=', 'info', '.', 'get', '(', "'position'", ')', 'if', 'pos', 'is', 'not', 'None', ':', 'feature_pos', '=', 'self', '.', 'get_feature_position', '(', 'feature', ')', 'if', 'feature_pos', 'is', 'not', 'None', ':', 'if', 'feature_pos', '!=', 'pos', ':', 'message', '=', "'{feature} has a forced position on ({pos}) but is on position {feature_pos}.'", '.', 'format', '(', 'feature', '=', 'feature', ',', 'pos', '=', 'pos', ',', 'feature_pos', '=', 'feature_pos', ')', 'self', '.', 'violations', '.', 'append', '(', '(', 'feature', ',', 'message', ')', ')']
Takes the featur and the info dict and checks for the forced position :param feature: :param info: :return:
['Takes', 'the', 'featur', 'and', 'the', 'info', 'dict', 'and', 'checks', 'for', 'the', 'forced', 'position', ':', 'param', 'feature', ':', ':', 'param', 'info', ':', ':', 'return', ':']
train
https://github.com/henzk/ape/blob/a1b7ea5e5b25c42beffeaaa5c32d94ad82634819/ape/container_mode/validators/feature_order_validator.py#L72-L89
2,689
asweigart/pyautogui
pyautogui/__init__.py
dragRel
def dragRel(xOffset=0, yOffset=0, duration=0.0, tween=linear, button='left', pause=None, _pause=True, mouseDownUp=True): """Performs a mouse drag (mouse movement while a button is held down) to a point on the screen, relative to its current position. The x and y parameters detail where the mouse event happens. If None, the current mouse position is used. If a float value, it is rounded down. If outside the boundaries of the screen, the event happens at edge of the screen. Args: x (int, float, None, tuple, optional): How far left (for negative values) or right (for positive values) to move the cursor. 0 by default. If tuple, this is used for xOffset and yOffset. y (int, float, None, optional): How far up (for negative values) or down (for positive values) to move the cursor. 0 by default. duration (float, optional): The amount of time it takes to move the mouse cursor to the new xy coordinates. If 0, then the mouse cursor is moved instantaneously. 0.0 by default. tween (func, optional): The tweening function used if the duration is not 0. A linear tween is used by default. See the tweens.py file for details. button (str, int, optional): The mouse button clicked. Must be one of 'left', 'middle', 'right' (or 1, 2, or 3) respectively. 'left' by default. mouseDownUp (True, False): When true, the mouseUp/Down actions are not perfomed. Which allows dragging over multiple (small) actions. 'True' by default. Returns: None """ if xOffset is None: xOffset = 0 if yOffset is None: yOffset = 0 if type(xOffset) in (tuple, list): xOffset, yOffset = xOffset[0], xOffset[1] if xOffset == 0 and yOffset == 0: return # no-op case _failSafeCheck() mousex, mousey = platformModule._position() if mouseDownUp: mouseDown(button=button, _pause=False) _mouseMoveDrag('drag', mousex, mousey, xOffset, yOffset, duration, tween, button) if mouseDownUp: mouseUp(button=button, _pause=False) _autoPause(pause, _pause)
python
def dragRel(xOffset=0, yOffset=0, duration=0.0, tween=linear, button='left', pause=None, _pause=True, mouseDownUp=True): """Performs a mouse drag (mouse movement while a button is held down) to a point on the screen, relative to its current position. The x and y parameters detail where the mouse event happens. If None, the current mouse position is used. If a float value, it is rounded down. If outside the boundaries of the screen, the event happens at edge of the screen. Args: x (int, float, None, tuple, optional): How far left (for negative values) or right (for positive values) to move the cursor. 0 by default. If tuple, this is used for xOffset and yOffset. y (int, float, None, optional): How far up (for negative values) or down (for positive values) to move the cursor. 0 by default. duration (float, optional): The amount of time it takes to move the mouse cursor to the new xy coordinates. If 0, then the mouse cursor is moved instantaneously. 0.0 by default. tween (func, optional): The tweening function used if the duration is not 0. A linear tween is used by default. See the tweens.py file for details. button (str, int, optional): The mouse button clicked. Must be one of 'left', 'middle', 'right' (or 1, 2, or 3) respectively. 'left' by default. mouseDownUp (True, False): When true, the mouseUp/Down actions are not perfomed. Which allows dragging over multiple (small) actions. 'True' by default. Returns: None """ if xOffset is None: xOffset = 0 if yOffset is None: yOffset = 0 if type(xOffset) in (tuple, list): xOffset, yOffset = xOffset[0], xOffset[1] if xOffset == 0 and yOffset == 0: return # no-op case _failSafeCheck() mousex, mousey = platformModule._position() if mouseDownUp: mouseDown(button=button, _pause=False) _mouseMoveDrag('drag', mousex, mousey, xOffset, yOffset, duration, tween, button) if mouseDownUp: mouseUp(button=button, _pause=False) _autoPause(pause, _pause)
['def', 'dragRel', '(', 'xOffset', '=', '0', ',', 'yOffset', '=', '0', ',', 'duration', '=', '0.0', ',', 'tween', '=', 'linear', ',', 'button', '=', "'left'", ',', 'pause', '=', 'None', ',', '_pause', '=', 'True', ',', 'mouseDownUp', '=', 'True', ')', ':', 'if', 'xOffset', 'is', 'None', ':', 'xOffset', '=', '0', 'if', 'yOffset', 'is', 'None', ':', 'yOffset', '=', '0', 'if', 'type', '(', 'xOffset', ')', 'in', '(', 'tuple', ',', 'list', ')', ':', 'xOffset', ',', 'yOffset', '=', 'xOffset', '[', '0', ']', ',', 'xOffset', '[', '1', ']', 'if', 'xOffset', '==', '0', 'and', 'yOffset', '==', '0', ':', 'return', '# no-op case', '_failSafeCheck', '(', ')', 'mousex', ',', 'mousey', '=', 'platformModule', '.', '_position', '(', ')', 'if', 'mouseDownUp', ':', 'mouseDown', '(', 'button', '=', 'button', ',', '_pause', '=', 'False', ')', '_mouseMoveDrag', '(', "'drag'", ',', 'mousex', ',', 'mousey', ',', 'xOffset', ',', 'yOffset', ',', 'duration', ',', 'tween', ',', 'button', ')', 'if', 'mouseDownUp', ':', 'mouseUp', '(', 'button', '=', 'button', ',', '_pause', '=', 'False', ')', '_autoPause', '(', 'pause', ',', '_pause', ')']
Performs a mouse drag (mouse movement while a button is held down) to a point on the screen, relative to its current position. The x and y parameters detail where the mouse event happens. If None, the current mouse position is used. If a float value, it is rounded down. If outside the boundaries of the screen, the event happens at edge of the screen. Args: x (int, float, None, tuple, optional): How far left (for negative values) or right (for positive values) to move the cursor. 0 by default. If tuple, this is used for xOffset and yOffset. y (int, float, None, optional): How far up (for negative values) or down (for positive values) to move the cursor. 0 by default. duration (float, optional): The amount of time it takes to move the mouse cursor to the new xy coordinates. If 0, then the mouse cursor is moved instantaneously. 0.0 by default. tween (func, optional): The tweening function used if the duration is not 0. A linear tween is used by default. See the tweens.py file for details. button (str, int, optional): The mouse button clicked. Must be one of 'left', 'middle', 'right' (or 1, 2, or 3) respectively. 'left' by default. mouseDownUp (True, False): When true, the mouseUp/Down actions are not perfomed. Which allows dragging over multiple (small) actions. 'True' by default. Returns: None
['Performs', 'a', 'mouse', 'drag', '(', 'mouse', 'movement', 'while', 'a', 'button', 'is', 'held', 'down', ')', 'to', 'a', 'point', 'on', 'the', 'screen', 'relative', 'to', 'its', 'current', 'position', '.']
train
https://github.com/asweigart/pyautogui/blob/77524bd47334a89024013fd48e05151c3ac9289a/pyautogui/__init__.py#L778-L827
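A short usage sketch of the relative drag documented above, using the public pyautogui entry point; the offsets and durations are arbitrary values.

import pyautogui

pyautogui.dragRel(150, 0, duration=0.5)                    # drag 150 px to the right
pyautogui.dragRel(0, -75, duration=0.25, button='left')    # then 75 px up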
2,690
splunk/splunk-sdk-python
examples/analytics/bottle.py
Bottle.hook
def hook(self, name): """ Return a decorator that attaches a callback to a hook. """ def wrapper(func): self.hooks.add(name, func) return func return wrapper
python
def hook(self, name): """ Return a decorator that attaches a callback to a hook. """ def wrapper(func): self.hooks.add(name, func) return func return wrapper
['def', 'hook', '(', 'self', ',', 'name', ')', ':', 'def', 'wrapper', '(', 'func', ')', ':', 'self', '.', 'hooks', '.', 'add', '(', 'name', ',', 'func', ')', 'return', 'func', 'return', 'wrapper']
Return a decorator that attaches a callback to a hook.
['Return', 'a', 'decorator', 'that', 'attaches', 'a', 'callback', 'to', 'a', 'hook', '.']
train
https://github.com/splunk/splunk-sdk-python/blob/a245a4eeb93b3621730418008e31715912bcdcd8/examples/analytics/bottle.py#L624-L629
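A small sketch of registering a callback through the hook decorator shown in the record, using Bottle's standard 'before_request' hook name; the route body is a placeholder.

from bottle import Bottle

app = Bottle()

@app.hook('before_request')
def log_request():
    print('handling a request')

@app.route('/')
def index():
    return 'hello'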
2,691
RiotGames/cloud-inquisitor
backend/cloud_inquisitor/plugins/views/config.py
NamespaceGet.put
def put(self, namespacePrefix): """Update a specific configuration namespace""" self.reqparse.add_argument('name', type=str, required=True) self.reqparse.add_argument('sortOrder', type=int, required=True) args = self.reqparse.parse_args() ns = db.ConfigNamespace.find_one(ConfigNamespace.namespace_prefix == namespacePrefix) if not ns: return self.make_response('No such namespace: {}'.format(namespacePrefix), HTTP.NOT_FOUND) ns.name = args['name'] ns.sort_order = args['sortOrder'] db.session.add(ns) db.session.commit() self.dbconfig.reload_data() auditlog(event='configNamespace.update', actor=session['user'].username, data=args) return self.make_response('Namespace updated')
python
def put(self, namespacePrefix): """Update a specific configuration namespace""" self.reqparse.add_argument('name', type=str, required=True) self.reqparse.add_argument('sortOrder', type=int, required=True) args = self.reqparse.parse_args() ns = db.ConfigNamespace.find_one(ConfigNamespace.namespace_prefix == namespacePrefix) if not ns: return self.make_response('No such namespace: {}'.format(namespacePrefix), HTTP.NOT_FOUND) ns.name = args['name'] ns.sort_order = args['sortOrder'] db.session.add(ns) db.session.commit() self.dbconfig.reload_data() auditlog(event='configNamespace.update', actor=session['user'].username, data=args) return self.make_response('Namespace updated')
['def', 'put', '(', 'self', ',', 'namespacePrefix', ')', ':', 'self', '.', 'reqparse', '.', 'add_argument', '(', "'name'", ',', 'type', '=', 'str', ',', 'required', '=', 'True', ')', 'self', '.', 'reqparse', '.', 'add_argument', '(', "'sortOrder'", ',', 'type', '=', 'int', ',', 'required', '=', 'True', ')', 'args', '=', 'self', '.', 'reqparse', '.', 'parse_args', '(', ')', 'ns', '=', 'db', '.', 'ConfigNamespace', '.', 'find_one', '(', 'ConfigNamespace', '.', 'namespace_prefix', '==', 'namespacePrefix', ')', 'if', 'not', 'ns', ':', 'return', 'self', '.', 'make_response', '(', "'No such namespace: {}'", '.', 'format', '(', 'namespacePrefix', ')', ',', 'HTTP', '.', 'NOT_FOUND', ')', 'ns', '.', 'name', '=', 'args', '[', "'name'", ']', 'ns', '.', 'sort_order', '=', 'args', '[', "'sortOrder'", ']', 'db', '.', 'session', '.', 'add', '(', 'ns', ')', 'db', '.', 'session', '.', 'commit', '(', ')', 'self', '.', 'dbconfig', '.', 'reload_data', '(', ')', 'auditlog', '(', 'event', '=', "'configNamespace.update'", ',', 'actor', '=', 'session', '[', "'user'", ']', '.', 'username', ',', 'data', '=', 'args', ')', 'return', 'self', '.', 'make_response', '(', "'Namespace updated'", ')']
Update a specific configuration namespace
['Update', 'a', 'specific', 'configuration', 'namespace']
train
https://github.com/RiotGames/cloud-inquisitor/blob/181dc2566ca59fc855f695b7fcc2c3b934e6ee9f/backend/cloud_inquisitor/plugins/views/config.py#L184-L202
2,692
agoragames/kairos
kairos/timeseries.py
GregorianTime.normalize
def normalize(self, timestamp, steps=0): ''' Normalize a timestamp according to the interval configuration. Optionally can be used to calculate the timestamp N steps away. ''' # So far, the only commonality with RelativeTime return self.from_bucket( self.to_bucket(timestamp, steps) )
python
def normalize(self, timestamp, steps=0): ''' Normalize a timestamp according to the interval configuration. Optionally can be used to calculate the timestamp N steps away. ''' # So far, the only commonality with RelativeTime return self.from_bucket( self.to_bucket(timestamp, steps) )
['def', 'normalize', '(', 'self', ',', 'timestamp', ',', 'steps', '=', '0', ')', ':', '# So far, the only commonality with RelativeTime', 'return', 'self', '.', 'from_bucket', '(', 'self', '.', 'to_bucket', '(', 'timestamp', ',', 'steps', ')', ')']
Normalize a timestamp according to the interval configuration. Optionally can be used to calculate the timestamp N steps away.
['Normalize', 'a', 'timestamp', 'according', 'to', 'the', 'interval', 'configuration', '.', 'Optionally', 'can', 'be', 'used', 'to', 'calculate', 'the', 'timestamp', 'N', 'steps', 'away', '.']
train
https://github.com/agoragames/kairos/blob/0b062d543b0f4a46df460fa0eb6ec281232ab179/kairos/timeseries.py#L229-L235
2,693
hyperledger/indy-plenum
ledger/error.py
returns_true_or_raises
def returns_true_or_raises(f): """A safety net. Decorator for functions that are only allowed to return True or raise an exception. Args: f: A function whose only expected return value is True. Returns: A wrapped functions whose guaranteed only return value is True. """ @functools.wraps(f) def wrapped(*args, **kwargs): ret = f(*args, **kwargs) if ret is not True: raise RuntimeError("Unexpected return value %r" % ret) return True return wrapped
python
def returns_true_or_raises(f): """A safety net. Decorator for functions that are only allowed to return True or raise an exception. Args: f: A function whose only expected return value is True. Returns: A wrapped functions whose guaranteed only return value is True. """ @functools.wraps(f) def wrapped(*args, **kwargs): ret = f(*args, **kwargs) if ret is not True: raise RuntimeError("Unexpected return value %r" % ret) return True return wrapped
['def', 'returns_true_or_raises', '(', 'f', ')', ':', '@', 'functools', '.', 'wraps', '(', 'f', ')', 'def', 'wrapped', '(', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'ret', '=', 'f', '(', '*', 'args', ',', '*', '*', 'kwargs', ')', 'if', 'ret', 'is', 'not', 'True', ':', 'raise', 'RuntimeError', '(', '"Unexpected return value %r"', '%', 'ret', ')', 'return', 'True', 'return', 'wrapped']
A safety net. Decorator for functions that are only allowed to return True or raise an exception. Args: f: A function whose only expected return value is True. Returns: A wrapped functions whose guaranteed only return value is True.
['A', 'safety', 'net', '.']
train
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/ledger/error.py#L126-L144
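A usage sketch of the returns_true_or_raises safety net; the verify function is hypothetical and the import path is inferred from the record's ledger/error.py location.

from ledger.error import returns_true_or_raises

@returns_true_or_raises
def verify(value):
    return value == 42

verify(42)    # returns True
# verify(41)  # would raise RuntimeError('Unexpected return value False')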
2,694
ebroecker/canmatrix
src/canmatrix/log.py
set_log_level
def set_log_level(logger, level): # type: (logging.Logger, int) -> None """Dynamic reconfiguration of the log level""" if level > 2: level = 2 if level < -1: level = -1 levels = { -1: logging.ERROR, 0: logging.WARN, 1: logging.INFO, 2: logging.DEBUG } logger.setLevel(levels[level])
python
def set_log_level(logger, level): # type: (logging.Logger, int) -> None """Dynamic reconfiguration of the log level""" if level > 2: level = 2 if level < -1: level = -1 levels = { -1: logging.ERROR, 0: logging.WARN, 1: logging.INFO, 2: logging.DEBUG } logger.setLevel(levels[level])
['def', 'set_log_level', '(', 'logger', ',', 'level', ')', ':', '# type: (logging.Logger, int) -> None', 'if', 'level', '>', '2', ':', 'level', '=', '2', 'if', 'level', '<', '-', '1', ':', 'level', '=', '-', '1', 'levels', '=', '{', '-', '1', ':', 'logging', '.', 'ERROR', ',', '0', ':', 'logging', '.', 'WARN', ',', '1', ':', 'logging', '.', 'INFO', ',', '2', ':', 'logging', '.', 'DEBUG', '}', 'logger', '.', 'setLevel', '(', 'levels', '[', 'level', ']', ')']
Dynamic reconfiguration of the log level
['Dynamic', 'reconfiguration', 'of', 'the', 'log', 'level']
train
https://github.com/ebroecker/canmatrix/blob/d6150b7a648350f051a11c431e9628308c8d5593/src/canmatrix/log.py#L48-L61
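A sketch of the clamping behaviour in set_log_level; the import path is inferred from the record's src/canmatrix/log.py location and the logger name is arbitrary.

import logging
from canmatrix.log import set_log_level

logger = logging.getLogger('canmatrix')
set_log_level(logger, 2)    # DEBUG
set_log_level(logger, 5)    # out of range, clamped to 2 (DEBUG)
set_log_level(logger, -1)   # ERROR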
2,695
googleads/googleads-python-lib
examples/adwords/v201809/advanced_operations/add_dynamic_search_ads_campaign.py
_CreateBudget
def _CreateBudget(client): """Creates the budget. Args: client: an AdWordsClient instance. Returns: a suds.sudsobject.Object representation of the created budget. """ budget_service = client.GetService('BudgetService', version='v201809') # Create the campaign budget operation = { 'operand': { 'name': 'Interplanetary Cruise Budget #%d' % uuid.uuid4(), 'deliveryMethod': 'STANDARD', 'amount': { 'microAmount': 500000 } }, 'operator': 'ADD' } budget = budget_service.mutate([operation])['value'][0] print 'Budget with ID "%d" and name "%s" was created.' % ( budget['budgetId'], budget['name']) return budget
python
def _CreateBudget(client): """Creates the budget. Args: client: an AdWordsClient instance. Returns: a suds.sudsobject.Object representation of the created budget. """ budget_service = client.GetService('BudgetService', version='v201809') # Create the campaign budget operation = { 'operand': { 'name': 'Interplanetary Cruise Budget #%d' % uuid.uuid4(), 'deliveryMethod': 'STANDARD', 'amount': { 'microAmount': 500000 } }, 'operator': 'ADD' } budget = budget_service.mutate([operation])['value'][0] print 'Budget with ID "%d" and name "%s" was created.' % ( budget['budgetId'], budget['name']) return budget
['def', '_CreateBudget', '(', 'client', ')', ':', 'budget_service', '=', 'client', '.', 'GetService', '(', "'BudgetService'", ',', 'version', '=', "'v201809'", ')', '# Create the campaign budget', 'operation', '=', '{', "'operand'", ':', '{', "'name'", ':', "'Interplanetary Cruise Budget #%d'", '%', 'uuid', '.', 'uuid4', '(', ')', ',', "'deliveryMethod'", ':', "'STANDARD'", ',', "'amount'", ':', '{', "'microAmount'", ':', '500000', '}', '}', ',', "'operator'", ':', "'ADD'", '}', 'budget', '=', 'budget_service', '.', 'mutate', '(', '[', 'operation', ']', ')', '[', "'value'", ']', '[', '0', ']', 'print', '\'Budget with ID "%d" and name "%s" was created.\'', '%', '(', 'budget', '[', "'budgetId'", ']', ',', 'budget', '[', "'name'", ']', ')', 'return', 'budget']
Creates the budget. Args: client: an AdWordsClient instance. Returns: a suds.sudsobject.Object representation of the created budget.
['Creates', 'the', 'budget', '.']
train
https://github.com/googleads/googleads-python-lib/blob/aa3b1b474b0f9789ca55ca46f4b2b57aeae38874/examples/adwords/v201809/advanced_operations/add_dynamic_search_ads_campaign.py#L43-L71
2,696
shtalinberg/django-el-pagination
el_pagination/templatetags/el_pagination_tags.py
show_current_number
def show_current_number(parser, token): """Show the current page number, or insert it in the context. This tag can for example be useful to change the page title according to the current page number. To just show current page number: .. code-block:: html+django {% show_current_number %} If you use multiple paginations in the same page, you can get the page number for a specific pagination using the querystring key, e.g.: .. code-block:: html+django {% show_current_number using mykey %} The default page when no querystring is specified is 1. If you changed it in the `paginate`_ template tag, you have to call ``show_current_number`` according to your choice, e.g.: .. code-block:: html+django {% show_current_number starting from page 3 %} This can be also achieved using a template variable you passed to the context, e.g.: .. code-block:: html+django {% show_current_number starting from page page_number %} You can of course mix it all (the order of arguments is important): .. code-block:: html+django {% show_current_number starting from page 3 using mykey %} If you want to insert the current page number in the context, without actually displaying it in the template, use the *as* argument, i.e.: .. code-block:: html+django {% show_current_number as page_number %} {% show_current_number starting from page 3 using mykey as page_number %} """ # Validate args. try: tag_name, args = token.contents.split(None, 1) except ValueError: key = None number = None tag_name = token.contents[0] var_name = None else: # Use a regexp to catch args. match = SHOW_CURRENT_NUMBER_EXPRESSION.match(args) if match is None: msg = 'Invalid arguments for %r tag' % tag_name raise template.TemplateSyntaxError(msg) # Retrieve objects. groupdict = match.groupdict() key = groupdict['key'] number = groupdict['number'] var_name = groupdict['var_name'] # Call the node. return ShowCurrentNumberNode(number, key, var_name)
python
def show_current_number(parser, token): """Show the current page number, or insert it in the context. This tag can for example be useful to change the page title according to the current page number. To just show current page number: .. code-block:: html+django {% show_current_number %} If you use multiple paginations in the same page, you can get the page number for a specific pagination using the querystring key, e.g.: .. code-block:: html+django {% show_current_number using mykey %} The default page when no querystring is specified is 1. If you changed it in the `paginate`_ template tag, you have to call ``show_current_number`` according to your choice, e.g.: .. code-block:: html+django {% show_current_number starting from page 3 %} This can be also achieved using a template variable you passed to the context, e.g.: .. code-block:: html+django {% show_current_number starting from page page_number %} You can of course mix it all (the order of arguments is important): .. code-block:: html+django {% show_current_number starting from page 3 using mykey %} If you want to insert the current page number in the context, without actually displaying it in the template, use the *as* argument, i.e.: .. code-block:: html+django {% show_current_number as page_number %} {% show_current_number starting from page 3 using mykey as page_number %} """ # Validate args. try: tag_name, args = token.contents.split(None, 1) except ValueError: key = None number = None tag_name = token.contents[0] var_name = None else: # Use a regexp to catch args. match = SHOW_CURRENT_NUMBER_EXPRESSION.match(args) if match is None: msg = 'Invalid arguments for %r tag' % tag_name raise template.TemplateSyntaxError(msg) # Retrieve objects. groupdict = match.groupdict() key = groupdict['key'] number = groupdict['number'] var_name = groupdict['var_name'] # Call the node. return ShowCurrentNumberNode(number, key, var_name)
['def', 'show_current_number', '(', 'parser', ',', 'token', ')', ':', '# Validate args.', 'try', ':', 'tag_name', ',', 'args', '=', 'token', '.', 'contents', '.', 'split', '(', 'None', ',', '1', ')', 'except', 'ValueError', ':', 'key', '=', 'None', 'number', '=', 'None', 'tag_name', '=', 'token', '.', 'contents', '[', '0', ']', 'var_name', '=', 'None', 'else', ':', '# Use a regexp to catch args.', 'match', '=', 'SHOW_CURRENT_NUMBER_EXPRESSION', '.', 'match', '(', 'args', ')', 'if', 'match', 'is', 'None', ':', 'msg', '=', "'Invalid arguments for %r tag'", '%', 'tag_name', 'raise', 'template', '.', 'TemplateSyntaxError', '(', 'msg', ')', '# Retrieve objects.', 'groupdict', '=', 'match', '.', 'groupdict', '(', ')', 'key', '=', 'groupdict', '[', "'key'", ']', 'number', '=', 'groupdict', '[', "'number'", ']', 'var_name', '=', 'groupdict', '[', "'var_name'", ']', '# Call the node.', 'return', 'ShowCurrentNumberNode', '(', 'number', ',', 'key', ',', 'var_name', ')']
Show the current page number, or insert it in the context. This tag can for example be useful to change the page title according to the current page number. To just show current page number: .. code-block:: html+django {% show_current_number %} If you use multiple paginations in the same page, you can get the page number for a specific pagination using the querystring key, e.g.: .. code-block:: html+django {% show_current_number using mykey %} The default page when no querystring is specified is 1. If you changed it in the `paginate`_ template tag, you have to call ``show_current_number`` according to your choice, e.g.: .. code-block:: html+django {% show_current_number starting from page 3 %} This can be also achieved using a template variable you passed to the context, e.g.: .. code-block:: html+django {% show_current_number starting from page page_number %} You can of course mix it all (the order of arguments is important): .. code-block:: html+django {% show_current_number starting from page 3 using mykey %} If you want to insert the current page number in the context, without actually displaying it in the template, use the *as* argument, i.e.: .. code-block:: html+django {% show_current_number as page_number %} {% show_current_number starting from page 3 using mykey as page_number %}
['Show', 'the', 'current', 'page', 'number', 'or', 'insert', 'it', 'in', 'the', 'context', '.']
train
https://github.com/shtalinberg/django-el-pagination/blob/889ba62b46cb58292d554753a0bfda0b0a6d57da/el_pagination/templatetags/el_pagination_tags.py#L579-L649
2,697
glomex/gcdt
gcdt/kumo_core.py
call_pre_hook
def call_pre_hook(awsclient, cloudformation): """Invoke the pre_hook BEFORE the config is read. :param awsclient: :param cloudformation: """ # TODO: this is deprecated!! move this to glomex_config_reader # no config available if not hasattr(cloudformation, 'pre_hook'): # hook is not present return hook_func = getattr(cloudformation, 'pre_hook') if not hook_func.func_code.co_argcount: hook_func() # for compatibility with existing templates else: log.error('pre_hock can not have any arguments. The pre_hook it is ' + 'executed BEFORE config is read')
python
def call_pre_hook(awsclient, cloudformation): """Invoke the pre_hook BEFORE the config is read. :param awsclient: :param cloudformation: """ # TODO: this is deprecated!! move this to glomex_config_reader # no config available if not hasattr(cloudformation, 'pre_hook'): # hook is not present return hook_func = getattr(cloudformation, 'pre_hook') if not hook_func.func_code.co_argcount: hook_func() # for compatibility with existing templates else: log.error('pre_hock can not have any arguments. The pre_hook it is ' + 'executed BEFORE config is read')
['def', 'call_pre_hook', '(', 'awsclient', ',', 'cloudformation', ')', ':', '# TODO: this is deprecated!! move this to glomex_config_reader', '# no config available', 'if', 'not', 'hasattr', '(', 'cloudformation', ',', "'pre_hook'", ')', ':', '# hook is not present', 'return', 'hook_func', '=', 'getattr', '(', 'cloudformation', ',', "'pre_hook'", ')', 'if', 'not', 'hook_func', '.', 'func_code', '.', 'co_argcount', ':', 'hook_func', '(', ')', '# for compatibility with existing templates', 'else', ':', 'log', '.', 'error', '(', "'pre_hock can not have any arguments. The pre_hook it is '", '+', "'executed BEFORE config is read'", ')']
Invoke the pre_hook BEFORE the config is read. :param awsclient: :param cloudformation:
['Invoke', 'the', 'pre_hook', 'BEFORE', 'the', 'config', 'is', 'read', '.']
train
https://github.com/glomex/gcdt/blob/cd67cf416371337b83cb9ca3f696277125703339/gcdt/kumo_core.py#L123-L139
2,698
inveniosoftware/invenio-userprofiles
invenio_userprofiles/models.py
UserProfile.get_by_username
def get_by_username(cls, username): """Get profile by username. :param username: A username to query for (case insensitive). """ return cls.query.filter( UserProfile._username == username.lower() ).one()
python
def get_by_username(cls, username): """Get profile by username. :param username: A username to query for (case insensitive). """ return cls.query.filter( UserProfile._username == username.lower() ).one()
['def', 'get_by_username', '(', 'cls', ',', 'username', ')', ':', 'return', 'cls', '.', 'query', '.', 'filter', '(', 'UserProfile', '.', '_username', '==', 'username', '.', 'lower', '(', ')', ')', '.', 'one', '(', ')']
Get profile by username. :param username: A username to query for (case insensitive).
['Get', 'profile', 'by', 'username', '.']
train
https://github.com/inveniosoftware/invenio-userprofiles/blob/4c682e7d67a4cab8dc38472a31fa1c34cbba03dd/invenio_userprofiles/models.py#L78-L85
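A hedged lookup sketch for get_by_username; it assumes a Flask application context with the Invenio database initialised, and the username value is made up.

from invenio_userprofiles.models import UserProfile

# Case-insensitive match; .one() raises sqlalchemy NoResultFound if no profile exists.
profile = UserProfile.get_by_username('AdaLovelace')
print(profile)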
2,699
SatelliteQE/nailgun
nailgun/entities.py
JobTemplate.read
def read(self, entity=None, attrs=None, ignore=None, params=None): """Ignore the template inputs when initially reading the job template. Look up each TemplateInput entity separately and afterwords add them to the JobTemplate entity.""" if attrs is None: attrs = self.read_json(params=params) if ignore is None: ignore = set() ignore.add('template_inputs') entity = super(JobTemplate, self).read(entity=entity, attrs=attrs, ignore=ignore, params=params) referenced_entities = [ TemplateInput(entity._server_config, id=entity_id, template=JobTemplate(entity._server_config, id=entity.id)) for entity_id in _get_entity_ids('template_inputs', attrs) ] setattr(entity, 'template_inputs', referenced_entities) return entity
python
def read(self, entity=None, attrs=None, ignore=None, params=None): """Ignore the template inputs when initially reading the job template. Look up each TemplateInput entity separately and afterwords add them to the JobTemplate entity.""" if attrs is None: attrs = self.read_json(params=params) if ignore is None: ignore = set() ignore.add('template_inputs') entity = super(JobTemplate, self).read(entity=entity, attrs=attrs, ignore=ignore, params=params) referenced_entities = [ TemplateInput(entity._server_config, id=entity_id, template=JobTemplate(entity._server_config, id=entity.id)) for entity_id in _get_entity_ids('template_inputs', attrs) ] setattr(entity, 'template_inputs', referenced_entities) return entity
['def', 'read', '(', 'self', ',', 'entity', '=', 'None', ',', 'attrs', '=', 'None', ',', 'ignore', '=', 'None', ',', 'params', '=', 'None', ')', ':', 'if', 'attrs', 'is', 'None', ':', 'attrs', '=', 'self', '.', 'read_json', '(', 'params', '=', 'params', ')', 'if', 'ignore', 'is', 'None', ':', 'ignore', '=', 'set', '(', ')', 'ignore', '.', 'add', '(', "'template_inputs'", ')', 'entity', '=', 'super', '(', 'JobTemplate', ',', 'self', ')', '.', 'read', '(', 'entity', '=', 'entity', ',', 'attrs', '=', 'attrs', ',', 'ignore', '=', 'ignore', ',', 'params', '=', 'params', ')', 'referenced_entities', '=', '[', 'TemplateInput', '(', 'entity', '.', '_server_config', ',', 'id', '=', 'entity_id', ',', 'template', '=', 'JobTemplate', '(', 'entity', '.', '_server_config', ',', 'id', '=', 'entity', '.', 'id', ')', ')', 'for', 'entity_id', 'in', '_get_entity_ids', '(', "'template_inputs'", ',', 'attrs', ')', ']', 'setattr', '(', 'entity', ',', "'template_inputs'", ',', 'referenced_entities', ')', 'return', 'entity']
Ignore the template inputs when initially reading the job template. Look up each TemplateInput entity separately and afterwords add them to the JobTemplate entity.
['Ignore', 'the', 'template', 'inputs', 'when', 'initially', 'reading', 'the', 'job', 'template', '.', 'Look', 'up', 'each', 'TemplateInput', 'entity', 'separately', 'and', 'afterwords', 'add', 'them', 'to', 'the', 'JobTemplate', 'entity', '.']
train
https://github.com/SatelliteQE/nailgun/blob/c36d8c20862e87bf6975bd48ac1ca40a9e634eaa/nailgun/entities.py#L1667-L1686