Dataset columns (per record: body, body_hash, docstring, path, name, repository_name, lang, body_without_docstring):

  body                    string, 26 to 98.2k characters
  body_hash               int64
  docstring               string, 1 to 16.8k characters
  path                    string, 5 to 230 characters
  name                    string, 1 to 96 characters
  repository_name         string, 7 to 89 characters
  lang                    string, 1 class
  body_without_docstring  string, 20 to 98.2k characters
def __init__(self, ignore_validation: bool) -> None: 'Initialize Class properties.' super().__init__() self.ignore_validation = ignore_validation self._app_packages = [] self._install_json_schema = None self._layout_json_schema = None self.config = {} self.ij = InstallJson() self.invalid_json_files = [] self.lj = LayoutJson() self.tj = TcexJson() self.validation_data = self._validation_data
2,886,200,466,622,236,700
Initialize Class properties.
tcex/bin/validate.py
__init__
benjaminPurdy/tcex
python
def __init__(self, ignore_validation: bool) -> None: super().__init__() self.ignore_validation = ignore_validation self._app_packages = [] self._install_json_schema = None self._layout_json_schema = None self.config = {} self.ij = InstallJson() self.invalid_json_files = [] self.lj = LayoutJson() self.tj = TcexJson() self.validation_data = self._validation_data
@property def _validation_data(self) -> Dict[(str, list)]: 'Return structure for validation data.' return {'errors': [], 'fileSyntax': [], 'layouts': [], 'moduleImports': [], 'schema': [], 'feeds': []}
5,958,275,173,923,295,000
Return structure for validation data.
tcex/bin/validate.py
_validation_data
benjaminPurdy/tcex
python
@property def _validation_data(self) -> Dict[(str, list)]: return {'errors': [], 'fileSyntax': [], 'layouts': [], 'moduleImports': [], 'schema': [], 'feeds': []}
def _check_node_import(self, node: Union[(ast.Import, ast.ImportFrom)], filename: str) -> None: 'Validate the imports of a single ast node.' if isinstance(node, ast.Import): for n in node.names: m = n.name.split('.')[0] if (not self.check_import_stdlib(m)): m_status = self.check_imported(m) if (not m_status): self.validation_data['errors'].append(f'Module validation failed for {filename} (module "{m}" could not be imported).') self.validation_data['moduleImports'].append({'filename': filename, 'module': m, 'status': m_status}) elif isinstance(node, ast.ImportFrom): m = node.module.split('.')[0] if (not self.check_import_stdlib(m)): m_status = self.check_imported(m) if (not m_status): self.validation_data['errors'].append(f'Module validation failed for {filename} (module "{m}" could not be imported).') self.validation_data['moduleImports'].append({'filename': filename, 'module': m, 'status': m_status})
-3,932,265,064,709,798,000
Validate the imports of a single ast node.
tcex/bin/validate.py
_check_node_import
benjaminPurdy/tcex
python
def _check_node_import(self, node: Union[(ast.Import, ast.ImportFrom)], filename: str) -> None: if isinstance(node, ast.Import): for n in node.names: m = n.name.split('.')[0] if (not self.check_import_stdlib(m)): m_status = self.check_imported(m) if (not m_status): self.validation_data['errors'].append(f'Module validation failed for {filename} (module "{m}" could not be imported).') self.validation_data['moduleImports'].append({'filename': filename, 'module': m, 'status': m_status}) elif isinstance(node, ast.ImportFrom): m = node.module.split('.')[0] if (not self.check_import_stdlib(m)): m_status = self.check_imported(m) if (not m_status): self.validation_data['errors'].append(f'Module validation failed for {filename} (module "{m}" could not be imported).') self.validation_data['moduleImports'].append({'filename': filename, 'module': m, 'status': m_status})
def check_imports(self) -> None: "Check the project's top-level directory for missing imports.\n\n This method will check only files ending in **.py** and does not handle import validation\n for sub-directories.\n " for filename in sorted(os.listdir(self.app_path)): if (not filename.endswith('.py')): continue fq_path = os.path.join(self.app_path, filename) with open(fq_path, 'rb') as f: code_lines = deque([(f.read(), 1)]) while code_lines: (code, _) = code_lines.popleft() try: parsed_code = ast.parse(code) for node in ast.walk(parsed_code): self._check_node_import(node, filename) except SyntaxError: pass
8,037,250,862,082,015,000
Check the project's top-level directory for missing imports. This method will check only files ending in **.py** and does not handle import validation for sub-directories.
tcex/bin/validate.py
check_imports
benjaminPurdy/tcex
python
def check_imports(self) -> None: for filename in sorted(os.listdir(self.app_path)): if (not filename.endswith('.py')): continue fq_path = os.path.join(self.app_path, filename) with open(fq_path, 'rb') as f: code_lines = deque([(f.read(), 1)]) while code_lines: (code, _) = code_lines.popleft() try: parsed_code = ast.parse(code) for node in ast.walk(parsed_code): self._check_node_import(node, filename) except SyntaxError: pass
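The check_imports record above reads each top-level .py file, parses it with ast, and hands every node to _check_node_import. A minimal standalone sketch of the same idea follows; collect_top_level_imports is a hypothetical helper, not part of tcex, and it only gathers root module names rather than recording validation results:

import ast
import os

def collect_top_level_imports(app_path: str) -> dict:
    """Map each top-level .py file to the root modules it imports."""
    imports = {}
    for filename in sorted(os.listdir(app_path)):
        if not filename.endswith('.py'):
            continue
        with open(os.path.join(app_path, filename), 'rb') as f:
            try:
                tree = ast.parse(f.read(), filename=filename)
            except SyntaxError:
                continue  # syntax problems are reported by a separate check
        modules = set()
        for node in ast.walk(tree):
            if isinstance(node, ast.Import):
                modules.update(n.name.split('.')[0] for n in node.names)
            elif isinstance(node, ast.ImportFrom) and node.module:
                modules.add(node.module.split('.')[0])
        imports[filename] = sorted(modules)
    return imports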
@staticmethod def check_import_stdlib(module: str) -> bool: 'Check if module is in Python stdlib.\n\n Args:\n module: The name of the module to check.\n\n Returns:\n bool: True if the module is in the stdlib or is one of the App template modules.\n ' if ((module in stdlib_list('3.6')) or (module in stdlib_list('3.7')) or (module in stdlib_list('3.8')) or (module in ['app', 'args', 'base_app_input', 'job_app', 'playbook_app', 'run', 'service_app'])): return True return False
574,623,895,274,065,000
Check if module is in Python stdlib. Args: module: The name of the module to check. Returns: bool: True if the module is in the stdlib or is one of the App template modules.
tcex/bin/validate.py
check_import_stdlib
benjaminPurdy/tcex
python
@staticmethod def check_import_stdlib(module: str) -> bool: if ((module in stdlib_list('3.6')) or (module in stdlib_list('3.7')) or (module in stdlib_list('3.8')) or (module in ['app', 'args', 'base_app_input', 'job_app', 'playbook_app', 'run', 'service_app'])): return True return False
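check_import_stdlib tests membership against the third-party stdlib-list package for Python 3.6 through 3.8, plus a whitelist of App template module names. On Python 3.10+ the same question can be answered without a dependency; this sketch (is_stdlib is a hypothetical name) falls back to stdlib_list on older interpreters:

import sys

try:
    from stdlib_list import stdlib_list  # third-party 'stdlib-list' package
except ImportError:
    stdlib_list = None

def is_stdlib(module: str) -> bool:
    """Best-effort test for standard-library membership."""
    if hasattr(sys, 'stdlib_module_names'):  # available on Python 3.10+
        return module in sys.stdlib_module_names
    if stdlib_list is not None:
        return module in stdlib_list('3.8')
    raise RuntimeError('no standard-library listing available')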
@staticmethod def check_imported(module: str) -> bool: 'Check whether the provided module can be imported (package installed).\n\n Args:\n module: The name of the module to check availability.\n\n Returns:\n bool: True if the module can be imported, False otherwise.\n ' try: del sys.modules[module] except (AttributeError, KeyError): pass find_spec = importlib.util.find_spec(module) found = (find_spec is not None) if (found is True): try: if ('dist-packages' in find_spec.origin): found = False except TypeError: pass try: if ('site-packages' in find_spec.origin): found = False except TypeError: pass return found
556,081,726,153,656,300
Check whether the provided module can be imported (package installed). Args: module: The name of the module to check availability. Returns: bool: True if the module can be imported, False otherwise.
tcex/bin/validate.py
check_imported
benjaminPurdy/tcex
python
@staticmethod def check_imported(module: str) -> bool: try: del sys.modules[module] except (AttributeError, KeyError): pass find_spec = importlib.util.find_spec(module) found = (find_spec is not None) if (found is True): try: if ('dist-packages' in find_spec.origin): found = False except TypeError: pass try: if ('site-packages' in find_spec.origin): found = False except TypeError: pass return found
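check_imported relies on importlib.util.find_spec and then treats anything resolved from site-packages or dist-packages as not importable, so that only bundled or stdlib modules count. A condensed sketch of that logic (module_available is a hypothetical name):

import importlib.util

def module_available(module: str) -> bool:
    """True if module resolves to a spec outside site-packages/dist-packages."""
    spec = importlib.util.find_spec(module)
    if spec is None:
        return False
    origin = spec.origin or ''  # origin can be None for namespace packages
    return 'site-packages' not in origin and 'dist-packages' not in origin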
def check_install_json(self) -> None: 'Check all install.json files for valid schema.' if ('install.json' in self.invalid_json_files): return status = True try: self.ij.model except ValidationError as ex: self.invalid_json_files.append(self.ij.fqfn.name) status = False for error in json.loads(ex.json()): location = [str(location) for location in error.get('loc')] self.validation_data['errors'].append(f"Schema validation failed for install.json. {error.get('msg')}: {' -> '.join(location)}") except ValueError: return self.validation_data['schema'].append({'filename': self.ij.fqfn.name, 'status': status})
5,865,411,555,246,264,000
Check all install.json files for valid schema.
tcex/bin/validate.py
check_install_json
benjaminPurdy/tcex
python
def check_install_json(self) -> None: if ('install.json' in self.invalid_json_files): return status = True try: self.ij.model except ValidationError as ex: self.invalid_json_files.append(self.ij.fqfn.name) status = False for error in json.loads(ex.json()): location = [str(location) for location in error.get('loc')] self.validation_data['errors'].append(f"Schema validation failed for install.json. {error.get('msg')}: {' -> '.join(location)}") except ValueError: return self.validation_data['schema'].append({'filename': self.ij.fqfn.name, 'status': status})
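check_install_json leans on pydantic: touching self.ij.model triggers validation, and the raised ValidationError is unpacked into per-field messages. The pattern can be reproduced with any pydantic v1 model; InstallJsonModel below is a hypothetical two-field stand-in, not the real tcex schema:

import json
from pydantic import BaseModel, ValidationError

class InstallJsonModel(BaseModel):  # hypothetical, minimal stand-in
    displayName: str
    programVersion: str

def schema_errors(raw: dict) -> list:
    """Render pydantic validation errors as 'msg: path' strings."""
    try:
        InstallJsonModel(**raw)
    except ValidationError as ex:
        return [
            f"{e['msg']}: {' -> '.join(str(loc) for loc in e['loc'])}"
            for e in ex.errors()
        ]
    return []

print(schema_errors({'displayName': 'My App'}))
# pydantic v1 prints: ['field required: programVersion']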
def check_job_json(self) -> None: 'Validate feed files for feed job apps.' if ('install.json' in self.invalid_json_files): return app_version = (self.tj.model.package.app_version or self.ij.model.package_version) program_name = f'{self.tj.model.package.app_name}_{app_version}'.replace('_', ' ') status = True for feed in self.ij.model.feeds: if (feed.job_file in self.invalid_json_files): continue jj = JobJson(filename=feed.job_file) if (not jj.fqfn.is_file()): self.validation_data['errors'].append(f'Schema validation failed for {feed.job_file}. The job.json file could not be found.') continue try: jj.model except ValidationError as ex: status = False for error in json.loads(ex.json()): location = [str(location) for location in error.get('loc')] self.validation_data['errors'].append(f"Schema validation failed for {feed.job_file}. {error.get('msg')}: {' -> '.join(location)}") if ((status is True) and (jj.model.program_name != program_name)): status = False self.validation_data['errors'].append(f'Schema validation failed for {feed.job_file}. The job.json programName {jj.model.program_name} != {program_name}.') if ((status is True) and (jj.model.program_version != self.ij.model.program_version)): status = False self.validation_data['errors'].append(f'Schema validation failed for {feed.job_file}. The job.json programVersion {jj.model.program_version} != {self.ij.model.program_version}.') self.validation_data['schema'].append({'filename': feed.job_file, 'status': status})
5,187,031,783,895,453,000
Validate feed files for feed job apps.
tcex/bin/validate.py
check_job_json
benjaminPurdy/tcex
python
def check_job_json(self) -> None: if ('install.json' in self.invalid_json_files): return app_version = (self.tj.model.package.app_version or self.ij.model.package_version) program_name = f'{self.tj.model.package.app_name}_{app_version}'.replace('_', ' ') status = True for feed in self.ij.model.feeds: if (feed.job_file in self.invalid_json_files): continue jj = JobJson(filename=feed.job_file) if (not jj.fqfn.is_file()): self.validation_data['errors'].append(f'Schema validation failed for {feed.job_file}. The job.json file could not be found.') continue try: jj.model except ValidationError as ex: status = False for error in json.loads(ex.json()): location = [str(location) for location in error.get('loc')] self.validation_data['errors'].append(f"Schema validation failed for {feed.job_file}. {error.get('msg')}: {' -> '.join(location)}") if ((status is True) and (jj.model.program_name != program_name)): status = False self.validation_data['errors'].append(f'Schema validation failed for {feed.job_file}. The job.json programName {jj.model.program_name} != {program_name}.') if ((status is True) and (jj.model.program_version != self.ij.model.program_version)): status = False self.validation_data['errors'].append(f'Schema validation failed for {feed.job_file}. The job.json programVersion {jj.model.program_version} != {self.ij.model.program_version}.') self.validation_data['schema'].append({'filename': feed.job_file, 'status': status})
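check_job_json cross-checks every feed's job.json against values derived from tcex.json and install.json: the file must exist, validate against the JobJson schema, and agree on programName and programVersion. The consistency part reduces to plain JSON field comparison; check_job_file below is a hypothetical standalone version that skips the schema step:

import json
from pathlib import Path

def check_job_file(job_file: str, program_name: str, program_version: str) -> list:
    """Return mismatch errors for a feed job file."""
    path = Path(job_file)
    if not path.is_file():
        return [f'{job_file} could not be found.']
    job = json.loads(path.read_text())
    errors = []
    if job.get('programName') != program_name:
        errors.append(f"programName {job.get('programName')} != {program_name}")
    if job.get('programVersion') != program_version:
        errors.append(f"programVersion {job.get('programVersion')} != {program_version}")
    return errors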
def check_layout_json(self) -> None: 'Check all layout.json files for valid schema.' if ((not self.lj.has_layout) or ('layout.json' in self.invalid_json_files)): return status = True try: self.lj.model except ValidationError as ex: self.invalid_json_files.append(self.lj.fqfn.name) status = False for error in json.loads(ex.json()): location = [str(location) for location in error.get('loc')] self.validation_data['errors'].append(f"Schema validation failed for layout.json. {error.get('msg')}: {' -> '.join(location)}") except ValueError: return self.validation_data['schema'].append({'filename': self.lj.fqfn.name, 'status': status}) if (status is True): self.check_layout_params()
-9,029,930,225,845,429,000
Check all layout.json files for valid schema.
tcex/bin/validate.py
check_layout_json
benjaminPurdy/tcex
python
def check_layout_json(self) -> None: if ((not self.lj.has_layout) or ('layout.json' in self.invalid_json_files)): return status = True try: self.lj.model except ValidationError as ex: self.invalid_json_files.append(self.lj.fqfn.name) status = False for error in json.loads(ex.json()): location = [str(location) for location in error.get('loc')] self.validation_data['errors'].append(f"Schema validation failed for layout.json. {error.get('msg')}: {' -> '.join(location)}") except ValueError: return self.validation_data['schema'].append({'filename': self.lj.fqfn.name, 'status': status}) if (status is True): self.check_layout_params()
def check_layout_params(self) -> None: "Check that the layout.json is consistent with install.json.\n\n The layout.json file references the params.name values from the install.json file. This\n method validates that no references appear for inputs that don't exist in install.json.\n " ij_input_names = list(self.ij.model.filter_params(service_config=False, hidden=False)) ij_output_names = [o.name for o in self.ij.model.playbook.output_variables] for name in self.ij.validate.validate_duplicate_input(): self.validation_data['errors'].append(f'Duplicate input name found in install.json ({name})') status = False for sequence in self.ij.validate.validate_duplicate_sequence(): self.validation_data['errors'].append(f'Duplicate sequence number found in install.json ({sequence})') status = False for output in self.ij.validate.validate_duplicate_output(): self.validation_data['errors'].append(f'Duplicate output variable name found in install.json ({output})') status = False if ('sqlite3' in sys.modules): self.permutations.db_create_table(self.permutations._input_table, ij_input_names) status = True for i in self.lj.model.inputs: for p in i.parameters: if (p.name not in ij_input_names): self.validation_data['errors'].append(f'Layouts input.parameters[].name validations failed ("{p.name}" is defined in layout.json, but hidden or not found in install.json).') status = False else: ij_input_names.remove(p.name) if ('sqlite3' in sys.modules): if p.display: display_query = f'SELECT * FROM {self.permutations._input_table} WHERE {p.display}' try: self.permutations.db_conn.execute(display_query.replace('"', '')) except sqlite3.Error: self.validation_data['errors'].append(f'Layouts input.parameters[].display validations failed ("{p.display}" query is an invalid statement).') status = False self.validation_data['layouts'].append({'params': 'inputs', 'status': status}) if ij_input_names: input_names = ','.join(ij_input_names) self.validation_data['errors'].append(f'Layouts input.parameters[].name validations failed ("{input_names}" values from install.json were not included in layout.json).') status = False status = True for o in self.lj.model.outputs: if (o.name not in ij_output_names): self.validation_data['errors'].append(f'Layouts output validations failed ({o.name} is defined in layout.json, but not found in install.json).') status = False if ('sqlite3' in sys.modules): if o.display: display_query = f'SELECT * FROM {self.permutations._input_table} WHERE {o.display}' try: self.permutations.db_conn.execute(display_query.replace('"', '')) except sqlite3.Error: self.validation_data['errors'].append(f'Layouts outputs.display validations failed ("{o.display}" query is an invalid statement).') status = False self.validation_data['layouts'].append({'params': 'outputs', 'status': status})
1,388,452,454,794,460,200
Check that the layout.json is consistent with install.json. The layout.json file references the params.name values from the install.json file. This method validates that no references appear for inputs that don't exist in install.json.
tcex/bin/validate.py
check_layout_params
benjaminPurdy/tcex
python
def check_layout_params(self) -> None: "Check that the layout.json is consistent with install.json.\n\n The layout.json files references the params.name from the install.json file. The method\n will validate that no reference appear for inputs in install.json that don't exist.\n " ij_input_names = list(self.ij.model.filter_params(service_config=False, hidden=False)) ij_output_names = [o.name for o in self.ij.model.playbook.output_variables] for name in self.ij.validate.validate_duplicate_input(): self.validation_data['errors'].append(f'Duplicate input name found in install.json ({name})') status = False for sequence in self.ij.validate.validate_duplicate_sequence(): self.validation_data['errors'].append(f'Duplicate sequence number found in install.json ({sequence})') status = False for output in self.ij.validate.validate_duplicate_output(): self.validation_data['errors'].append(f'Duplicate output variable name found in install.json ({output})') status = False if ('sqlite3' in sys.modules): self.permutations.db_create_table(self.permutations._input_table, ij_input_names) status = True for i in self.lj.model.inputs: for p in i.parameters: if (p.name not in ij_input_names): self.validation_data['errors'].append(f"Layouts input.parameters[].name validations failed ("{p.get('name')}" is defined in layout.json, but hidden or not found in install.json).") status = False else: ij_input_names.remove(p.name) if ('sqlite3' in sys.modules): if p.display: display_query = f'SELECT * FROM {self.permutations._input_table} WHERE {p.display}' try: self.permutations.db_conn.execute(display_query.replace('"', )) except sqlite3.Error: self.validation_data['errors'].append(f'Layouts input.parameters[].display validations failed ("{p.display}" query is an invalid statement).') status = False self.validation_data['layouts'].append({'params': 'inputs', 'status': status}) if ij_input_names: input_names = ','.join(ij_input_names) self.validation_data['errors'].append(f'Layouts input.parameters[].name validations failed ("{input_names}" values from install.json were not included in layout.json.') status = False status = True for o in self.lj.model.outputs: if (o.name not in ij_output_names): self.validation_data['errors'].append(f'Layouts output validations failed ({o.name} is defined in layout.json, but not found in install.json).') status = False if ('sqlite3' in sys.modules): if o.display: display_query = f'SELECT * FROM {self.permutations._input_table} WHERE {o.display}' try: self.permutations.db_conn.execute(display_query.replace('"', )) except sqlite3.Error: self.validation_data['errors'].append(f'Layouts outputs.display validations failed ("{o.display}" query is an invalid statement).') status = False self.validation_data['layouts'].append({'params': 'outputs', 'status': status})
def check_syntax(self, app_path=None) -> None: 'Run syntax validation on each ".py" and ".json" file.\n\n Args:\n app_path (str, optional): The path of the Python files.\n ' fqpn = Path((app_path or os.getcwd())) for fqfn in sorted(fqpn.iterdir()): error = None status = True if fqfn.name.endswith('.py'): try: with fqfn.open(mode='rb') as fh: ast.parse(fh.read(), filename=fqfn.name) except SyntaxError: status = False e = [] for line in traceback.format_exc().split('\n')[(- 5):(- 2)]: e.append(line.strip()) error = ' '.join(e) elif fqfn.name.endswith('.json'): try: with fqfn.open() as fh: json.load(fh) except ValueError as e: self.invalid_json_files.append(fqfn.name) status = False error = e else: continue if error: self.validation_data['errors'].append(f'Syntax validation failed for {fqfn.name} ({error}).') self.validation_data['fileSyntax'].append({'filename': fqfn.name, 'status': status})
8,878,062,756,795,646,000
Run syntax validation on each ".py" and ".json" file. Args: app_path (str, optional): The path of the Python files.
tcex/bin/validate.py
check_syntax
benjaminPurdy/tcex
python
def check_syntax(self, app_path=None) -> None: fqpn = Path((app_path or os.getcwd())) for fqfn in sorted(fqpn.iterdir()): error = None status = True if fqfn.name.endswith('.py'): try: with fqfn.open(mode='rb') as fh: ast.parse(fh.read(), filename=fqfn.name) except SyntaxError: status = False e = [] for line in traceback.format_exc().split('\n')[(- 5):(- 2)]: e.append(line.strip()) error = ' '.join(e) elif fqfn.name.endswith('.json'): try: with fqfn.open() as fh: json.load(fh) except ValueError as e: self.invalid_json_files.append(fqfn.name) status = False error = e else: continue if error: self.validation_data['errors'].append(f'Syntax validation failed for {fqfn.name} ({error}).') self.validation_data['fileSyntax'].append({'filename': fqfn.name, 'status': status})
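check_syntax treats .py and .json files differently: Python files are compiled with ast.parse and the tail of the traceback is kept as the error text, while JSON files just go through json.load. A compact standalone variant (syntax_errors is a hypothetical name) that records one error string, or None, per file:

import ast
import json
from pathlib import Path

def syntax_errors(app_path: str = '.') -> dict:
    """Map each .py/.json file in app_path to None (ok) or an error string."""
    results = {}
    for fqfn in sorted(Path(app_path).iterdir()):
        if fqfn.suffix == '.py':
            try:
                ast.parse(fqfn.read_bytes(), filename=fqfn.name)
                results[fqfn.name] = None
            except SyntaxError as e:
                results[fqfn.name] = f'line {e.lineno}: {e.msg}'
        elif fqfn.suffix == '.json':
            try:
                json.loads(fqfn.read_text())
                results[fqfn.name] = None
            except ValueError as e:
                results[fqfn.name] = str(e)
    return results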
def interactive(self) -> None: '[App Builder] Run in interactive mode.' while True: line = sys.stdin.readline().strip() if (line == 'quit'): sys.exit() elif (line == 'validate'): self.check_syntax() self.check_imports() self.check_install_json() self.check_layout_json() self.check_job_json() self.print_json() self.validation_data = self._validation_data
-7,308,072,104,282,095,000
[App Builder] Run in interactive mode.
tcex/bin/validate.py
interactive
benjaminPurdy/tcex
python
def interactive(self) -> None: while True: line = sys.stdin.readline().strip() if (line == 'quit'): sys.exit() elif (line == 'validate'): self.check_syntax() self.check_imports() self.check_install_json() self.check_layout_json() self.check_job_json() self.print_json() self.validation_data = self._validation_data
def print_json(self) -> None: '[App Builder] Print JSON output.' print(json.dumps({'validation_data': self.validation_data}))
-1,345,672,885,728,641,000
[App Builder] Print JSON output.
tcex/bin/validate.py
print_json
benjaminPurdy/tcex
python
def print_json(self) -> None: print(json.dumps({'validation_data': self.validation_data}))
def _print_file_syntax_results(self) -> None: 'Print file syntax results.' if self.validation_data.get('fileSyntax'): print(f''' {c.Style.BRIGHT}{c.Fore.BLUE}Validated File Syntax:''') print(f"{c.Style.BRIGHT}{'File:'!s:<60}{'Status:'!s:<25}") for f in self.validation_data.get('fileSyntax'): status_color = self.status_color(f.get('status')) status_value = self.status_value(f.get('status')) print(f"{f.get('filename')!s:<60}{status_color}{status_value!s:<25}")
-7,130,818,483,572,398,000
Print file syntax results.
tcex/bin/validate.py
_print_file_syntax_results
benjaminPurdy/tcex
python
def _print_file_syntax_results(self) -> None: if self.validation_data.get('fileSyntax'): print(f' {c.Style.BRIGHT}{c.Fore.BLUE}Validated File Syntax:') print(f"{c.Style.BRIGHT}{'File:'!s:<60}{'Status:'!s:<25}") for f in self.validation_data.get('fileSyntax'): status_color = self.status_color(f.get('status')) status_value = self.status_value(f.get('status')) print(f"{f.get('filename')!s:<60}{status_color}{status_value!s:<25}")
def _print_imports_results(self) -> None: 'Print import results.' if self.validation_data.get('moduleImports'): print(f''' {c.Style.BRIGHT}{c.Fore.BLUE}Validated Imports:''') print(f"{c.Style.BRIGHT}{'File:'!s:<30}{'Module:'!s:<30}{'Status:'!s:<25}") for f in self.validation_data.get('moduleImports'): status_color = self.status_color(f.get('status')) status_value = self.status_value(f.get('status')) print(f"{f.get('filename')!s:<30}{c.Fore.WHITE}{f.get('module')!s:<30}{status_color}{status_value!s:<25}")
4,693,549,105,083,687,000
Print import results.
tcex/bin/validate.py
_print_imports_results
benjaminPurdy/tcex
python
def _print_imports_results(self) -> None: if self.validation_data.get('moduleImports'): print(f' {c.Style.BRIGHT}{c.Fore.BLUE}Validated Imports:') print(f"{c.Style.BRIGHT}{'File:'!s:<30}{'Module:'!s:<30}{'Status:'!s:<25}") for f in self.validation_data.get('moduleImports'): status_color = self.status_color(f.get('status')) status_value = self.status_value(f.get('status')) print(f"{f.get('filename')!s:<30}{c.Fore.WHITE}{f.get('module')!s:<30}{status_color}{status_value!s:<25}")
def _print_schema_results(self) -> None: 'Print schema results.' if self.validation_data.get('schema'): print(f''' {c.Style.BRIGHT}{c.Fore.BLUE}Validated Schema:''') print(f"{c.Style.BRIGHT}{'File:'!s:<60}{'Status:'!s:<25}") for f in self.validation_data.get('schema'): status_color = self.status_color(f.get('status')) status_value = self.status_value(f.get('status')) print(f"{f.get('filename')!s:<60}{status_color}{status_value!s:<25}")
-5,820,603,044,823,452,000
Print schema results.
tcex/bin/validate.py
_print_schema_results
benjaminPurdy/tcex
python
def _print_schema_results(self) -> None: if self.validation_data.get('schema'): print(f' {c.Style.BRIGHT}{c.Fore.BLUE}Validated Schema:') print(f"{c.Style.BRIGHT}{'File:'!s:<60}{'Status:'!s:<25}") for f in self.validation_data.get('schema'): status_color = self.status_color(f.get('status')) status_value = self.status_value(f.get('status')) print(f"{f.get('filename')!s:<60}{status_color}{status_value!s:<25}")
def _print_layouts_results(self) -> None: 'Print layout results.' if self.validation_data.get('layouts'): print(f''' {c.Style.BRIGHT}{c.Fore.BLUE}Validated Layouts:''') print(f"{c.Style.BRIGHT}{'Params:'!s:<60}{'Status:'!s:<25}") for f in self.validation_data.get('layouts'): status_color = self.status_color(f.get('status')) status_value = self.status_value(f.get('status')) print(f"{f.get('params')!s:<60}{status_color}{status_value!s:<25}")
1,522,637,771,830,174,000
Print layout results.
tcex/bin/validate.py
_print_layouts_results
benjaminPurdy/tcex
python
def _print_layouts_results(self) -> None: if self.validation_data.get('layouts'): print(f' {c.Style.BRIGHT}{c.Fore.BLUE}Validated Layouts:') print(f"{c.Style.BRIGHT}{'Params:'!s:<60}{'Status:'!s:<25}") for f in self.validation_data.get('layouts'): status_color = self.status_color(f.get('status')) status_value = self.status_value(f.get('status')) print(f"{f.get('params')!s:<60}{status_color}{status_value!s:<25}")
def _print_feed_results(self) -> None: 'Print feed results.' if self.validation_data.get('feeds'): print(f''' {c.Style.BRIGHT}{c.Fore.BLUE}Validated Feed Jobs:''') print(f"{c.Style.BRIGHT}{'Feeds:'!s:<60}{'Status:'!s:<25}") for f in self.validation_data.get('feeds'): status_color = self.status_color(f.get('status')) status_value = self.status_value(f.get('status')) print(f"{f.get('name')!s:<60}{status_color}{status_value!s:<25}")
7,289,236,692,831,736,000
Print feed results.
tcex/bin/validate.py
_print_feed_results
benjaminPurdy/tcex
python
def _print_feed_results(self) -> None: if self.validation_data.get('feeds'): print(f' {c.Style.BRIGHT}{c.Fore.BLUE}Validated Feed Jobs:') print(f"{c.Style.BRIGHT}{'Feeds:'!s:<60}{'Status:'!s:<25}") for f in self.validation_data.get('feeds'): status_color = self.status_color(f.get('status')) status_value = self.status_value(f.get('status')) print(f"{f.get('name')!s:<60}{status_color}{status_value!s:<25}")
def _print_errors(self) -> None: 'Print error results.' if self.validation_data.get('errors'): print('\n') for error in self.validation_data.get('errors'): print(f'* {c.Fore.RED}{error}') if (not self.ignore_validation): self.exit_code = 1
2,625,961,558,882,467,300
Print error results.
tcex/bin/validate.py
_print_errors
benjaminPurdy/tcex
python
def _print_errors(self) -> None: if self.validation_data.get('errors'): print('\n') for error in self.validation_data.get('errors'): print(f'* {c.Fore.RED}{error}') if (not self.ignore_validation): self.exit_code = 1
def print_results(self) -> None: 'Print results.' self._print_file_syntax_results() self._print_imports_results() self._print_schema_results() self._print_layouts_results() self._print_feed_results() self._print_errors()
5,307,456,271,778,884,000
Print results.
tcex/bin/validate.py
print_results
benjaminPurdy/tcex
python
def print_results(self) -> None: self._print_file_syntax_results() self._print_imports_results() self._print_schema_results() self._print_layouts_results() self._print_feed_results() self._print_errors()
@staticmethod def status_color(status) -> str: 'Return the appropriate status color.' return (c.Fore.GREEN if status else c.Fore.RED)
-5,684,548,797,497,374,000
Return the appropriate status color.
tcex/bin/validate.py
status_color
benjaminPurdy/tcex
python
@staticmethod def status_color(status) -> str: return (c.Fore.GREEN if status else c.Fore.RED)
@staticmethod def status_value(status) -> str: 'Return the appropriate status value.' return ('passed' if status else 'failed')
-6,747,285,470,334,901,000
Return the appropriate status value.
tcex/bin/validate.py
status_value
benjaminPurdy/tcex
python
@staticmethod def status_value(status) -> str: return ('passed' if status else 'failed')
@array_function_from_c_func_and_dispatcher(_multiarray_umath.empty_like) def empty_like(prototype, dtype=None, order=None, subok=None): "\n empty_like(prototype, dtype=None, order='K', subok=True)\n\n Return a new array with the same shape and type as a given array.\n\n Parameters\n ----------\n prototype : array_like\n The shape and data-type of `prototype` define these same attributes\n of the returned array.\n dtype : data-type, optional\n Overrides the data type of the result.\n\n .. versionadded:: 1.6.0\n order : {'C', 'F', 'A', or 'K'}, optional\n Overrides the memory layout of the result. 'C' means C-order,\n 'F' means F-order, 'A' means 'F' if ``prototype`` is Fortran\n contiguous, 'C' otherwise. 'K' means match the layout of ``prototype``\n as closely as possible.\n\n .. versionadded:: 1.6.0\n subok : bool, optional.\n If True, then the newly created array will use the sub-class\n type of 'a', otherwise it will be a base-class array. Defaults\n to True.\n\n Returns\n -------\n out : ndarray\n Array of uninitialized (arbitrary) data with the same\n shape and type as `prototype`.\n\n See Also\n --------\n ones_like : Return an array of ones with shape and type of input.\n zeros_like : Return an array of zeros with shape and type of input.\n full_like : Return a new array with shape of input filled with value.\n empty : Return a new uninitialized array.\n\n Notes\n -----\n This function does *not* initialize the returned array; to do that use\n `zeros_like` or `ones_like` instead. It may be marginally faster than\n the functions that do set the array values.\n\n Examples\n --------\n >>> a = ([1,2,3], [4,5,6]) # a is array-like\n >>> np.empty_like(a)\n array([[-1073741821, -1073741821, 3], #random\n [ 0, 0, -1073741821]])\n >>> a = np.array([[1., 2., 3.],[4.,5.,6.]])\n >>> np.empty_like(a)\n array([[ -2.00000715e+000, 1.48219694e-323, -2.00000572e+000],#random\n [ 4.38791518e-305, -2.00000715e+000, 4.17269252e-309]])\n\n " return (prototype,)
-1,656,268,581,539,844,600
empty_like(prototype, dtype=None, order='K', subok=True) Return a new array with the same shape and type as a given array. Parameters ---------- prototype : array_like The shape and data-type of `prototype` define these same attributes of the returned array. dtype : data-type, optional Overrides the data type of the result. .. versionadded:: 1.6.0 order : {'C', 'F', 'A', or 'K'}, optional Overrides the memory layout of the result. 'C' means C-order, 'F' means F-order, 'A' means 'F' if ``prototype`` is Fortran contiguous, 'C' otherwise. 'K' means match the layout of ``prototype`` as closely as possible. .. versionadded:: 1.6.0 subok : bool, optional. If True, then the newly created array will use the sub-class type of 'a', otherwise it will be a base-class array. Defaults to True. Returns ------- out : ndarray Array of uninitialized (arbitrary) data with the same shape and type as `prototype`. See Also -------- ones_like : Return an array of ones with shape and type of input. zeros_like : Return an array of zeros with shape and type of input. full_like : Return a new array with shape of input filled with value. empty : Return a new uninitialized array. Notes ----- This function does *not* initialize the returned array; to do that use `zeros_like` or `ones_like` instead. It may be marginally faster than the functions that do set the array values. Examples -------- >>> a = ([1,2,3], [4,5,6]) # a is array-like >>> np.empty_like(a) array([[-1073741821, -1073741821, 3], #random [ 0, 0, -1073741821]]) >>> a = np.array([[1., 2., 3.],[4.,5.,6.]]) >>> np.empty_like(a) array([[ -2.00000715e+000, 1.48219694e-323, -2.00000572e+000],#random [ 4.38791518e-305, -2.00000715e+000, 4.17269252e-309]])
venv/lib/python3.7/site-packages/numpy/core/multiarray.py
empty_like
180Studios/LoginApp
python
@array_function_from_c_func_and_dispatcher(_multiarray_umath.empty_like) def empty_like(prototype, dtype=None, order=None, subok=None): return (prototype,)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.concatenate) def concatenate(arrays, axis=None, out=None): '\n concatenate((a1, a2, ...), axis=0, out=None)\n\n Join a sequence of arrays along an existing axis.\n\n Parameters\n ----------\n a1, a2, ... : sequence of array_like\n The arrays must have the same shape, except in the dimension\n corresponding to `axis` (the first, by default).\n axis : int, optional\n The axis along which the arrays will be joined. If axis is None,\n arrays are flattened before use. Default is 0.\n out : ndarray, optional\n If provided, the destination to place the result. The shape must be\n correct, matching that of what concatenate would have returned if no\n out argument were specified.\n\n Returns\n -------\n res : ndarray\n The concatenated array.\n\n See Also\n --------\n ma.concatenate : Concatenate function that preserves input masks.\n array_split : Split an array into multiple sub-arrays of equal or\n near-equal size.\n split : Split array into a list of multiple sub-arrays of equal size.\n hsplit : Split array into multiple sub-arrays horizontally (column wise)\n vsplit : Split array into multiple sub-arrays vertically (row wise)\n dsplit : Split array into multiple sub-arrays along the 3rd axis (depth).\n stack : Stack a sequence of arrays along a new axis.\n hstack : Stack arrays in sequence horizontally (column wise)\n vstack : Stack arrays in sequence vertically (row wise)\n dstack : Stack arrays in sequence depth wise (along third dimension)\n block : Assemble arrays from blocks.\n\n Notes\n -----\n When one or more of the arrays to be concatenated is a MaskedArray,\n this function will return a MaskedArray object instead of an ndarray,\n but the input masks are *not* preserved. In cases where a MaskedArray\n is expected as input, use the ma.concatenate function from the masked\n array module instead.\n\n Examples\n --------\n >>> a = np.array([[1, 2], [3, 4]])\n >>> b = np.array([[5, 6]])\n >>> np.concatenate((a, b), axis=0)\n array([[1, 2],\n [3, 4],\n [5, 6]])\n >>> np.concatenate((a, b.T), axis=1)\n array([[1, 2, 5],\n [3, 4, 6]])\n >>> np.concatenate((a, b), axis=None)\n array([1, 2, 3, 4, 5, 6])\n\n This function will not preserve masking of MaskedArray inputs.\n\n >>> a = np.ma.arange(3)\n >>> a[1] = np.ma.masked\n >>> b = np.arange(2, 5)\n >>> a\n masked_array(data=[0, --, 2],\n mask=[False, True, False],\n fill_value=999999)\n >>> b\n array([2, 3, 4])\n >>> np.concatenate([a, b])\n masked_array(data=[0, 1, 2, 2, 3, 4],\n mask=False,\n fill_value=999999)\n >>> np.ma.concatenate([a, b])\n masked_array(data=[0, --, 2, 2, 3, 4],\n mask=[False, True, False, False, False, False],\n fill_value=999999)\n\n ' if (out is not None): arrays = list(arrays) arrays.append(out) return arrays
-8,880,532,773,887,127,000
concatenate((a1, a2, ...), axis=0, out=None) Join a sequence of arrays along an existing axis. Parameters ---------- a1, a2, ... : sequence of array_like The arrays must have the same shape, except in the dimension corresponding to `axis` (the first, by default). axis : int, optional The axis along which the arrays will be joined. If axis is None, arrays are flattened before use. Default is 0. out : ndarray, optional If provided, the destination to place the result. The shape must be correct, matching that of what concatenate would have returned if no out argument were specified. Returns ------- res : ndarray The concatenated array. See Also -------- ma.concatenate : Concatenate function that preserves input masks. array_split : Split an array into multiple sub-arrays of equal or near-equal size. split : Split array into a list of multiple sub-arrays of equal size. hsplit : Split array into multiple sub-arrays horizontally (column wise) vsplit : Split array into multiple sub-arrays vertically (row wise) dsplit : Split array into multiple sub-arrays along the 3rd axis (depth). stack : Stack a sequence of arrays along a new axis. hstack : Stack arrays in sequence horizontally (column wise) vstack : Stack arrays in sequence vertically (row wise) dstack : Stack arrays in sequence depth wise (along third dimension) block : Assemble arrays from blocks. Notes ----- When one or more of the arrays to be concatenated is a MaskedArray, this function will return a MaskedArray object instead of an ndarray, but the input masks are *not* preserved. In cases where a MaskedArray is expected as input, use the ma.concatenate function from the masked array module instead. Examples -------- >>> a = np.array([[1, 2], [3, 4]]) >>> b = np.array([[5, 6]]) >>> np.concatenate((a, b), axis=0) array([[1, 2], [3, 4], [5, 6]]) >>> np.concatenate((a, b.T), axis=1) array([[1, 2, 5], [3, 4, 6]]) >>> np.concatenate((a, b), axis=None) array([1, 2, 3, 4, 5, 6]) This function will not preserve masking of MaskedArray inputs. >>> a = np.ma.arange(3) >>> a[1] = np.ma.masked >>> b = np.arange(2, 5) >>> a masked_array(data=[0, --, 2], mask=[False, True, False], fill_value=999999) >>> b array([2, 3, 4]) >>> np.concatenate([a, b]) masked_array(data=[0, 1, 2, 2, 3, 4], mask=False, fill_value=999999) >>> np.ma.concatenate([a, b]) masked_array(data=[0, --, 2, 2, 3, 4], mask=[False, True, False, False, False, False], fill_value=999999)
venv/lib/python3.7/site-packages/numpy/core/multiarray.py
concatenate
180Studios/LoginApp
python
@array_function_from_c_func_and_dispatcher(_multiarray_umath.concatenate) def concatenate(arrays, axis=None, out=None): if (out is not None): arrays = list(arrays) arrays.append(out) return arrays
@array_function_from_c_func_and_dispatcher(_multiarray_umath.inner) def inner(a, b): '\n inner(a, b)\n\n Inner product of two arrays.\n\n Ordinary inner product of vectors for 1-D arrays (without complex\n conjugation), in higher dimensions a sum product over the last axes.\n\n Parameters\n ----------\n a, b : array_like\n If `a` and `b` are nonscalar, their last dimensions must match.\n\n Returns\n -------\n out : ndarray\n `out.shape = a.shape[:-1] + b.shape[:-1]`\n\n Raises\n ------\n ValueError\n If the last dimension of `a` and `b` has different size.\n\n See Also\n --------\n tensordot : Sum products over arbitrary axes.\n dot : Generalised matrix product, using second last dimension of `b`.\n einsum : Einstein summation convention.\n\n Notes\n -----\n For vectors (1-D arrays) it computes the ordinary inner-product::\n\n np.inner(a, b) = sum(a[:]*b[:])\n\n More generally, if `ndim(a) = r > 0` and `ndim(b) = s > 0`::\n\n np.inner(a, b) = np.tensordot(a, b, axes=(-1,-1))\n\n or explicitly::\n\n np.inner(a, b)[i0,...,ir-1,j0,...,js-1]\n = sum(a[i0,...,ir-1,:]*b[j0,...,js-1,:])\n\n In addition `a` or `b` may be scalars, in which case::\n\n np.inner(a,b) = a*b\n\n Examples\n --------\n Ordinary inner product for vectors:\n\n >>> a = np.array([1,2,3])\n >>> b = np.array([0,1,0])\n >>> np.inner(a, b)\n 2\n\n A multidimensional example:\n\n >>> a = np.arange(24).reshape((2,3,4))\n >>> b = np.arange(4)\n >>> np.inner(a, b)\n array([[ 14, 38, 62],\n [ 86, 110, 134]])\n\n An example where `b` is a scalar:\n\n >>> np.inner(np.eye(2), 7)\n array([[ 7., 0.],\n [ 0., 7.]])\n\n ' return (a, b)
-7,448,896,873,532,278,000
inner(a, b) Inner product of two arrays. Ordinary inner product of vectors for 1-D arrays (without complex conjugation), in higher dimensions a sum product over the last axes. Parameters ---------- a, b : array_like If `a` and `b` are nonscalar, their last dimensions must match. Returns ------- out : ndarray `out.shape = a.shape[:-1] + b.shape[:-1]` Raises ------ ValueError If the last dimension of `a` and `b` has different size. See Also -------- tensordot : Sum products over arbitrary axes. dot : Generalised matrix product, using second last dimension of `b`. einsum : Einstein summation convention. Notes ----- For vectors (1-D arrays) it computes the ordinary inner-product:: np.inner(a, b) = sum(a[:]*b[:]) More generally, if `ndim(a) = r > 0` and `ndim(b) = s > 0`:: np.inner(a, b) = np.tensordot(a, b, axes=(-1,-1)) or explicitly:: np.inner(a, b)[i0,...,ir-1,j0,...,js-1] = sum(a[i0,...,ir-1,:]*b[j0,...,js-1,:]) In addition `a` or `b` may be scalars, in which case:: np.inner(a,b) = a*b Examples -------- Ordinary inner product for vectors: >>> a = np.array([1,2,3]) >>> b = np.array([0,1,0]) >>> np.inner(a, b) 2 A multidimensional example: >>> a = np.arange(24).reshape((2,3,4)) >>> b = np.arange(4) >>> np.inner(a, b) array([[ 14, 38, 62], [ 86, 110, 134]]) An example where `b` is a scalar: >>> np.inner(np.eye(2), 7) array([[ 7., 0.], [ 0., 7.]])
venv/lib/python3.7/site-packages/numpy/core/multiarray.py
inner
180Studios/LoginApp
python
@array_function_from_c_func_and_dispatcher(_multiarray_umath.inner) def inner(a, b): return (a, b)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.where) def where(condition, x=None, y=None): '\n where(condition, [x, y])\n\n Return elements chosen from `x` or `y` depending on `condition`.\n\n .. note::\n When only `condition` is provided, this function is a shorthand for\n ``np.asarray(condition).nonzero()``. Using `nonzero` directly should be\n preferred, as it behaves correctly for subclasses. The rest of this\n documentation covers only the case where all three arguments are\n provided.\n\n Parameters\n ----------\n condition : array_like, bool\n Where True, yield `x`, otherwise yield `y`.\n x, y : array_like\n Values from which to choose. `x`, `y` and `condition` need to be\n broadcastable to some shape.\n\n Returns\n -------\n out : ndarray\n An array with elements from `x` where `condition` is True, and elements\n from `y` elsewhere.\n\n See Also\n --------\n choose\n nonzero : The function that is called when x and y are omitted\n\n Notes\n -----\n If all the arrays are 1-D, `where` is equivalent to::\n\n [xv if c else yv\n for c, xv, yv in zip(condition, x, y)]\n\n Examples\n --------\n >>> a = np.arange(10)\n >>> a\n array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])\n >>> np.where(a < 5, a, 10*a)\n array([ 0, 1, 2, 3, 4, 50, 60, 70, 80, 90])\n\n This can be used on multidimensional arrays too:\n\n >>> np.where([[True, False], [True, True]],\n ... [[1, 2], [3, 4]],\n ... [[9, 8], [7, 6]])\n array([[1, 8],\n [3, 4]])\n\n The shapes of x, y, and the condition are broadcast together:\n\n >>> x, y = np.ogrid[:3, :4]\n >>> np.where(x < y, x, 10 + y) # both x and 10+y are broadcast\n array([[10, 0, 0, 0],\n [10, 11, 1, 1],\n [10, 11, 12, 2]])\n\n >>> a = np.array([[0, 1, 2],\n ... [0, 2, 4],\n ... [0, 3, 6]])\n >>> np.where(a < 4, a, -1) # -1 is broadcast\n array([[ 0, 1, 2],\n [ 0, 2, -1],\n [ 0, 3, -1]])\n ' return (condition, x, y)
5,377,605,712,749,175,000
where(condition, [x, y]) Return elements chosen from `x` or `y` depending on `condition`. .. note:: When only `condition` is provided, this function is a shorthand for ``np.asarray(condition).nonzero()``. Using `nonzero` directly should be preferred, as it behaves correctly for subclasses. The rest of this documentation covers only the case where all three arguments are provided. Parameters ---------- condition : array_like, bool Where True, yield `x`, otherwise yield `y`. x, y : array_like Values from which to choose. `x`, `y` and `condition` need to be broadcastable to some shape. Returns ------- out : ndarray An array with elements from `x` where `condition` is True, and elements from `y` elsewhere. See Also -------- choose nonzero : The function that is called when x and y are omitted Notes ----- If all the arrays are 1-D, `where` is equivalent to:: [xv if c else yv for c, xv, yv in zip(condition, x, y)] Examples -------- >>> a = np.arange(10) >>> a array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) >>> np.where(a < 5, a, 10*a) array([ 0, 1, 2, 3, 4, 50, 60, 70, 80, 90]) This can be used on multidimensional arrays too: >>> np.where([[True, False], [True, True]], ... [[1, 2], [3, 4]], ... [[9, 8], [7, 6]]) array([[1, 8], [3, 4]]) The shapes of x, y, and the condition are broadcast together: >>> x, y = np.ogrid[:3, :4] >>> np.where(x < y, x, 10 + y) # both x and 10+y are broadcast array([[10, 0, 0, 0], [10, 11, 1, 1], [10, 11, 12, 2]]) >>> a = np.array([[0, 1, 2], ... [0, 2, 4], ... [0, 3, 6]]) >>> np.where(a < 4, a, -1) # -1 is broadcast array([[ 0, 1, 2], [ 0, 2, -1], [ 0, 3, -1]])
venv/lib/python3.7/site-packages/numpy/core/multiarray.py
where
180Studios/LoginApp
python
@array_function_from_c_func_and_dispatcher(_multiarray_umath.where) def where(condition, x=None, y=None): return (condition, x, y)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.lexsort) def lexsort(keys, axis=None): '\n lexsort(keys, axis=-1)\n\n Perform an indirect stable sort using a sequence of keys.\n\n Given multiple sorting keys, which can be interpreted as columns in a\n spreadsheet, lexsort returns an array of integer indices that describes\n the sort order by multiple columns. The last key in the sequence is used\n for the primary sort order, the second-to-last key for the secondary sort\n order, and so on. The keys argument must be a sequence of objects that\n can be converted to arrays of the same shape. If a 2D array is provided\n for the keys argument, it\'s rows are interpreted as the sorting keys and\n sorting is according to the last row, second last row etc.\n\n Parameters\n ----------\n keys : (k, N) array or tuple containing k (N,)-shaped sequences\n The `k` different "columns" to be sorted. The last column (or row if\n `keys` is a 2D array) is the primary sort key.\n axis : int, optional\n Axis to be indirectly sorted. By default, sort over the last axis.\n\n Returns\n -------\n indices : (N,) ndarray of ints\n Array of indices that sort the keys along the specified axis.\n\n See Also\n --------\n argsort : Indirect sort.\n ndarray.sort : In-place sort.\n sort : Return a sorted copy of an array.\n\n Examples\n --------\n Sort names: first by surname, then by name.\n\n >>> surnames = (\'Hertz\', \'Galilei\', \'Hertz\')\n >>> first_names = (\'Heinrich\', \'Galileo\', \'Gustav\')\n >>> ind = np.lexsort((first_names, surnames))\n >>> ind\n array([1, 2, 0])\n\n >>> [surnames[i] + ", " + first_names[i] for i in ind]\n [\'Galilei, Galileo\', \'Hertz, Gustav\', \'Hertz, Heinrich\']\n\n Sort two columns of numbers:\n\n >>> a = [1,5,1,4,3,4,4] # First column\n >>> b = [9,4,0,4,0,2,1] # Second column\n >>> ind = np.lexsort((b,a)) # Sort by a, then by b\n >>> print(ind)\n [2 0 4 6 5 3 1]\n\n >>> [(a[i],b[i]) for i in ind]\n [(1, 0), (1, 9), (3, 0), (4, 1), (4, 2), (4, 4), (5, 4)]\n\n Note that sorting is first according to the elements of ``a``.\n Secondary sorting is according to the elements of ``b``.\n\n A normal ``argsort`` would have yielded:\n\n >>> [(a[i],b[i]) for i in np.argsort(a)]\n [(1, 9), (1, 0), (3, 0), (4, 4), (4, 2), (4, 1), (5, 4)]\n\n Structured arrays are sorted lexically by ``argsort``:\n\n >>> x = np.array([(1,9), (5,4), (1,0), (4,4), (3,0), (4,2), (4,1)],\n ... dtype=np.dtype([(\'x\', int), (\'y\', int)]))\n\n >>> np.argsort(x) # or np.argsort(x, order=(\'x\', \'y\'))\n array([2, 0, 4, 6, 5, 3, 1])\n\n ' if isinstance(keys, tuple): return keys else: return (keys,)
4,072,387,893,560,209,000
lexsort(keys, axis=-1) Perform an indirect stable sort using a sequence of keys. Given multiple sorting keys, which can be interpreted as columns in a spreadsheet, lexsort returns an array of integer indices that describes the sort order by multiple columns. The last key in the sequence is used for the primary sort order, the second-to-last key for the secondary sort order, and so on. The keys argument must be a sequence of objects that can be converted to arrays of the same shape. If a 2D array is provided for the keys argument, it's rows are interpreted as the sorting keys and sorting is according to the last row, second last row etc. Parameters ---------- keys : (k, N) array or tuple containing k (N,)-shaped sequences The `k` different "columns" to be sorted. The last column (or row if `keys` is a 2D array) is the primary sort key. axis : int, optional Axis to be indirectly sorted. By default, sort over the last axis. Returns ------- indices : (N,) ndarray of ints Array of indices that sort the keys along the specified axis. See Also -------- argsort : Indirect sort. ndarray.sort : In-place sort. sort : Return a sorted copy of an array. Examples -------- Sort names: first by surname, then by name. >>> surnames = ('Hertz', 'Galilei', 'Hertz') >>> first_names = ('Heinrich', 'Galileo', 'Gustav') >>> ind = np.lexsort((first_names, surnames)) >>> ind array([1, 2, 0]) >>> [surnames[i] + ", " + first_names[i] for i in ind] ['Galilei, Galileo', 'Hertz, Gustav', 'Hertz, Heinrich'] Sort two columns of numbers: >>> a = [1,5,1,4,3,4,4] # First column >>> b = [9,4,0,4,0,2,1] # Second column >>> ind = np.lexsort((b,a)) # Sort by a, then by b >>> print(ind) [2 0 4 6 5 3 1] >>> [(a[i],b[i]) for i in ind] [(1, 0), (1, 9), (3, 0), (4, 1), (4, 2), (4, 4), (5, 4)] Note that sorting is first according to the elements of ``a``. Secondary sorting is according to the elements of ``b``. A normal ``argsort`` would have yielded: >>> [(a[i],b[i]) for i in np.argsort(a)] [(1, 9), (1, 0), (3, 0), (4, 4), (4, 2), (4, 1), (5, 4)] Structured arrays are sorted lexically by ``argsort``: >>> x = np.array([(1,9), (5,4), (1,0), (4,4), (3,0), (4,2), (4,1)], ... dtype=np.dtype([('x', int), ('y', int)])) >>> np.argsort(x) # or np.argsort(x, order=('x', 'y')) array([2, 0, 4, 6, 5, 3, 1])
venv/lib/python3.7/site-packages/numpy/core/multiarray.py
lexsort
180Studios/LoginApp
python
@array_function_from_c_func_and_dispatcher(_multiarray_umath.lexsort) def lexsort(keys, axis=None): if isinstance(keys, tuple): return keys else: return (keys,)
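A note on the pattern in every body in this file: the Python function is only a dispatcher. `array_function_from_c_func_and_dispatcher` wraps the C implementation from `_multiarray_umath` and uses the Python body's return value as the tuple of arguments that may carry `__array_function__` overrides, which is why `lexsort` merely returns `(keys,)`. Below is a minimal sketch of how such a decorator can work, assuming a simplified override protocol; the real implementation lives in `numpy.core.overrides`, and the names `decorator` and `wrapper` here are illustrative, not NumPy's actual code.

    import functools

    def array_function_from_c_func_and_dispatcher(c_func):
        # Simplified sketch (assumption): the real decorator also validates the
        # dispatcher's signature and re-attaches the docstring for np.* lookup.
        def decorator(dispatcher):
            @functools.wraps(dispatcher)
            def wrapper(*args, **kwargs):
                relevant = dispatcher(*args, **kwargs)   # e.g. (keys,) for lexsort
                types = tuple({type(a) for a in relevant
                               if hasattr(type(a), '__array_function__')})
                for arg in relevant:
                    if hasattr(type(arg), '__array_function__'):
                        # Hand control to the overriding array-like object.
                        return arg.__array_function__(wrapper, types, args, kwargs)
                return c_func(*args, **kwargs)           # no overrides: call into C
            return wrapper
        return decorator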
@array_function_from_c_func_and_dispatcher(_multiarray_umath.can_cast) def can_cast(from_, to, casting=None): "\n can_cast(from_, to, casting='safe')\n\n Returns True if cast between data types can occur according to the\n casting rule. If from is a scalar or array scalar, also returns\n True if the scalar value can be cast without overflow or truncation\n to an integer.\n\n Parameters\n ----------\n from_ : dtype, dtype specifier, scalar, or array\n Data type, scalar, or array to cast from.\n to : dtype or dtype specifier\n Data type to cast to.\n casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional\n Controls what kind of data casting may occur.\n\n * 'no' means the data types should not be cast at all.\n * 'equiv' means only byte-order changes are allowed.\n * 'safe' means only casts which can preserve values are allowed.\n * 'same_kind' means only safe casts or casts within a kind,\n like float64 to float32, are allowed.\n * 'unsafe' means any data conversions may be done.\n\n Returns\n -------\n out : bool\n True if cast can occur according to the casting rule.\n\n Notes\n -----\n Starting in NumPy 1.9, can_cast function now returns False in 'safe'\n casting mode for integer/float dtype and string dtype if the string dtype\n length is not long enough to store the max integer/float value converted\n to a string. Previously can_cast in 'safe' mode returned True for\n integer/float dtype and a string dtype of any length.\n\n See also\n --------\n dtype, result_type\n\n Examples\n --------\n Basic examples\n\n >>> np.can_cast(np.int32, np.int64)\n True\n >>> np.can_cast(np.float64, complex)\n True\n >>> np.can_cast(complex, float)\n False\n\n >>> np.can_cast('i8', 'f8')\n True\n >>> np.can_cast('i8', 'f4')\n False\n >>> np.can_cast('i4', 'S4')\n False\n\n Casting scalars\n\n >>> np.can_cast(100, 'i1')\n True\n >>> np.can_cast(150, 'i1')\n False\n >>> np.can_cast(150, 'u1')\n True\n\n >>> np.can_cast(3.5e100, np.float32)\n False\n >>> np.can_cast(1000.0, np.float32)\n True\n\n Array scalar checks the value, array does not\n\n >>> np.can_cast(np.array(1000.0), np.float32)\n True\n >>> np.can_cast(np.array([1000.0]), np.float32)\n False\n\n Using the casting rules\n\n >>> np.can_cast('i8', 'i8', 'no')\n True\n >>> np.can_cast('<i8', '>i8', 'no')\n False\n\n >>> np.can_cast('<i8', '>i8', 'equiv')\n True\n >>> np.can_cast('<i4', '>i8', 'equiv')\n False\n\n >>> np.can_cast('<i4', '>i8', 'safe')\n True\n >>> np.can_cast('<i8', '>i4', 'safe')\n False\n\n >>> np.can_cast('<i8', '>i4', 'same_kind')\n True\n >>> np.can_cast('<i8', '>u4', 'same_kind')\n False\n\n >>> np.can_cast('<i8', '>u4', 'unsafe')\n True\n\n " return (from_,)
420,385,678,301,806,100
can_cast(from_, to, casting='safe') Returns True if cast between data types can occur according to the casting rule. If from is a scalar or array scalar, also returns True if the scalar value can be cast without overflow or truncation to an integer. Parameters ---------- from_ : dtype, dtype specifier, scalar, or array Data type, scalar, or array to cast from. to : dtype or dtype specifier Data type to cast to. casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional Controls what kind of data casting may occur. * 'no' means the data types should not be cast at all. * 'equiv' means only byte-order changes are allowed. * 'safe' means only casts which can preserve values are allowed. * 'same_kind' means only safe casts or casts within a kind, like float64 to float32, are allowed. * 'unsafe' means any data conversions may be done. Returns ------- out : bool True if cast can occur according to the casting rule. Notes ----- Starting in NumPy 1.9, can_cast function now returns False in 'safe' casting mode for integer/float dtype and string dtype if the string dtype length is not long enough to store the max integer/float value converted to a string. Previously can_cast in 'safe' mode returned True for integer/float dtype and a string dtype of any length. See also -------- dtype, result_type Examples -------- Basic examples >>> np.can_cast(np.int32, np.int64) True >>> np.can_cast(np.float64, complex) True >>> np.can_cast(complex, float) False >>> np.can_cast('i8', 'f8') True >>> np.can_cast('i8', 'f4') False >>> np.can_cast('i4', 'S4') False Casting scalars >>> np.can_cast(100, 'i1') True >>> np.can_cast(150, 'i1') False >>> np.can_cast(150, 'u1') True >>> np.can_cast(3.5e100, np.float32) False >>> np.can_cast(1000.0, np.float32) True Array scalar checks the value, array does not >>> np.can_cast(np.array(1000.0), np.float32) True >>> np.can_cast(np.array([1000.0]), np.float32) False Using the casting rules >>> np.can_cast('i8', 'i8', 'no') True >>> np.can_cast('<i8', '>i8', 'no') False >>> np.can_cast('<i8', '>i8', 'equiv') True >>> np.can_cast('<i4', '>i8', 'equiv') False >>> np.can_cast('<i4', '>i8', 'safe') True >>> np.can_cast('<i8', '>i4', 'safe') False >>> np.can_cast('<i8', '>i4', 'same_kind') True >>> np.can_cast('<i8', '>u4', 'same_kind') False >>> np.can_cast('<i8', '>u4', 'unsafe') True
venv/lib/python3.7/site-packages/numpy/core/multiarray.py
can_cast
180Studios/LoginApp
python
@array_function_from_c_func_and_dispatcher(_multiarray_umath.can_cast) def can_cast(from_, to, casting=None): return (from_,)
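As a compact way to visualize the 'safe' casting lattice described above, one can tabulate `np.can_cast` over a handful of dtype codes; this is purely illustrative and assumes numpy is importable as np.

    import numpy as np

    dtypes = ['i1', 'i8', 'f4', 'f8', 'c16']
    print('     ' + '  '.join(f'{d:>3}' for d in dtypes))
    for src in dtypes:
        row = '  '.join(' T ' if np.can_cast(src, dst, 'safe') else ' . '
                        for dst in dtypes)
        print(f'{src:>4} {row}')
    # i1 casts safely to every wider type; c16 safely casts only to itself.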
@array_function_from_c_func_and_dispatcher(_multiarray_umath.min_scalar_type) def min_scalar_type(a): "\n min_scalar_type(a)\n\n For scalar ``a``, returns the data type with the smallest size\n and smallest scalar kind which can hold its value. For non-scalar\n array ``a``, returns the vector's dtype unmodified.\n\n Floating point values are not demoted to integers,\n and complex values are not demoted to floats.\n\n Parameters\n ----------\n a : scalar or array_like\n The value whose minimal data type is to be found.\n\n Returns\n -------\n out : dtype\n The minimal data type.\n\n Notes\n -----\n .. versionadded:: 1.6.0\n\n See Also\n --------\n result_type, promote_types, dtype, can_cast\n\n Examples\n --------\n >>> np.min_scalar_type(10)\n dtype('uint8')\n\n >>> np.min_scalar_type(-260)\n dtype('int16')\n\n >>> np.min_scalar_type(3.1)\n dtype('float16')\n\n >>> np.min_scalar_type(1e50)\n dtype('float64')\n\n >>> np.min_scalar_type(np.arange(4,dtype='f8'))\n dtype('float64')\n\n " return (a,)
-5,644,159,851,517,568,000
min_scalar_type(a) For scalar ``a``, returns the data type with the smallest size and smallest scalar kind which can hold its value. For non-scalar array ``a``, returns the vector's dtype unmodified. Floating point values are not demoted to integers, and complex values are not demoted to floats. Parameters ---------- a : scalar or array_like The value whose minimal data type is to be found. Returns ------- out : dtype The minimal data type. Notes ----- .. versionadded:: 1.6.0 See Also -------- result_type, promote_types, dtype, can_cast Examples -------- >>> np.min_scalar_type(10) dtype('uint8') >>> np.min_scalar_type(-260) dtype('int16') >>> np.min_scalar_type(3.1) dtype('float16') >>> np.min_scalar_type(1e50) dtype('float64') >>> np.min_scalar_type(np.arange(4,dtype='f8')) dtype('float64')
venv/lib/python3.7/site-packages/numpy/core/multiarray.py
min_scalar_type
180Studios/LoginApp
python
@array_function_from_c_func_and_dispatcher(_multiarray_umath.min_scalar_type) def min_scalar_type(a): return (a,)
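A few more value-based probes complementing the docstring's examples, showing the boundaries between kinds; assumes numpy as np, and the particular values are illustrative.

    import numpy as np

    print(np.min_scalar_type(255))   # uint8   -- the largest value a uint8 holds
    print(np.min_scalar_type(256))   # uint16  -- one past uint8's range
    print(np.min_scalar_type(-1))    # int8    -- a negative value forces a signed kind
    print(np.min_scalar_type(2.0))   # float16 -- floats are never demoted to integers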
@array_function_from_c_func_and_dispatcher(_multiarray_umath.result_type) def result_type(*arrays_and_dtypes): "\n result_type(*arrays_and_dtypes)\n\n Returns the type that results from applying the NumPy\n type promotion rules to the arguments.\n\n Type promotion in NumPy works similarly to the rules in languages\n like C++, with some slight differences. When both scalars and\n arrays are used, the array's type takes precedence and the actual value\n of the scalar is taken into account.\n\n For example, calculating 3*a, where a is an array of 32-bit floats,\n intuitively should result in a 32-bit float output. If the 3 is a\n 32-bit integer, the NumPy rules indicate it can't convert losslessly\n into a 32-bit float, so a 64-bit float should be the result type.\n By examining the value of the constant, '3', we see that it fits in\n an 8-bit integer, which can be cast losslessly into the 32-bit float.\n\n Parameters\n ----------\n arrays_and_dtypes : list of arrays and dtypes\n The operands of some operation whose result type is needed.\n\n Returns\n -------\n out : dtype\n The result type.\n\n See also\n --------\n dtype, promote_types, min_scalar_type, can_cast\n\n Notes\n -----\n .. versionadded:: 1.6.0\n\n The specific algorithm used is as follows.\n\n Categories are determined by first checking which of boolean,\n integer (int/uint), or floating point (float/complex) the maximum\n kind of all the arrays and the scalars are.\n\n If there are only scalars or the maximum category of the scalars\n is higher than the maximum category of the arrays,\n the data types are combined with :func:`promote_types`\n to produce the return value.\n\n Otherwise, `min_scalar_type` is called on each array, and\n the resulting data types are all combined with :func:`promote_types`\n to produce the return value.\n\n The set of int values is not a subset of the uint values for types\n with the same number of bits, something not reflected in\n :func:`min_scalar_type`, but handled as a special case in `result_type`.\n\n Examples\n --------\n >>> np.result_type(3, np.arange(7, dtype='i1'))\n dtype('int8')\n\n >>> np.result_type('i4', 'c8')\n dtype('complex128')\n\n >>> np.result_type(3.0, -2)\n dtype('float64')\n\n " return arrays_and_dtypes
6,623,818,526,093,711,000
result_type(*arrays_and_dtypes) Returns the type that results from applying the NumPy type promotion rules to the arguments. Type promotion in NumPy works similarly to the rules in languages like C++, with some slight differences. When both scalars and arrays are used, the array's type takes precedence and the actual value of the scalar is taken into account. For example, calculating 3*a, where a is an array of 32-bit floats, intuitively should result in a 32-bit float output. If the 3 is a 32-bit integer, the NumPy rules indicate it can't convert losslessly into a 32-bit float, so a 64-bit float should be the result type. By examining the value of the constant, '3', we see that it fits in an 8-bit integer, which can be cast losslessly into the 32-bit float. Parameters ---------- arrays_and_dtypes : list of arrays and dtypes The operands of some operation whose result type is needed. Returns ------- out : dtype The result type. See also -------- dtype, promote_types, min_scalar_type, can_cast Notes ----- .. versionadded:: 1.6.0 The specific algorithm used is as follows. Categories are determined by first checking which of boolean, integer (int/uint), or floating point (float/complex) the maximum kind of all the arrays and the scalars are. If there are only scalars or the maximum category of the scalars is higher than the maximum category of the arrays, the data types are combined with :func:`promote_types` to produce the return value. Otherwise, `min_scalar_type` is called on each array, and the resulting data types are all combined with :func:`promote_types` to produce the return value. The set of int values is not a subset of the uint values for types with the same number of bits, something not reflected in :func:`min_scalar_type`, but handled as a special case in `result_type`. Examples -------- >>> np.result_type(3, np.arange(7, dtype='i1')) dtype('int8') >>> np.result_type('i4', 'c8') dtype('complex128') >>> np.result_type(3.0, -2) dtype('float64')
venv/lib/python3.7/site-packages/numpy/core/multiarray.py
result_type
180Studios/LoginApp
python
@array_function_from_c_func_and_dispatcher(_multiarray_umath.result_type) def result_type(*arrays_and_dtypes): return arrays_and_dtypes
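The scalar-versus-array rule in the Notes can be seen directly: an integer scalar defers to a float array, but a float scalar too large for the array's dtype forces promotion. This sketch assumes numpy as np and the value-based promotion documented here (pre-NEP 50 NumPy, as in this 1.16-era file); newer NumPy treats Python scalars differently.

    import numpy as np

    a = np.arange(3, dtype=np.float32)
    print(np.result_type(a, 3))       # float32 -- scalar int defers to the float array
    print(np.result_type(a, 3.0))     # float32 -- same category, and the value fits
    print(np.result_type(a, 1e300))   # float64 -- 1e300 overflows float32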
@array_function_from_c_func_and_dispatcher(_multiarray_umath.dot) def dot(a, b, out=None): "\n dot(a, b, out=None)\n\n Dot product of two arrays. Specifically,\n\n - If both `a` and `b` are 1-D arrays, it is inner product of vectors\n (without complex conjugation).\n\n - If both `a` and `b` are 2-D arrays, it is matrix multiplication,\n but using :func:`matmul` or ``a @ b`` is preferred.\n\n - If either `a` or `b` is 0-D (scalar), it is equivalent to :func:`multiply`\n and using ``numpy.multiply(a, b)`` or ``a * b`` is preferred.\n\n - If `a` is an N-D array and `b` is a 1-D array, it is a sum product over\n the last axis of `a` and `b`.\n\n - If `a` is an N-D array and `b` is an M-D array (where ``M>=2``), it is a\n sum product over the last axis of `a` and the second-to-last axis of `b`::\n\n dot(a, b)[i,j,k,m] = sum(a[i,j,:] * b[k,:,m])\n\n Parameters\n ----------\n a : array_like\n First argument.\n b : array_like\n Second argument.\n out : ndarray, optional\n Output argument. This must have the exact kind that would be returned\n if it was not used. In particular, it must have the right type, must be\n C-contiguous, and its dtype must be the dtype that would be returned\n for `dot(a,b)`. This is a performance feature. Therefore, if these\n conditions are not met, an exception is raised, instead of attempting\n to be flexible.\n\n Returns\n -------\n output : ndarray\n Returns the dot product of `a` and `b`. If `a` and `b` are both\n scalars or both 1-D arrays then a scalar is returned; otherwise\n an array is returned.\n If `out` is given, then it is returned.\n\n Raises\n ------\n ValueError\n If the last dimension of `a` is not the same size as\n the second-to-last dimension of `b`.\n\n See Also\n --------\n vdot : Complex-conjugating dot product.\n tensordot : Sum products over arbitrary axes.\n einsum : Einstein summation convention.\n matmul : '@' operator as method with out parameter.\n\n Examples\n --------\n >>> np.dot(3, 4)\n 12\n\n Neither argument is complex-conjugated:\n\n >>> np.dot([2j, 3j], [2j, 3j])\n (-13+0j)\n\n For 2-D arrays it is the matrix product:\n\n >>> a = [[1, 0], [0, 1]]\n >>> b = [[4, 1], [2, 2]]\n >>> np.dot(a, b)\n array([[4, 1],\n [2, 2]])\n\n >>> a = np.arange(3*4*5*6).reshape((3,4,5,6))\n >>> b = np.arange(3*4*5*6)[::-1].reshape((5,4,6,3))\n >>> np.dot(a, b)[2,3,2,1,2,2]\n 499128\n >>> sum(a[2,3,2,:] * b[1,2,:,2])\n 499128\n\n " return (a, b, out)
-4,682,007,655,391,947,000
dot(a, b, out=None) Dot product of two arrays. Specifically, - If both `a` and `b` are 1-D arrays, it is inner product of vectors (without complex conjugation). - If both `a` and `b` are 2-D arrays, it is matrix multiplication, but using :func:`matmul` or ``a @ b`` is preferred. - If either `a` or `b` is 0-D (scalar), it is equivalent to :func:`multiply` and using ``numpy.multiply(a, b)`` or ``a * b`` is preferred. - If `a` is an N-D array and `b` is a 1-D array, it is a sum product over the last axis of `a` and `b`. - If `a` is an N-D array and `b` is an M-D array (where ``M>=2``), it is a sum product over the last axis of `a` and the second-to-last axis of `b`:: dot(a, b)[i,j,k,m] = sum(a[i,j,:] * b[k,:,m]) Parameters ---------- a : array_like First argument. b : array_like Second argument. out : ndarray, optional Output argument. This must have the exact kind that would be returned if it was not used. In particular, it must have the right type, must be C-contiguous, and its dtype must be the dtype that would be returned for `dot(a,b)`. This is a performance feature. Therefore, if these conditions are not met, an exception is raised, instead of attempting to be flexible. Returns ------- output : ndarray Returns the dot product of `a` and `b`. If `a` and `b` are both scalars or both 1-D arrays then a scalar is returned; otherwise an array is returned. If `out` is given, then it is returned. Raises ------ ValueError If the last dimension of `a` is not the same size as the second-to-last dimension of `b`. See Also -------- vdot : Complex-conjugating dot product. tensordot : Sum products over arbitrary axes. einsum : Einstein summation convention. matmul : '@' operator as method with out parameter. Examples -------- >>> np.dot(3, 4) 12 Neither argument is complex-conjugated: >>> np.dot([2j, 3j], [2j, 3j]) (-13+0j) For 2-D arrays it is the matrix product: >>> a = [[1, 0], [0, 1]] >>> b = [[4, 1], [2, 2]] >>> np.dot(a, b) array([[4, 1], [2, 2]]) >>> a = np.arange(3*4*5*6).reshape((3,4,5,6)) >>> b = np.arange(3*4*5*6)[::-1].reshape((5,4,6,3)) >>> np.dot(a, b)[2,3,2,1,2,2] 499128 >>> sum(a[2,3,2,:] * b[1,2,:,2]) 499128
venv/lib/python3.7/site-packages/numpy/core/multiarray.py
dot
180Studios/LoginApp
python
@array_function_from_c_func_and_dispatcher(_multiarray_umath.dot) def dot(a, b, out=None): return (a, b, out)
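The `out` constraints listed above (right dtype, right shape, C-contiguous) are described but not exercised in the record's examples; a short sketch, assuming numpy as np.

    import numpy as np

    a = np.ones((4, 3))
    b = np.ones((3, 5))
    out = np.empty((4, 5))       # correct shape, dtype, and C order
    r = np.dot(a, b, out=out)    # written in place; the out array is also returned
    assert r is out and np.all(out == 3.0)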
@array_function_from_c_func_and_dispatcher(_multiarray_umath.vdot) def vdot(a, b): '\n vdot(a, b)\n\n Return the dot product of two vectors.\n\n The vdot(`a`, `b`) function handles complex numbers differently than\n dot(`a`, `b`). If the first argument is complex the complex conjugate\n of the first argument is used for the calculation of the dot product.\n\n Note that `vdot` handles multidimensional arrays differently than `dot`:\n it does *not* perform a matrix product, but flattens input arguments\n to 1-D vectors first. Consequently, it should only be used for vectors.\n\n Parameters\n ----------\n a : array_like\n If `a` is complex the complex conjugate is taken before calculation\n of the dot product.\n b : array_like\n Second argument to the dot product.\n\n Returns\n -------\n output : ndarray\n Dot product of `a` and `b`. Can be an int, float, or\n complex depending on the types of `a` and `b`.\n\n See Also\n --------\n dot : Return the dot product without using the complex conjugate of the\n first argument.\n\n Examples\n --------\n >>> a = np.array([1+2j,3+4j])\n >>> b = np.array([5+6j,7+8j])\n >>> np.vdot(a, b)\n (70-8j)\n >>> np.vdot(b, a)\n (70+8j)\n\n Note that higher-dimensional arrays are flattened!\n\n >>> a = np.array([[1, 4], [5, 6]])\n >>> b = np.array([[4, 1], [2, 2]])\n >>> np.vdot(a, b)\n 30\n >>> np.vdot(b, a)\n 30\n >>> 1*4 + 4*1 + 5*2 + 6*2\n 30\n\n ' return (a, b)
-7,113,312,379,025,483,000
vdot(a, b) Return the dot product of two vectors. The vdot(`a`, `b`) function handles complex numbers differently than dot(`a`, `b`). If the first argument is complex the complex conjugate of the first argument is used for the calculation of the dot product. Note that `vdot` handles multidimensional arrays differently than `dot`: it does *not* perform a matrix product, but flattens input arguments to 1-D vectors first. Consequently, it should only be used for vectors. Parameters ---------- a : array_like If `a` is complex the complex conjugate is taken before calculation of the dot product. b : array_like Second argument to the dot product. Returns ------- output : ndarray Dot product of `a` and `b`. Can be an int, float, or complex depending on the types of `a` and `b`. See Also -------- dot : Return the dot product without using the complex conjugate of the first argument. Examples -------- >>> a = np.array([1+2j,3+4j]) >>> b = np.array([5+6j,7+8j]) >>> np.vdot(a, b) (70-8j) >>> np.vdot(b, a) (70+8j) Note that higher-dimensional arrays are flattened! >>> a = np.array([[1, 4], [5, 6]]) >>> b = np.array([[4, 1], [2, 2]]) >>> np.vdot(a, b) 30 >>> np.vdot(b, a) 30 >>> 1*4 + 4*1 + 5*2 + 6*2 30
venv/lib/python3.7/site-packages/numpy/core/multiarray.py
vdot
180Studios/LoginApp
python
@array_function_from_c_func_and_dispatcher(_multiarray_umath.vdot) def vdot(a, b): return (a, b)
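The conjugate-and-flatten behaviour documented above is equivalent to an explicit `dot` over conjugated, raveled inputs; a one-line cross-check, assuming numpy as np (the arrays are illustrative).

    import numpy as np

    a = np.array([[1 + 2j, 3 + 4j], [5 + 6j, 7 + 8j]])
    b = np.array([[1j, 2 + 0j], [3 + 0j, 4j]])
    # vdot conjugates its first argument and flattens both inputs:
    assert np.vdot(a, b) == np.dot(a.conj().ravel(), b.ravel())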
@array_function_from_c_func_and_dispatcher(_multiarray_umath.bincount) def bincount(x, weights=None, minlength=None): '\n bincount(x, weights=None, minlength=0)\n\n Count number of occurrences of each value in array of non-negative ints.\n\n The number of bins (of size 1) is one larger than the largest value in\n `x`. If `minlength` is specified, there will be at least this number\n of bins in the output array (though it will be longer if necessary,\n depending on the contents of `x`).\n Each bin gives the number of occurrences of its index value in `x`.\n If `weights` is specified the input array is weighted by it, i.e. if a\n value ``n`` is found at position ``i``, ``out[n] += weight[i]`` instead\n of ``out[n] += 1``.\n\n Parameters\n ----------\n x : array_like, 1 dimension, nonnegative ints\n Input array.\n weights : array_like, optional\n Weights, array of the same shape as `x`.\n minlength : int, optional\n A minimum number of bins for the output array.\n\n .. versionadded:: 1.6.0\n\n Returns\n -------\n out : ndarray of ints\n The result of binning the input array.\n The length of `out` is equal to ``np.amax(x)+1``.\n\n Raises\n ------\n ValueError\n If the input is not 1-dimensional, or contains elements with negative\n values, or if `minlength` is negative.\n TypeError\n If the type of the input is float or complex.\n\n See Also\n --------\n histogram, digitize, unique\n\n Examples\n --------\n >>> np.bincount(np.arange(5))\n array([1, 1, 1, 1, 1])\n >>> np.bincount(np.array([0, 1, 1, 3, 2, 1, 7]))\n array([1, 3, 1, 1, 0, 0, 0, 1])\n\n >>> x = np.array([0, 1, 1, 3, 2, 1, 7, 23])\n >>> np.bincount(x).size == np.amax(x)+1\n True\n\n The input array needs to be of integer dtype, otherwise a\n TypeError is raised:\n\n >>> np.bincount(np.arange(5, dtype=float))\n Traceback (most recent call last):\n File "<stdin>", line 1, in <module>\n TypeError: array cannot be safely cast to required type\n\n A possible use of ``bincount`` is to perform sums over\n variable-size chunks of an array, using the ``weights`` keyword.\n\n >>> w = np.array([0.3, 0.5, 0.2, 0.7, 1., -0.6]) # weights\n >>> x = np.array([0, 1, 1, 2, 2, 2])\n >>> np.bincount(x, weights=w)\n array([ 0.3, 0.7, 1.1])\n\n ' return (x, weights)
-8,931,369,888,445,359,000
bincount(x, weights=None, minlength=0) Count number of occurrences of each value in array of non-negative ints. The number of bins (of size 1) is one larger than the largest value in `x`. If `minlength` is specified, there will be at least this number of bins in the output array (though it will be longer if necessary, depending on the contents of `x`). Each bin gives the number of occurrences of its index value in `x`. If `weights` is specified the input array is weighted by it, i.e. if a value ``n`` is found at position ``i``, ``out[n] += weight[i]`` instead of ``out[n] += 1``. Parameters ---------- x : array_like, 1 dimension, nonnegative ints Input array. weights : array_like, optional Weights, array of the same shape as `x`. minlength : int, optional A minimum number of bins for the output array. .. versionadded:: 1.6.0 Returns ------- out : ndarray of ints The result of binning the input array. The length of `out` is equal to ``np.amax(x)+1``. Raises ------ ValueError If the input is not 1-dimensional, or contains elements with negative values, or if `minlength` is negative. TypeError If the type of the input is float or complex. See Also -------- histogram, digitize, unique Examples -------- >>> np.bincount(np.arange(5)) array([1, 1, 1, 1, 1]) >>> np.bincount(np.array([0, 1, 1, 3, 2, 1, 7])) array([1, 3, 1, 1, 0, 0, 0, 1]) >>> x = np.array([0, 1, 1, 3, 2, 1, 7, 23]) >>> np.bincount(x).size == np.amax(x)+1 True The input array needs to be of integer dtype, otherwise a TypeError is raised: >>> np.bincount(np.arange(5, dtype=float)) Traceback (most recent call last): File "<stdin>", line 1, in <module> TypeError: array cannot be safely cast to required type A possible use of ``bincount`` is to perform sums over variable-size chunks of an array, using the ``weights`` keyword. >>> w = np.array([0.3, 0.5, 0.2, 0.7, 1., -0.6]) # weights >>> x = np.array([0, 1, 1, 2, 2, 2]) >>> np.bincount(x, weights=w) array([ 0.3, 0.7, 1.1])
venv/lib/python3.7/site-packages/numpy/core/multiarray.py
bincount
180Studios/LoginApp
python
@array_function_from_c_func_and_dispatcher(_multiarray_umath.bincount) def bincount(x, weights=None, minlength=None): return (x, weights)
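`minlength` is documented above but has no example in the record; a short illustration, assuming numpy as np.

    import numpy as np

    x = np.array([0, 1, 1, 2])
    print(np.bincount(x))                # [1 2 1]        -- np.amax(x)+1 bins
    print(np.bincount(x, minlength=6))   # [1 2 1 0 0 0]  -- padded out to 6 bins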
@array_function_from_c_func_and_dispatcher(_multiarray_umath.ravel_multi_index) def ravel_multi_index(multi_index, dims, mode=None, order=None): "\n ravel_multi_index(multi_index, dims, mode='raise', order='C')\n\n Converts a tuple of index arrays into an array of flat\n indices, applying boundary modes to the multi-index.\n\n Parameters\n ----------\n multi_index : tuple of array_like\n A tuple of integer arrays, one array for each dimension.\n dims : tuple of ints\n The shape of array into which the indices from ``multi_index`` apply.\n mode : {'raise', 'wrap', 'clip'}, optional\n Specifies how out-of-bounds indices are handled. Can specify\n either one mode or a tuple of modes, one mode per index.\n\n * 'raise' -- raise an error (default)\n * 'wrap' -- wrap around\n * 'clip' -- clip to the range\n\n In 'clip' mode, a negative index which would normally\n wrap will clip to 0 instead.\n order : {'C', 'F'}, optional\n Determines whether the multi-index should be viewed as\n indexing in row-major (C-style) or column-major\n (Fortran-style) order.\n\n Returns\n -------\n raveled_indices : ndarray\n An array of indices into the flattened version of an array\n of dimensions ``dims``.\n\n See Also\n --------\n unravel_index\n\n Notes\n -----\n .. versionadded:: 1.6.0\n\n Examples\n --------\n >>> arr = np.array([[3,6,6],[4,5,1]])\n >>> np.ravel_multi_index(arr, (7,6))\n array([22, 41, 37])\n >>> np.ravel_multi_index(arr, (7,6), order='F')\n array([31, 41, 13])\n >>> np.ravel_multi_index(arr, (4,6), mode='clip')\n array([22, 23, 19])\n >>> np.ravel_multi_index(arr, (4,4), mode=('clip','wrap'))\n array([12, 13, 13])\n\n >>> np.ravel_multi_index((3,1,4,1), (6,7,8,9))\n 1621\n " return multi_index
6,791,245,878,041,759,000
ravel_multi_index(multi_index, dims, mode='raise', order='C') Converts a tuple of index arrays into an array of flat indices, applying boundary modes to the multi-index. Parameters ---------- multi_index : tuple of array_like A tuple of integer arrays, one array for each dimension. dims : tuple of ints The shape of array into which the indices from ``multi_index`` apply. mode : {'raise', 'wrap', 'clip'}, optional Specifies how out-of-bounds indices are handled. Can specify either one mode or a tuple of modes, one mode per index. * 'raise' -- raise an error (default) * 'wrap' -- wrap around * 'clip' -- clip to the range In 'clip' mode, a negative index which would normally wrap will clip to 0 instead. order : {'C', 'F'}, optional Determines whether the multi-index should be viewed as indexing in row-major (C-style) or column-major (Fortran-style) order. Returns ------- raveled_indices : ndarray An array of indices into the flattened version of an array of dimensions ``dims``. See Also -------- unravel_index Notes ----- .. versionadded:: 1.6.0 Examples -------- >>> arr = np.array([[3,6,6],[4,5,1]]) >>> np.ravel_multi_index(arr, (7,6)) array([22, 41, 37]) >>> np.ravel_multi_index(arr, (7,6), order='F') array([31, 41, 13]) >>> np.ravel_multi_index(arr, (4,6), mode='clip') array([22, 23, 19]) >>> np.ravel_multi_index(arr, (4,4), mode=('clip','wrap')) array([12, 13, 13]) >>> np.ravel_multi_index((3,1,4,1), (6,7,8,9)) 1621
venv/lib/python3.7/site-packages/numpy/core/multiarray.py
ravel_multi_index
180Studios/LoginApp
python
@array_function_from_c_func_and_dispatcher(_multiarray_umath.ravel_multi_index) def ravel_multi_index(multi_index, dims, mode=None, order=None): return multi_index
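For the default C order and in-bounds indices, the conversion above reduces to the familiar row-major formula `row * ncols + col`; a quick cross-check against the docstring's own numbers, assuming numpy as np.

    import numpy as np

    rows = np.array([3, 6, 6])
    cols = np.array([4, 5, 1])
    shape = (7, 6)
    flat = np.ravel_multi_index((rows, cols), shape)
    assert np.array_equal(flat, rows * shape[1] + cols)   # [22 41 37]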
@array_function_from_c_func_and_dispatcher(_multiarray_umath.unravel_index) def unravel_index(indices, shape=None, order=None, dims=None): "\n unravel_index(indices, shape, order='C')\n\n Converts a flat index or array of flat indices into a tuple\n of coordinate arrays.\n\n Parameters\n ----------\n indices : array_like\n An integer array whose elements are indices into the flattened\n version of an array of dimensions ``shape``. Before version 1.6.0,\n this function accepted just one index value.\n shape : tuple of ints\n The shape of the array to use for unraveling ``indices``.\n\n .. versionchanged:: 1.16.0\n Renamed from ``dims`` to ``shape``.\n\n order : {'C', 'F'}, optional\n Determines whether the indices should be viewed as indexing in\n row-major (C-style) or column-major (Fortran-style) order.\n\n .. versionadded:: 1.6.0\n\n Returns\n -------\n unraveled_coords : tuple of ndarray\n Each array in the tuple has the same shape as the ``indices``\n array.\n\n See Also\n --------\n ravel_multi_index\n\n Examples\n --------\n >>> np.unravel_index([22, 41, 37], (7,6))\n (array([3, 6, 6]), array([4, 5, 1]))\n >>> np.unravel_index([31, 41, 13], (7,6), order='F')\n (array([3, 6, 6]), array([4, 5, 1]))\n\n >>> np.unravel_index(1621, (6,7,8,9))\n (3, 1, 4, 1)\n\n " if (dims is not None): warnings.warn("'shape' argument should be used instead of 'dims'", DeprecationWarning, stacklevel=3) return (indices,)
-5,508,050,244,993,584,000
unravel_index(indices, shape, order='C') Converts a flat index or array of flat indices into a tuple of coordinate arrays. Parameters ---------- indices : array_like An integer array whose elements are indices into the flattened version of an array of dimensions ``shape``. Before version 1.6.0, this function accepted just one index value. shape : tuple of ints The shape of the array to use for unraveling ``indices``. .. versionchanged:: 1.16.0 Renamed from ``dims`` to ``shape``. order : {'C', 'F'}, optional Determines whether the indices should be viewed as indexing in row-major (C-style) or column-major (Fortran-style) order. .. versionadded:: 1.6.0 Returns ------- unraveled_coords : tuple of ndarray Each array in the tuple has the same shape as the ``indices`` array. See Also -------- ravel_multi_index Examples -------- >>> np.unravel_index([22, 41, 37], (7,6)) (array([3, 6, 6]), array([4, 5, 1])) >>> np.unravel_index([31, 41, 13], (7,6), order='F') (array([3, 6, 6]), array([4, 5, 1])) >>> np.unravel_index(1621, (6,7,8,9)) (3, 1, 4, 1)
venv/lib/python3.7/site-packages/numpy/core/multiarray.py
unravel_index
180Studios/LoginApp
python
@array_function_from_c_func_and_dispatcher(_multiarray_umath.unravel_index) def unravel_index(indices, shape=None, order=None, dims=None): if (dims is not None): warnings.warn("'shape' argument should be used instead of 'dims'", DeprecationWarning, stacklevel=3) return (indices,)
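`unravel_index` and `ravel_multi_index` are inverses for in-bounds indices; a round-trip check, assuming numpy as np (the shape and flat indices are illustrative).

    import numpy as np

    shape = (6, 7, 8, 9)
    flat = np.array([0, 1621, 3023])
    coords = np.unravel_index(flat, shape)   # tuple of four per-axis index arrays
    assert np.array_equal(np.ravel_multi_index(coords, shape), flat)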
@array_function_from_c_func_and_dispatcher(_multiarray_umath.copyto) def copyto(dst, src, casting=None, where=None): "\n copyto(dst, src, casting='same_kind', where=True)\n\n Copies values from one array to another, broadcasting as necessary.\n\n Raises a TypeError if the `casting` rule is violated, and if\n `where` is provided, it selects which elements to copy.\n\n .. versionadded:: 1.7.0\n\n Parameters\n ----------\n dst : ndarray\n The array into which values are copied.\n src : array_like\n The array from which values are copied.\n casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional\n Controls what kind of data casting may occur when copying.\n\n * 'no' means the data types should not be cast at all.\n * 'equiv' means only byte-order changes are allowed.\n * 'safe' means only casts which can preserve values are allowed.\n * 'same_kind' means only safe casts or casts within a kind,\n like float64 to float32, are allowed.\n * 'unsafe' means any data conversions may be done.\n where : array_like of bool, optional\n A boolean array which is broadcasted to match the dimensions\n of `dst`, and selects elements to copy from `src` to `dst`\n wherever it contains the value True.\n " return (dst, src, where)
3,615,085,328,127,619,000
copyto(dst, src, casting='same_kind', where=True) Copies values from one array to another, broadcasting as necessary. Raises a TypeError if the `casting` rule is violated, and if `where` is provided, it selects which elements to copy. .. versionadded:: 1.7.0 Parameters ---------- dst : ndarray The array into which values are copied. src : array_like The array from which values are copied. casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional Controls what kind of data casting may occur when copying. * 'no' means the data types should not be cast at all. * 'equiv' means only byte-order changes are allowed. * 'safe' means only casts which can preserve values are allowed. * 'same_kind' means only safe casts or casts within a kind, like float64 to float32, are allowed. * 'unsafe' means any data conversions may be done. where : array_like of bool, optional A boolean array which is broadcasted to match the dimensions of `dst`, and selects elements to copy from `src` to `dst` wherever it contains the value True.
venv/lib/python3.7/site-packages/numpy/core/multiarray.py
copyto
180Studios/LoginApp
python
@array_function_from_c_func_and_dispatcher(_multiarray_umath.copyto) def copyto(dst, src, casting=None, where=None): return (dst, src, where)
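Unlike the neighbouring records, this docstring carries no Examples section; a minimal usage sketch of `where` selection and scalar broadcasting, assuming numpy as np.

    import numpy as np

    dst = np.zeros(5)
    src = np.arange(5)
    np.copyto(dst, src, where=src % 2 == 1)   # copy only the odd positions
    print(dst)                                # [0. 1. 0. 3. 0.]
    np.copyto(dst, -1)                        # a scalar broadcasts over all of dst
    print(dst)                                # [-1. -1. -1. -1. -1.]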
@array_function_from_c_func_and_dispatcher(_multiarray_umath.putmask) def putmask(a, mask, values): '\n putmask(a, mask, values)\n\n Changes elements of an array based on conditional and input values.\n\n Sets ``a.flat[n] = values[n]`` for each n where ``mask.flat[n]==True``.\n\n If `values` is not the same size as `a` and `mask` then it will repeat.\n This gives behavior different from ``a[mask] = values``.\n\n Parameters\n ----------\n a : array_like\n Target array.\n mask : array_like\n Boolean mask array. It has to be the same shape as `a`.\n values : array_like\n Values to put into `a` where `mask` is True. If `values` is smaller\n than `a` it will be repeated.\n\n See Also\n --------\n place, put, take, copyto\n\n Examples\n --------\n >>> x = np.arange(6).reshape(2, 3)\n >>> np.putmask(x, x>2, x**2)\n >>> x\n array([[ 0, 1, 2],\n [ 9, 16, 25]])\n\n If `values` is smaller than `a` it is repeated:\n\n >>> x = np.arange(5)\n >>> np.putmask(x, x>1, [-33, -44])\n >>> x\n array([ 0, 1, -33, -44, -33])\n\n ' return (a, mask, values)
-2,530,559,739,271,771,600
putmask(a, mask, values) Changes elements of an array based on conditional and input values. Sets ``a.flat[n] = values[n]`` for each n where ``mask.flat[n]==True``. If `values` is not the same size as `a` and `mask` then it will repeat. This gives behavior different from ``a[mask] = values``. Parameters ---------- a : array_like Target array. mask : array_like Boolean mask array. It has to be the same shape as `a`. values : array_like Values to put into `a` where `mask` is True. If `values` is smaller than `a` it will be repeated. See Also -------- place, put, take, copyto Examples -------- >>> x = np.arange(6).reshape(2, 3) >>> np.putmask(x, x>2, x**2) >>> x array([[ 0, 1, 2], [ 9, 16, 25]]) If `values` is smaller than `a` it is repeated: >>> x = np.arange(5) >>> np.putmask(x, x>1, [-33, -44]) >>> x array([ 0, 1, -33, -44, -33])
venv/lib/python3.7/site-packages/numpy/core/multiarray.py
putmask
180Studios/LoginApp
python
@array_function_from_c_func_and_dispatcher(_multiarray_umath.putmask) def putmask(a, mask, values): return (a, mask, values)
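The repeat-on-short-`values` behaviour is exactly where `putmask` and plain boolean assignment part ways; a side-by-side sketch, assuming numpy as np.

    import numpy as np

    x = np.arange(5)
    np.putmask(x, x > 1, [-33, -44])   # short values repeat: [0 1 -33 -44 -33]

    y = np.arange(5)
    try:
        y[y > 1] = [-33, -44]          # boolean assignment does NOT repeat values
    except ValueError:
        print('length mismatch: 2 values for 3 selected elements')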
@array_function_from_c_func_and_dispatcher(_multiarray_umath.packbits) def packbits(myarray, axis=None): '\n packbits(myarray, axis=None)\n\n Packs the elements of a binary-valued array into bits in a uint8 array.\n\n The result is padded to full bytes by inserting zero bits at the end.\n\n Parameters\n ----------\n myarray : array_like\n An array of integers or booleans whose elements should be packed to\n bits.\n axis : int, optional\n The dimension over which bit-packing is done.\n ``None`` implies packing the flattened array.\n\n Returns\n -------\n packed : ndarray\n Array of type uint8 whose elements represent bits corresponding to the\n logical (0 or nonzero) value of the input elements. The shape of\n `packed` has the same number of dimensions as the input (unless `axis`\n is None, in which case the output is 1-D).\n\n See Also\n --------\n unpackbits: Unpacks elements of a uint8 array into a binary-valued output\n array.\n\n Examples\n --------\n >>> a = np.array([[[1,0,1],\n ... [0,1,0]],\n ... [[1,1,0],\n ... [0,0,1]]])\n >>> b = np.packbits(a, axis=-1)\n >>> b\n array([[[160],[64]],[[192],[32]]], dtype=uint8)\n\n Note that in binary 160 = 1010 0000, 64 = 0100 0000, 192 = 1100 0000,\n and 32 = 0010 0000.\n\n ' return (myarray,)
-5,699,911,325,572,923,000
packbits(myarray, axis=None) Packs the elements of a binary-valued array into bits in a uint8 array. The result is padded to full bytes by inserting zero bits at the end. Parameters ---------- myarray : array_like An array of integers or booleans whose elements should be packed to bits. axis : int, optional The dimension over which bit-packing is done. ``None`` implies packing the flattened array. Returns ------- packed : ndarray Array of type uint8 whose elements represent bits corresponding to the logical (0 or nonzero) value of the input elements. The shape of `packed` has the same number of dimensions as the input (unless `axis` is None, in which case the output is 1-D). See Also -------- unpackbits: Unpacks elements of a uint8 array into a binary-valued output array. Examples -------- >>> a = np.array([[[1,0,1], ... [0,1,0]], ... [[1,1,0], ... [0,0,1]]]) >>> b = np.packbits(a, axis=-1) >>> b array([[[160],[64]],[[192],[32]]], dtype=uint8) Note that in binary 160 = 1010 0000, 64 = 0100 0000, 192 = 1100 0000, and 32 = 0010 0000.
venv/lib/python3.7/site-packages/numpy/core/multiarray.py
packbits
180Studios/LoginApp
python
@array_function_from_c_func_and_dispatcher(_multiarray_umath.packbits) def packbits(myarray, axis=None): return (myarray,)
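The zero-bit padding mentioned above is easiest to see with an input whose length is not a multiple of eight; assumes numpy as np.

    import numpy as np

    bits = np.array([1, 1, 0, 1, 0], dtype=np.uint8)
    print(np.packbits(bits))   # [208] -- 0b11010000: three zero bits appended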
@array_function_from_c_func_and_dispatcher(_multiarray_umath.unpackbits) def unpackbits(myarray, axis=None): '\n unpackbits(myarray, axis=None)\n\n Unpacks elements of a uint8 array into a binary-valued output array.\n\n Each element of `myarray` represents a bit-field that should be unpacked\n into a binary-valued output array. The shape of the output array is either\n 1-D (if `axis` is None) or the same shape as the input array with unpacking\n done along the axis specified.\n\n Parameters\n ----------\n myarray : ndarray, uint8 type\n Input array.\n axis : int, optional\n The dimension over which bit-unpacking is done.\n ``None`` implies unpacking the flattened array.\n\n Returns\n -------\n unpacked : ndarray, uint8 type\n The elements are binary-valued (0 or 1).\n\n See Also\n --------\n packbits : Packs the elements of a binary-valued array into bits in a uint8\n array.\n\n Examples\n --------\n >>> a = np.array([[2], [7], [23]], dtype=np.uint8)\n >>> a\n array([[ 2],\n [ 7],\n [23]], dtype=uint8)\n >>> b = np.unpackbits(a, axis=1)\n >>> b\n array([[0, 0, 0, 0, 0, 0, 1, 0],\n [0, 0, 0, 0, 0, 1, 1, 1],\n [0, 0, 0, 1, 0, 1, 1, 1]], dtype=uint8)\n\n ' return (myarray,)
4,681,147,811,743,044,000
unpackbits(myarray, axis=None) Unpacks elements of a uint8 array into a binary-valued output array. Each element of `myarray` represents a bit-field that should be unpacked into a binary-valued output array. The shape of the output array is either 1-D (if `axis` is None) or the same shape as the input array with unpacking done along the axis specified. Parameters ---------- myarray : ndarray, uint8 type Input array. axis : int, optional The dimension over which bit-unpacking is done. ``None`` implies unpacking the flattened array. Returns ------- unpacked : ndarray, uint8 type The elements are binary-valued (0 or 1). See Also -------- packbits : Packs the elements of a binary-valued array into bits in a uint8 array. Examples -------- >>> a = np.array([[2], [7], [23]], dtype=np.uint8) >>> a array([[ 2], [ 7], [23]], dtype=uint8) >>> b = np.unpackbits(a, axis=1) >>> b array([[0, 0, 0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 1, 1, 1], [0, 0, 0, 1, 0, 1, 1, 1]], dtype=uint8)
venv/lib/python3.7/site-packages/numpy/core/multiarray.py
unpackbits
180Studios/LoginApp
python
@array_function_from_c_func_and_dispatcher(_multiarray_umath.unpackbits) def unpackbits(myarray, axis=None): '\n unpackbits(myarray, axis=None)\n\n Unpacks elements of a uint8 array into a binary-valued output array.\n\n Each element of `myarray` represents a bit-field that should be unpacked\n into a binary-valued output array. The shape of the output array is either\n 1-D (if `axis` is None) or the same shape as the input array with unpacking\n done along the axis specified.\n\n Parameters\n ----------\n myarray : ndarray, uint8 type\n Input array.\n axis : int, optional\n The dimension over which bit-unpacking is done.\n ``None`` implies unpacking the flattened array.\n\n Returns\n -------\n unpacked : ndarray, uint8 type\n The elements are binary-valued (0 or 1).\n\n See Also\n --------\n packbits : Packs the elements of a binary-valued array into bits in a uint8\n array.\n\n Examples\n --------\n >>> a = np.array([[2], [7], [23]], dtype=np.uint8)\n >>> a\n array([[ 2],\n [ 7],\n [23]], dtype=uint8)\n >>> b = np.unpackbits(a, axis=1)\n >>> b\n array([[0, 0, 0, 0, 0, 0, 1, 0],\n [0, 0, 0, 0, 0, 1, 1, 1],\n [0, 0, 0, 1, 0, 1, 1, 1]], dtype=uint8)\n\n ' return (myarray,)
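A round-trip sketch (assuming numpy as np): because packbits pads to a full byte, unpackbits returns the padding as trailing zero bits.

>>> a = np.array([1, 0, 1], dtype=np.uint8)
>>> np.unpackbits(np.packbits(a))  # the original three bits plus five pad zeros
array([1, 0, 1, 0, 0, 0, 0, 0], dtype=uint8)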
@array_function_from_c_func_and_dispatcher(_multiarray_umath.shares_memory) def shares_memory(a, b, max_work=None): '\n shares_memory(a, b, max_work=None)\n\n Determine if two arrays share memory\n\n Parameters\n ----------\n a, b : ndarray\n Input arrays\n max_work : int, optional\n Effort to spend on solving the overlap problem (maximum number\n of candidate solutions to consider). The following special\n values are recognized:\n\n max_work=MAY_SHARE_EXACT (default)\n The problem is solved exactly. In this case, the function returns\n True only if there is an element shared between the arrays.\n max_work=MAY_SHARE_BOUNDS\n Only the memory bounds of a and b are checked.\n\n Raises\n ------\n numpy.TooHardError\n Exceeded max_work.\n\n Returns\n -------\n out : bool\n\n See Also\n --------\n may_share_memory\n\n Examples\n --------\n >>> np.may_share_memory(np.array([1,2]), np.array([5,8,9]))\n False\n\n ' return (a, b)
-2,958,432,600,631,115,000
shares_memory(a, b, max_work=None) Determine if two arrays share memory Parameters ---------- a, b : ndarray Input arrays max_work : int, optional Effort to spend on solving the overlap problem (maximum number of candidate solutions to consider). The following special values are recognized: max_work=MAY_SHARE_EXACT (default) The problem is solved exactly. In this case, the function returns True only if there is an element shared between the arrays. max_work=MAY_SHARE_BOUNDS Only the memory bounds of a and b are checked. Raises ------ numpy.TooHardError Exceeded max_work. Returns ------- out : bool See Also -------- may_share_memory Examples -------- >>> np.may_share_memory(np.array([1,2]), np.array([5,8,9])) False
venv/lib/python3.7/site-packages/numpy/core/multiarray.py
shares_memory
180Studios/LoginApp
python
@array_function_from_c_func_and_dispatcher(_multiarray_umath.shares_memory) def shares_memory(a, b, max_work=None): '\n shares_memory(a, b, max_work=None)\n\n Determine if two arrays share memory\n\n Parameters\n ----------\n a, b : ndarray\n Input arrays\n max_work : int, optional\n Effort to spend on solving the overlap problem (maximum number\n of candidate solutions to consider). The following special\n values are recognized:\n\n max_work=MAY_SHARE_EXACT (default)\n The problem is solved exactly. In this case, the function returns\n True only if there is an element shared between the arrays.\n max_work=MAY_SHARE_BOUNDS\n Only the memory bounds of a and b are checked.\n\n Raises\n ------\n numpy.TooHardError\n Exceeded max_work.\n\n Returns\n -------\n out : bool\n\n See Also\n --------\n may_share_memory\n\n Examples\n --------\n >>> np.may_share_memory(np.array([1,2]), np.array([5,8,9]))\n False\n\n ' return (a, b)
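Note that the Examples block above actually calls np.may_share_memory. A sketch of shares_memory itself (assuming numpy as np), which solves the overlap problem exactly by default:

>>> x = np.arange(10)
>>> np.shares_memory(x[::2], x[1::2])  # interleaved views share no element
False
>>> np.shares_memory(x, x[3:])
True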
@array_function_from_c_func_and_dispatcher(_multiarray_umath.may_share_memory) def may_share_memory(a, b, max_work=None): '\n may_share_memory(a, b, max_work=None)\n\n Determine if two arrays might share memory\n\n A return of True does not necessarily mean that the two arrays\n share any element. It just means that they *might*.\n\n Only the memory bounds of a and b are checked by default.\n\n Parameters\n ----------\n a, b : ndarray\n Input arrays\n max_work : int, optional\n Effort to spend on solving the overlap problem. See\n `shares_memory` for details. Default for ``may_share_memory``\n is to do a bounds check.\n\n Returns\n -------\n out : bool\n\n See Also\n --------\n shares_memory\n\n Examples\n --------\n >>> np.may_share_memory(np.array([1,2]), np.array([5,8,9]))\n False\n >>> x = np.zeros([3, 4])\n >>> np.may_share_memory(x[:,0], x[:,1])\n True\n\n ' return (a, b)
379,643,540,804,239,400
may_share_memory(a, b, max_work=None) Determine if two arrays might share memory A return of True does not necessarily mean that the two arrays share any element. It just means that they *might*. Only the memory bounds of a and b are checked by default. Parameters ---------- a, b : ndarray Input arrays max_work : int, optional Effort to spend on solving the overlap problem. See `shares_memory` for details. Default for ``may_share_memory`` is to do a bounds check. Returns ------- out : bool See Also -------- shares_memory Examples -------- >>> np.may_share_memory(np.array([1,2]), np.array([5,8,9])) False >>> x = np.zeros([3, 4]) >>> np.may_share_memory(x[:,0], x[:,1]) True
venv/lib/python3.7/site-packages/numpy/core/multiarray.py
may_share_memory
180Studios/LoginApp
python
@array_function_from_c_func_and_dispatcher(_multiarray_umath.may_share_memory) def may_share_memory(a, b, max_work=None): '\n may_share_memory(a, b, max_work=None)\n\n Determine if two arrays might share memory\n\n A return of True does not necessarily mean that the two arrays\n share any element. It just means that they *might*.\n\n Only the memory bounds of a and b are checked by default.\n\n Parameters\n ----------\n a, b : ndarray\n Input arrays\n max_work : int, optional\n Effort to spend on solving the overlap problem. See\n `shares_memory` for details. Default for ``may_share_memory``\n is to do a bounds check.\n\n Returns\n -------\n out : bool\n\n See Also\n --------\n shares_memory\n\n Examples\n --------\n >>> np.may_share_memory(np.array([1,2]), np.array([5,8,9]))\n False\n >>> x = np.zeros([3, 4])\n >>> np.may_share_memory(x[:,0], x[:,1])\n True\n\n ' return (a, b)
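A sketch contrasting the default bounds-only check with the exact one (assuming numpy as np):

>>> x = np.arange(10)
>>> np.may_share_memory(x[::2], x[1::2])  # memory bounds overlap, so 'might' is True
True
>>> np.shares_memory(x[::2], x[1::2])     # exact check: no element is actually shared
False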
@array_function_from_c_func_and_dispatcher(_multiarray_umath.is_busday) def is_busday(dates, weekmask=None, holidays=None, busdaycal=None, out=None): '\n is_busday(dates, weekmask=\'1111100\', holidays=None, busdaycal=None, out=None)\n\n Calculates which of the given dates are valid days, and which are not.\n\n .. versionadded:: 1.7.0\n\n Parameters\n ----------\n dates : array_like of datetime64[D]\n The array of dates to process.\n weekmask : str or array_like of bool, optional\n A seven-element array indicating which of Monday through Sunday are\n valid days. May be specified as a length-seven list or array, like\n [1,1,1,1,1,0,0]; a length-seven string, like \'1111100\'; or a string\n like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for\n weekdays, optionally separated by white space. Valid abbreviations\n are: Mon Tue Wed Thu Fri Sat Sun\n holidays : array_like of datetime64[D], optional\n An array of dates to consider as invalid dates. They may be\n specified in any order, and NaT (not-a-time) dates are ignored.\n This list is saved in a normalized form that is suited for\n fast calculations of valid days.\n busdaycal : busdaycalendar, optional\n A `busdaycalendar` object which specifies the valid days. If this\n parameter is provided, neither weekmask nor holidays may be\n provided.\n out : array of bool, optional\n If provided, this array is filled with the result.\n\n Returns\n -------\n out : array of bool\n An array with the same shape as ``dates``, containing True for\n each valid day, and False for each invalid day.\n\n See Also\n --------\n busdaycalendar: An object that specifies a custom set of valid days.\n busday_offset : Applies an offset counted in valid days.\n busday_count : Counts how many valid days are in a half-open date range.\n\n Examples\n --------\n >>> # The weekdays are Friday, Saturday, and Monday\n ... np.is_busday([\'2011-07-01\', \'2011-07-02\', \'2011-07-18\'],\n ... holidays=[\'2011-07-01\', \'2011-07-04\', \'2011-07-17\'])\n array([False, False, True], dtype=\'bool\')\n ' return (dates, weekmask, holidays, out)
-3,946,965,257,007,669,000
is_busday(dates, weekmask='1111100', holidays=None, busdaycal=None, out=None) Calculates which of the given dates are valid days, and which are not. .. versionadded:: 1.7.0 Parameters ---------- dates : array_like of datetime64[D] The array of dates to process. weekmask : str or array_like of bool, optional A seven-element array indicating which of Monday through Sunday are valid days. May be specified as a length-seven list or array, like [1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for weekdays, optionally separated by white space. Valid abbreviations are: Mon Tue Wed Thu Fri Sat Sun holidays : array_like of datetime64[D], optional An array of dates to consider as invalid dates. They may be specified in any order, and NaT (not-a-time) dates are ignored. This list is saved in a normalized form that is suited for fast calculations of valid days. busdaycal : busdaycalendar, optional A `busdaycalendar` object which specifies the valid days. If this parameter is provided, neither weekmask nor holidays may be provided. out : array of bool, optional If provided, this array is filled with the result. Returns ------- out : array of bool An array with the same shape as ``dates``, containing True for each valid day, and False for each invalid day. See Also -------- busdaycalendar: An object that specifies a custom set of valid days. busday_offset : Applies an offset counted in valid days. busday_count : Counts how many valid days are in a half-open date range. Examples -------- >>> # The weekdays are Friday, Saturday, and Monday ... np.is_busday(['2011-07-01', '2011-07-02', '2011-07-18'], ... holidays=['2011-07-01', '2011-07-04', '2011-07-17']) array([False, False, True], dtype='bool')
venv/lib/python3.7/site-packages/numpy/core/multiarray.py
is_busday
180Studios/LoginApp
python
@array_function_from_c_func_and_dispatcher(_multiarray_umath.is_busday) def is_busday(dates, weekmask=None, holidays=None, busdaycal=None, out=None): '\n is_busday(dates, weekmask=\'1111100\', holidays=None, busdaycal=None, out=None)\n\n Calculates which of the given dates are valid days, and which are not.\n\n .. versionadded:: 1.7.0\n\n Parameters\n ----------\n dates : array_like of datetime64[D]\n The array of dates to process.\n weekmask : str or array_like of bool, optional\n A seven-element array indicating which of Monday through Sunday are\n valid days. May be specified as a length-seven list or array, like\n [1,1,1,1,1,0,0]; a length-seven string, like \'1111100\'; or a string\n like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for\n weekdays, optionally separated by white space. Valid abbreviations\n are: Mon Tue Wed Thu Fri Sat Sun\n holidays : array_like of datetime64[D], optional\n An array of dates to consider as invalid dates. They may be\n specified in any order, and NaT (not-a-time) dates are ignored.\n This list is saved in a normalized form that is suited for\n fast calculations of valid days.\n busdaycal : busdaycalendar, optional\n A `busdaycalendar` object which specifies the valid days. If this\n parameter is provided, neither weekmask nor holidays may be\n provided.\n out : array of bool, optional\n If provided, this array is filled with the result.\n\n Returns\n -------\n out : array of bool\n An array with the same shape as ``dates``, containing True for\n each valid day, and False for each invalid day.\n\n See Also\n --------\n busdaycalendar: An object that specifies a custom set of valid days.\n busday_offset : Applies an offset counted in valid days.\n busday_count : Counts how many valid days are in a half-open date range.\n\n Examples\n --------\n >>> # The weekdays are Friday, Saturday, and Monday\n ... np.is_busday([\'2011-07-01\', \'2011-07-02\', \'2011-07-18\'],\n ... holidays=[\'2011-07-01\', \'2011-07-04\', \'2011-07-17\'])\n array([False, False, True], dtype=\'bool\')\n ' return (dates, weekmask, holidays, out)
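A short sketch with a custom weekmask (assuming numpy as np; 2011-07-01 was a Friday and 2011-07-02 a Saturday):

>>> np.is_busday(['2011-07-01', '2011-07-02'], weekmask='Fri Sat')
array([ True,  True])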
@array_function_from_c_func_and_dispatcher(_multiarray_umath.busday_offset) def busday_offset(dates, offsets, roll=None, weekmask=None, holidays=None, busdaycal=None, out=None): '\n busday_offset(dates, offsets, roll=\'raise\', weekmask=\'1111100\', holidays=None, busdaycal=None, out=None)\n\n First adjusts the date to fall on a valid day according to\n the ``roll`` rule, then applies offsets to the given dates\n counted in valid days.\n\n .. versionadded:: 1.7.0\n\n Parameters\n ----------\n dates : array_like of datetime64[D]\n The array of dates to process.\n offsets : array_like of int\n The array of offsets, which is broadcast with ``dates``.\n roll : {\'raise\', \'nat\', \'forward\', \'following\', \'backward\', \'preceding\', \'modifiedfollowing\', \'modifiedpreceding\'}, optional\n How to treat dates that do not fall on a valid day. The default\n is \'raise\'.\n\n * \'raise\' means to raise an exception for an invalid day.\n * \'nat\' means to return a NaT (not-a-time) for an invalid day.\n * \'forward\' and \'following\' mean to take the first valid day\n later in time.\n * \'backward\' and \'preceding\' mean to take the first valid day\n earlier in time.\n * \'modifiedfollowing\' means to take the first valid day\n later in time unless it is across a Month boundary, in which\n case to take the first valid day earlier in time.\n * \'modifiedpreceding\' means to take the first valid day\n earlier in time unless it is across a Month boundary, in which\n case to take the first valid day later in time.\n weekmask : str or array_like of bool, optional\n A seven-element array indicating which of Monday through Sunday are\n valid days. May be specified as a length-seven list or array, like\n [1,1,1,1,1,0,0]; a length-seven string, like \'1111100\'; or a string\n like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for\n weekdays, optionally separated by white space. Valid abbreviations\n are: Mon Tue Wed Thu Fri Sat Sun\n holidays : array_like of datetime64[D], optional\n An array of dates to consider as invalid dates. They may be\n specified in any order, and NaT (not-a-time) dates are ignored.\n This list is saved in a normalized form that is suited for\n fast calculations of valid days.\n busdaycal : busdaycalendar, optional\n A `busdaycalendar` object which specifies the valid days. If this\n parameter is provided, neither weekmask nor holidays may be\n provided.\n out : array of datetime64[D], optional\n If provided, this array is filled with the result.\n\n Returns\n -------\n out : array of datetime64[D]\n An array with a shape from broadcasting ``dates`` and ``offsets``\n together, containing the dates with offsets applied.\n\n See Also\n --------\n busdaycalendar: An object that specifies a custom set of valid days.\n is_busday : Returns a boolean array indicating valid days.\n busday_count : Counts how many valid days are in a half-open date range.\n\n Examples\n --------\n >>> # First business day in October 2011 (not accounting for holidays)\n ... np.busday_offset(\'2011-10\', 0, roll=\'forward\')\n numpy.datetime64(\'2011-10-03\',\'D\')\n >>> # Last business day in February 2012 (not accounting for holidays)\n ... np.busday_offset(\'2012-03\', -1, roll=\'forward\')\n numpy.datetime64(\'2012-02-29\',\'D\')\n >>> # Third Wednesday in January 2011\n ... np.busday_offset(\'2011-01\', 2, roll=\'forward\', weekmask=\'Wed\')\n numpy.datetime64(\'2011-01-19\',\'D\')\n >>> # 2012 Mother\'s Day in Canada and the U.S.\n ... np.busday_offset(\'2012-05\', 1, roll=\'forward\', weekmask=\'Sun\')\n numpy.datetime64(\'2012-05-13\',\'D\')\n\n >>> # First business day on or after a date\n ... np.busday_offset(\'2011-03-20\', 0, roll=\'forward\')\n numpy.datetime64(\'2011-03-21\',\'D\')\n >>> np.busday_offset(\'2011-03-22\', 0, roll=\'forward\')\n numpy.datetime64(\'2011-03-22\',\'D\')\n >>> # First business day after a date\n ... np.busday_offset(\'2011-03-20\', 1, roll=\'backward\')\n numpy.datetime64(\'2011-03-21\',\'D\')\n >>> np.busday_offset(\'2011-03-22\', 1, roll=\'backward\')\n numpy.datetime64(\'2011-03-23\',\'D\')\n ' return (dates, offsets, weekmask, holidays, out)
-7,629,953,265,631,859,000
busday_offset(dates, offsets, roll='raise', weekmask='1111100', holidays=None, busdaycal=None, out=None) First adjusts the date to fall on a valid day according to the ``roll`` rule, then applies offsets to the given dates counted in valid days. .. versionadded:: 1.7.0 Parameters ---------- dates : array_like of datetime64[D] The array of dates to process. offsets : array_like of int The array of offsets, which is broadcast with ``dates``. roll : {'raise', 'nat', 'forward', 'following', 'backward', 'preceding', 'modifiedfollowing', 'modifiedpreceding'}, optional How to treat dates that do not fall on a valid day. The default is 'raise'. * 'raise' means to raise an exception for an invalid day. * 'nat' means to return a NaT (not-a-time) for an invalid day. * 'forward' and 'following' mean to take the first valid day later in time. * 'backward' and 'preceding' mean to take the first valid day earlier in time. * 'modifiedfollowing' means to take the first valid day later in time unless it is across a Month boundary, in which case to take the first valid day earlier in time. * 'modifiedpreceding' means to take the first valid day earlier in time unless it is across a Month boundary, in which case to take the first valid day later in time. weekmask : str or array_like of bool, optional A seven-element array indicating which of Monday through Sunday are valid days. May be specified as a length-seven list or array, like [1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for weekdays, optionally separated by white space. Valid abbreviations are: Mon Tue Wed Thu Fri Sat Sun holidays : array_like of datetime64[D], optional An array of dates to consider as invalid dates. They may be specified in any order, and NaT (not-a-time) dates are ignored. This list is saved in a normalized form that is suited for fast calculations of valid days. busdaycal : busdaycalendar, optional A `busdaycalendar` object which specifies the valid days. If this parameter is provided, neither weekmask nor holidays may be provided. out : array of datetime64[D], optional If provided, this array is filled with the result. Returns ------- out : array of datetime64[D] An array with a shape from broadcasting ``dates`` and ``offsets`` together, containing the dates with offsets applied. See Also -------- busdaycalendar: An object that specifies a custom set of valid days. is_busday : Returns a boolean array indicating valid days. busday_count : Counts how many valid days are in a half-open date range. Examples -------- >>> # First business day in October 2011 (not accounting for holidays) ... np.busday_offset('2011-10', 0, roll='forward') numpy.datetime64('2011-10-03','D') >>> # Last business day in February 2012 (not accounting for holidays) ... np.busday_offset('2012-03', -1, roll='forward') numpy.datetime64('2012-02-29','D') >>> # Third Wednesday in January 2011 ... np.busday_offset('2011-01', 2, roll='forward', weekmask='Wed') numpy.datetime64('2011-01-19','D') >>> # 2012 Mother's Day in Canada and the U.S. ... np.busday_offset('2012-05', 1, roll='forward', weekmask='Sun') numpy.datetime64('2012-05-13','D') >>> # First business day on or after a date ... np.busday_offset('2011-03-20', 0, roll='forward') numpy.datetime64('2011-03-21','D') >>> np.busday_offset('2011-03-22', 0, roll='forward') numpy.datetime64('2011-03-22','D') >>> # First business day after a date ... np.busday_offset('2011-03-20', 1, roll='backward') numpy.datetime64('2011-03-21','D') >>> np.busday_offset('2011-03-22', 1, roll='backward') numpy.datetime64('2011-03-23','D')
venv/lib/python3.7/site-packages/numpy/core/multiarray.py
busday_offset
180Studios/LoginApp
python
@array_function_from_c_func_and_dispatcher(_multiarray_umath.busday_offset) def busday_offset(dates, offsets, roll=None, weekmask=None, holidays=None, busdaycal=None, out=None): '\n busday_offset(dates, offsets, roll=\'raise\', weekmask=\'1111100\', holidays=None, busdaycal=None, out=None)\n\n First adjusts the date to fall on a valid day according to\n the ``roll`` rule, then applies offsets to the given dates\n counted in valid days.\n\n .. versionadded:: 1.7.0\n\n Parameters\n ----------\n dates : array_like of datetime64[D]\n The array of dates to process.\n offsets : array_like of int\n The array of offsets, which is broadcast with ``dates``.\n roll : {\'raise\', \'nat\', \'forward\', \'following\', \'backward\', \'preceding\', \'modifiedfollowing\', \'modifiedpreceding\'}, optional\n How to treat dates that do not fall on a valid day. The default\n is \'raise\'.\n\n * \'raise\' means to raise an exception for an invalid day.\n * \'nat\' means to return a NaT (not-a-time) for an invalid day.\n * \'forward\' and \'following\' mean to take the first valid day\n later in time.\n * \'backward\' and \'preceding\' mean to take the first valid day\n earlier in time.\n * \'modifiedfollowing\' means to take the first valid day\n later in time unless it is across a Month boundary, in which\n case to take the first valid day earlier in time.\n * \'modifiedpreceding\' means to take the first valid day\n earlier in time unless it is across a Month boundary, in which\n case to take the first valid day later in time.\n weekmask : str or array_like of bool, optional\n A seven-element array indicating which of Monday through Sunday are\n valid days. May be specified as a length-seven list or array, like\n [1,1,1,1,1,0,0]; a length-seven string, like \'1111100\'; or a string\n like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for\n weekdays, optionally separated by white space. Valid abbreviations\n are: Mon Tue Wed Thu Fri Sat Sun\n holidays : array_like of datetime64[D], optional\n An array of dates to consider as invalid dates. They may be\n specified in any order, and NaT (not-a-time) dates are ignored.\n This list is saved in a normalized form that is suited for\n fast calculations of valid days.\n busdaycal : busdaycalendar, optional\n A `busdaycalendar` object which specifies the valid days. If this\n parameter is provided, neither weekmask nor holidays may be\n provided.\n out : array of datetime64[D], optional\n If provided, this array is filled with the result.\n\n Returns\n -------\n out : array of datetime64[D]\n An array with a shape from broadcasting ``dates`` and ``offsets``\n together, containing the dates with offsets applied.\n\n See Also\n --------\n busdaycalendar: An object that specifies a custom set of valid days.\n is_busday : Returns a boolean array indicating valid days.\n busday_count : Counts how many valid days are in a half-open date range.\n\n Examples\n --------\n >>> # First business day in October 2011 (not accounting for holidays)\n ... np.busday_offset(\'2011-10\', 0, roll=\'forward\')\n numpy.datetime64(\'2011-10-03\',\'D\')\n >>> # Last business day in February 2012 (not accounting for holidays)\n ... np.busday_offset(\'2012-03\', -1, roll=\'forward\')\n numpy.datetime64(\'2012-02-29\',\'D\')\n >>> # Third Wednesday in January 2011\n ... np.busday_offset(\'2011-01\', 2, roll=\'forward\', weekmask=\'Wed\')\n numpy.datetime64(\'2011-01-19\',\'D\')\n >>> # 2012 Mother\'s Day in Canada and the U.S.\n ... np.busday_offset(\'2012-05\', 1, roll=\'forward\', weekmask=\'Sun\')\n numpy.datetime64(\'2012-05-13\',\'D\')\n\n >>> # First business day on or after a date\n ... np.busday_offset(\'2011-03-20\', 0, roll=\'forward\')\n numpy.datetime64(\'2011-03-21\',\'D\')\n >>> np.busday_offset(\'2011-03-22\', 0, roll=\'forward\')\n numpy.datetime64(\'2011-03-22\',\'D\')\n >>> # First business day after a date\n ... np.busday_offset(\'2011-03-20\', 1, roll=\'backward\')\n numpy.datetime64(\'2011-03-21\',\'D\')\n >>> np.busday_offset(\'2011-03-22\', 1, roll=\'backward\')\n numpy.datetime64(\'2011-03-23\',\'D\')\n ' return (dates, offsets, weekmask, holidays, out)
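A minimal roll example (assuming numpy as np; the repr follows the older NumPy style used in the docstring above, while newer versions omit the 'D'):

>>> np.busday_offset('2011-03-19', 0, roll='forward')  # Saturday rolls forward to Monday
numpy.datetime64('2011-03-21','D')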
@array_function_from_c_func_and_dispatcher(_multiarray_umath.busday_count) def busday_count(begindates, enddates, weekmask=None, holidays=None, busdaycal=None, out=None): '\n busday_count(begindates, enddates, weekmask=\'1111100\', holidays=[], busdaycal=None, out=None)\n\n Counts the number of valid days between `begindates` and\n `enddates`, not including the day of `enddates`.\n\n If ``enddates`` specifies a date value that is earlier than the\n corresponding ``begindates`` date value, the count will be negative.\n\n .. versionadded:: 1.7.0\n\n Parameters\n ----------\n begindates : array_like of datetime64[D]\n The array of the first dates for counting.\n enddates : array_like of datetime64[D]\n The array of the end dates for counting, which are excluded\n from the count themselves.\n weekmask : str or array_like of bool, optional\n A seven-element array indicating which of Monday through Sunday are\n valid days. May be specified as a length-seven list or array, like\n [1,1,1,1,1,0,0]; a length-seven string, like \'1111100\'; or a string\n like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for\n weekdays, optionally separated by white space. Valid abbreviations\n are: Mon Tue Wed Thu Fri Sat Sun\n holidays : array_like of datetime64[D], optional\n An array of dates to consider as invalid dates. They may be\n specified in any order, and NaT (not-a-time) dates are ignored.\n This list is saved in a normalized form that is suited for\n fast calculations of valid days.\n busdaycal : busdaycalendar, optional\n A `busdaycalendar` object which specifies the valid days. If this\n parameter is provided, neither weekmask nor holidays may be\n provided.\n out : array of int, optional\n If provided, this array is filled with the result.\n\n Returns\n -------\n out : array of int\n An array with a shape from broadcasting ``begindates`` and ``enddates``\n together, containing the number of valid days between\n the begin and end dates.\n\n See Also\n --------\n busdaycalendar: An object that specifies a custom set of valid days.\n is_busday : Returns a boolean array indicating valid days.\n busday_offset : Applies an offset counted in valid days.\n\n Examples\n --------\n >>> # Number of weekdays in January 2011\n ... np.busday_count(\'2011-01\', \'2011-02\')\n 21\n >>> # Number of weekdays in 2011\n ... np.busday_count(\'2011\', \'2012\')\n 260\n >>> # Number of Saturdays in 2011\n ... np.busday_count(\'2011\', \'2012\', weekmask=\'Sat\')\n 53\n ' return (begindates, enddates, weekmask, holidays, out)
2,000,849,704,293,497,000
busday_count(begindates, enddates, weekmask='1111100', holidays=[], busdaycal=None, out=None) Counts the number of valid days between `begindates` and `enddates`, not including the day of `enddates`. If ``enddates`` specifies a date value that is earlier than the corresponding ``begindates`` date value, the count will be negative. .. versionadded:: 1.7.0 Parameters ---------- begindates : array_like of datetime64[D] The array of the first dates for counting. enddates : array_like of datetime64[D] The array of the end dates for counting, which are excluded from the count themselves. weekmask : str or array_like of bool, optional A seven-element array indicating which of Monday through Sunday are valid days. May be specified as a length-seven list or array, like [1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for weekdays, optionally separated by white space. Valid abbreviations are: Mon Tue Wed Thu Fri Sat Sun holidays : array_like of datetime64[D], optional An array of dates to consider as invalid dates. They may be specified in any order, and NaT (not-a-time) dates are ignored. This list is saved in a normalized form that is suited for fast calculations of valid days. busdaycal : busdaycalendar, optional A `busdaycalendar` object which specifies the valid days. If this parameter is provided, neither weekmask nor holidays may be provided. out : array of int, optional If provided, this array is filled with the result. Returns ------- out : array of int An array with a shape from broadcasting ``begindates`` and ``enddates`` together, containing the number of valid days between the begin and end dates. See Also -------- busdaycalendar: An object that specifies a custom set of valid days. is_busday : Returns a boolean array indicating valid days. busday_offset : Applies an offset counted in valid days. Examples -------- >>> # Number of weekdays in January 2011 ... np.busday_count('2011-01', '2011-02') 21 >>> # Number of weekdays in 2011 ... np.busday_count('2011', '2012') 260 >>> # Number of Saturdays in 2011 ... np.busday_count('2011', '2012', weekmask='Sat') 53
venv/lib/python3.7/site-packages/numpy/core/multiarray.py
busday_count
180Studios/LoginApp
python
@array_function_from_c_func_and_dispatcher(_multiarray_umath.busday_count) def busday_count(begindates, enddates, weekmask=None, holidays=None, busdaycal=None, out=None): '\n busday_count(begindates, enddates, weekmask=\'1111100\', holidays=[], busdaycal=None, out=None)\n\n Counts the number of valid days between `begindates` and\n `enddates`, not including the day of `enddates`.\n\n If ``enddates`` specifies a date value that is earlier than the\n corresponding ``begindates`` date value, the count will be negative.\n\n .. versionadded:: 1.7.0\n\n Parameters\n ----------\n begindates : array_like of datetime64[D]\n The array of the first dates for counting.\n enddates : array_like of datetime64[D]\n The array of the end dates for counting, which are excluded\n from the count themselves.\n weekmask : str or array_like of bool, optional\n A seven-element array indicating which of Monday through Sunday are\n valid days. May be specified as a length-seven list or array, like\n [1,1,1,1,1,0,0]; a length-seven string, like \'1111100\'; or a string\n like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for\n weekdays, optionally separated by white space. Valid abbreviations\n are: Mon Tue Wed Thu Fri Sat Sun\n holidays : array_like of datetime64[D], optional\n An array of dates to consider as invalid dates. They may be\n specified in any order, and NaT (not-a-time) dates are ignored.\n This list is saved in a normalized form that is suited for\n fast calculations of valid days.\n busdaycal : busdaycalendar, optional\n A `busdaycalendar` object which specifies the valid days. If this\n parameter is provided, neither weekmask nor holidays may be\n provided.\n out : array of int, optional\n If provided, this array is filled with the result.\n\n Returns\n -------\n out : array of int\n An array with a shape from broadcasting ``begindates`` and ``enddates``\n together, containing the number of valid days between\n the begin and end dates.\n\n See Also\n --------\n busdaycalendar: An object that specifies a custom set of valid days.\n is_busday : Returns a boolean array indicating valid days.\n busday_offset : Applies an offset counted in valid days.\n\n Examples\n --------\n >>> # Number of weekdays in January 2011\n ... np.busday_count(\'2011-01\', \'2011-02\')\n 21\n >>> # Number of weekdays in 2011\n ... np.busday_count(\'2011\', \'2012\')\n 260\n >>> # Number of Saturdays in 2011\n ... np.busday_count(\'2011\', \'2012\', weekmask=\'Sat\')\n 53\n ' return (begindates, enddates, weekmask, holidays, out)
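A sketch of the half-open counting convention (assuming numpy as np; 2011-01-03 was a Monday):

>>> np.busday_count('2011-01-03', '2011-01-10')  # one full Mon-Fri week
5
>>> np.busday_count('2011-01-10', '2011-01-03')  # reversed range counts negative
-5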
@array_function_from_c_func_and_dispatcher(_multiarray_umath.datetime_as_string) def datetime_as_string(arr, unit=None, timezone=None, casting=None): "\n datetime_as_string(arr, unit=None, timezone='naive', casting='same_kind')\n\n Convert an array of datetimes into an array of strings.\n\n Parameters\n ----------\n arr : array_like of datetime64\n The array of UTC timestamps to format.\n unit : str\n One of None, 'auto', or a :ref:`datetime unit <arrays.dtypes.dateunits>`.\n timezone : {'naive', 'UTC', 'local'} or tzinfo\n Timezone information to use when displaying the datetime. If 'UTC', end\n with a Z to indicate UTC time. If 'local', convert to the local timezone\n first, and suffix with a +-#### timezone offset. If a tzinfo object,\n then do as with 'local', but use the specified timezone.\n casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}\n Casting to allow when changing between datetime units.\n\n Returns\n -------\n str_arr : ndarray\n An array of strings the same shape as `arr`.\n\n Examples\n --------\n >>> d = np.arange('2002-10-27T04:30', 4*60, 60, dtype='M8[m]')\n >>> d\n array(['2002-10-27T04:30', '2002-10-27T05:30', '2002-10-27T06:30',\n '2002-10-27T07:30'], dtype='datetime64[m]')\n\n Setting the timezone to UTC shows the same information, but with a Z suffix\n\n >>> np.datetime_as_string(d, timezone='UTC')\n array(['2002-10-27T04:30Z', '2002-10-27T05:30Z', '2002-10-27T06:30Z',\n '2002-10-27T07:30Z'], dtype='<U35')\n\n Note that we picked datetimes that cross a DST boundary. Passing in a\n ``pytz`` timezone object will print the appropriate offset\n\n >>> np.datetime_as_string(d, timezone=pytz.timezone('US/Eastern'))\n array(['2002-10-27T00:30-0400', '2002-10-27T01:30-0400',\n '2002-10-27T01:30-0500', '2002-10-27T02:30-0500'], dtype='<U39')\n\n Passing in a unit will change the precision\n\n >>> np.datetime_as_string(d, unit='h')\n array(['2002-10-27T04', '2002-10-27T05', '2002-10-27T06', '2002-10-27T07'],\n dtype='<U32')\n >>> np.datetime_as_string(d, unit='s')\n array(['2002-10-27T04:30:00', '2002-10-27T05:30:00', '2002-10-27T06:30:00',\n '2002-10-27T07:30:00'], dtype='<U38')\n\n 'casting' can be used to specify whether precision can be changed\n\n >>> np.datetime_as_string(d, unit='h', casting='safe')\n TypeError: Cannot create a datetime string as units 'h' from a NumPy\n datetime with units 'm' according to the rule 'safe'\n " return (arr,)
7,093,229,090,673,673,000
datetime_as_string(arr, unit=None, timezone='naive', casting='same_kind') Convert an array of datetimes into an array of strings. Parameters ---------- arr : array_like of datetime64 The array of UTC timestamps to format. unit : str One of None, 'auto', or a :ref:`datetime unit <arrays.dtypes.dateunits>`. timezone : {'naive', 'UTC', 'local'} or tzinfo Timezone information to use when displaying the datetime. If 'UTC', end with a Z to indicate UTC time. If 'local', convert to the local timezone first, and suffix with a +-#### timezone offset. If a tzinfo object, then do as with 'local', but use the specified timezone. casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'} Casting to allow when changing between datetime units. Returns ------- str_arr : ndarray An array of strings the same shape as `arr`. Examples -------- >>> d = np.arange('2002-10-27T04:30', 4*60, 60, dtype='M8[m]') >>> d array(['2002-10-27T04:30', '2002-10-27T05:30', '2002-10-27T06:30', '2002-10-27T07:30'], dtype='datetime64[m]') Setting the timezone to UTC shows the same information, but with a Z suffix >>> np.datetime_as_string(d, timezone='UTC') array(['2002-10-27T04:30Z', '2002-10-27T05:30Z', '2002-10-27T06:30Z', '2002-10-27T07:30Z'], dtype='<U35') Note that we picked datetimes that cross a DST boundary. Passing in a ``pytz`` timezone object will print the appropriate offset >>> np.datetime_as_string(d, timezone=pytz.timezone('US/Eastern')) array(['2002-10-27T00:30-0400', '2002-10-27T01:30-0400', '2002-10-27T01:30-0500', '2002-10-27T02:30-0500'], dtype='<U39') Passing in a unit will change the precision >>> np.datetime_as_string(d, unit='h') array(['2002-10-27T04', '2002-10-27T05', '2002-10-27T06', '2002-10-27T07'], dtype='<U32') >>> np.datetime_as_string(d, unit='s') array(['2002-10-27T04:30:00', '2002-10-27T05:30:00', '2002-10-27T06:30:00', '2002-10-27T07:30:00'], dtype='<U38') 'casting' can be used to specify whether precision can be changed >>> np.datetime_as_string(d, unit='h', casting='safe') TypeError: Cannot create a datetime string as units 'h' from a NumPy datetime with units 'm' according to the rule 'safe'
venv/lib/python3.7/site-packages/numpy/core/multiarray.py
datetime_as_string
180Studios/LoginApp
python
@array_function_from_c_func_and_dispatcher(_multiarray_umath.datetime_as_string) def datetime_as_string(arr, unit=None, timezone=None, casting=None): "\n datetime_as_string(arr, unit=None, timezone='naive', casting='same_kind')\n\n Convert an array of datetimes into an array of strings.\n\n Parameters\n ----------\n arr : array_like of datetime64\n The array of UTC timestamps to format.\n unit : str\n One of None, 'auto', or a :ref:`datetime unit <arrays.dtypes.dateunits>`.\n timezone : {'naive', 'UTC', 'local'} or tzinfo\n Timezone information to use when displaying the datetime. If 'UTC', end\n with a Z to indicate UTC time. If 'local', convert to the local timezone\n first, and suffix with a +-#### timezone offset. If a tzinfo object,\n then do as with 'local', but use the specified timezone.\n casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}\n Casting to allow when changing between datetime units.\n\n Returns\n -------\n str_arr : ndarray\n An array of strings the same shape as `arr`.\n\n Examples\n --------\n >>> d = np.arange('2002-10-27T04:30', 4*60, 60, dtype='M8[m]')\n >>> d\n array(['2002-10-27T04:30', '2002-10-27T05:30', '2002-10-27T06:30',\n '2002-10-27T07:30'], dtype='datetime64[m]')\n\n Setting the timezone to UTC shows the same information, but with a Z suffix\n\n >>> np.datetime_as_string(d, timezone='UTC')\n array(['2002-10-27T04:30Z', '2002-10-27T05:30Z', '2002-10-27T06:30Z',\n '2002-10-27T07:30Z'], dtype='<U35')\n\n Note that we picked datetimes that cross a DST boundary. Passing in a\n ``pytz`` timezone object will print the appropriate offset\n\n >>> np.datetime_as_string(d, timezone=pytz.timezone('US/Eastern'))\n array(['2002-10-27T00:30-0400', '2002-10-27T01:30-0400',\n '2002-10-27T01:30-0500', '2002-10-27T02:30-0500'], dtype='<U39')\n\n Passing in a unit will change the precision\n\n >>> np.datetime_as_string(d, unit='h')\n array(['2002-10-27T04', '2002-10-27T05', '2002-10-27T06', '2002-10-27T07'],\n dtype='<U32')\n >>> np.datetime_as_string(d, unit='s')\n array(['2002-10-27T04:30:00', '2002-10-27T05:30:00', '2002-10-27T06:30:00',\n '2002-10-27T07:30:00'], dtype='<U38')\n\n 'casting' can be used to specify whether precision can be changed\n\n >>> np.datetime_as_string(d, unit='h', casting='safe')\n TypeError: Cannot create a datetime string as units 'h' from a NumPy\n datetime with units 'm' according to the rule 'safe'\n " return (arr,)
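A minimal unit-truncation sketch (assuming numpy as np; the exact '<U..' width in the repr varies by NumPy version):

>>> d = np.array(['2002-10-27T04:30'], dtype='M8[m]')
>>> np.datetime_as_string(d, unit='h')  # coarser unit, allowed under the default 'same_kind' casting
array(['2002-10-27T04'], dtype='<U32')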
def set_seed(seed): 'Set seed for reproduction.\n ' seed = (seed + dist.get_rank()) random.seed(seed) np.random.seed(seed) paddle.seed(seed) os.environ['PYTHONHASHSEED'] = str(seed)
-1,180,362,922,598,118,700
Set seed for reproduction.
apps/Graph4KG/utils.py
set_seed
LemonNoel/PGL
python
def set_seed(seed): '\n ' seed = (seed + dist.get_rank()) random.seed(seed) np.random.seed(seed) paddle.seed(seed) os.environ['PYTHONHASHSEED'] = str(seed)
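The helper offsets the base seed by the distributed rank so each worker draws a distinct random stream. A framework-free sketch of the same idea (set_seed_for_rank is a hypothetical name; the original additionally seeds paddle and obtains the rank from paddle.distributed):

import os
import random
import numpy as np

def set_seed_for_rank(seed, rank):
    # Offset the base seed by the worker rank so replicas differ.
    seed = seed + rank
    random.seed(seed)
    np.random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)

set_seed_for_rank(42, rank=0)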
def set_logger(args): 'Write logs to console and log file.\n ' log_file = os.path.join(args.save_path, 'train.log') logging.basicConfig(format='%(asctime)s %(levelname)-8s %(message)s', level=logging.INFO, datefmt='%Y-%m-%d %H:%M:%S', filename=log_file, filemode='a+') if args.print_on_screen: console = logging.StreamHandler() console.setLevel(logging.INFO) formatter = logging.Formatter('%(asctime)s %(levelname)-8s %(message)s') console.setFormatter(formatter) logging.getLogger('').addHandler(console) for arg in vars(args): logging.info('{:20}:{}'.format(arg, getattr(args, arg)))
1,284,161,568,627,359,000
Write logs to console and log file.
apps/Graph4KG/utils.py
set_logger
LemonNoel/PGL
python
def set_logger(args): '\n ' log_file = os.path.join(args.save_path, 'train.log') logging.basicConfig(format='%(asctime)s %(levelname)-8s %(message)s', level=logging.INFO, datefmt='%Y-%m-%d %H:%M:%S', filename=log_file, filemode='a+') if args.print_on_screen: console = logging.StreamHandler() console.setLevel(logging.INFO) formatter = logging.Formatter('%(asctime)s %(levelname)-8s %(message)s') console.setFormatter(formatter) logging.getLogger().addHandler(console) for arg in vars(args): logging.info('{:20}:{}'.format(arg, getattr(args, arg)))
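A usage sketch (the args object here is hypothetical; any attribute-style namespace with save_path and print_on_screen works, and save_path must already exist):

from types import SimpleNamespace

args = SimpleNamespace(save_path='./output', print_on_screen=True)
# set_logger(args)  # appends to ./output/train.log and, with print_on_screen, echoes INFO records to the console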
def print_log(step, interval, log, timer, time_sum): 'Print log to logger.\n ' logging.info(('[GPU %d] step: %d, loss: %.5f, reg: %.4e, speed: %.2f steps/s, time: %.2f s' % (dist.get_rank(), step, (log['loss'] / interval), (log['reg'] / interval), (interval / time_sum), time_sum))) logging.info(('sample: %f, forward: %f, backward: %f, update: %f' % (timer['sample'], timer['forward'], timer['backward'], timer['update'])))
-7,832,974,644,119,050,000
Print log to logger.
apps/Graph4KG/utils.py
print_log
LemonNoel/PGL
python
def print_log(step, interval, log, timer, time_sum): '\n ' logging.info(('[GPU %d] step: %d, loss: %.5f, reg: %.4e, speed: %.2f steps/s, time: %.2f s' % (dist.get_rank(), step, (log['loss'] / interval), (log['reg'] / interval), (interval / time_sum), time_sum))) logging.info(('sample: %f, forward: %f, backward: %f, update: %f' % (timer['sample'], timer['forward'], timer['backward'], timer['update'])))
def uniform(low, high, size, dtype=np.float32, seed=0): 'Memory efficient uniform implementation.\n ' rng = np.random.default_rng(seed) out = (((high - low) * rng.random(size, dtype=dtype)) + low) return out
5,091,072,456,343,243,000
Memory efficient uniform implementation.
apps/Graph4KG/utils.py
uniform
LemonNoel/PGL
python
def uniform(low, high, size, dtype=np.float32, seed=0): '\n ' rng = np.random.default_rng(seed) out = (((high - low) * rng.random(size, dtype=dtype)) + low) return out
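Drawing directly in float32 via numpy's Generator avoids materialising a float64 intermediate, roughly halving peak memory for large embedding tables. A usage sketch:

>>> emb = uniform(-0.05, 0.05, size=(1000, 128), seed=7)
>>> emb.dtype
dtype('float32')
>>> bool(emb.min() >= -0.05 and emb.max() < 0.05)
True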
def timer_wrapper(name): 'Time counter wrapper.\n ' def decorate(func): 'decorate func\n ' @functools.wraps(func) def wrapper(*args, **kwargs): 'wrapper func\n ' logging.info(f'[{name}] start...') ts = time.time() result = func(*args, **kwargs) te = time.time() costs = (te - ts) if (costs < 0.0001): cost_str = ('%f sec' % costs) elif (costs > 3600): cost_str = ('%.4f sec (%.4f hours)' % (costs, (costs / 3600.0))) else: cost_str = ('%.4f sec' % costs) logging.info(f'[{name}] finished! It takes {cost_str} s') return result return wrapper return decorate
-1,646,729,750,719,834,600
Time counter wrapper.
apps/Graph4KG/utils.py
timer_wrapper
LemonNoel/PGL
python
def timer_wrapper(name): '\n ' def decorate(func): 'decorate func\n ' @functools.wraps(func) def wrapper(*args, **kwargs): 'wrapper func\n ' logging.info(f'[{name}] start...') ts = time.time() result = func(*args, **kwargs) te = time.time() costs = (te - ts) if (costs < 0.0001): cost_str = ('%f sec' % costs) elif (costs > 3600): cost_str = ('%.4f sec (%.4f hours)' % (costs, (costs / 3600.0))) else: cost_str = ('%.4f sec' % costs) logging.info(f'[{name}] finished! It takes {cost_str} s') return result return wrapper return decorate
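A usage sketch (toy_step is a hypothetical function; logging must be configured, e.g. via set_logger above, for the messages to appear):

import logging
logging.basicConfig(level=logging.INFO)

@timer_wrapper('toy step')
def toy_step(n):
    return sum(range(n))

toy_step(10 ** 6)  # logs '[toy step] start...' before the call and the elapsed time on return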
def calculate_metrics(scores, corr_idxs, filter_list): 'Calculate metrics according to scores.\n ' logs = [] for i in range(scores.shape[0]): rank = (scores[i] > scores[i][corr_idxs[i]]).astype('float32') if (filter_list is not None): mask = paddle.ones(rank.shape, dtype='float32') mask[filter_list[i]] = 0.0 rank = (rank * mask) rank = (paddle.sum(rank) + 1) logs.append({'MRR': (1.0 / rank), 'MR': float(rank), 'HITS@1': (1.0 if (rank <= 1) else 0.0), 'HITS@3': (1.0 if (rank <= 3) else 0.0), 'HITS@10': (1.0 if (rank <= 10) else 0.0)}) return logs
-941,842,204,942,299,900
Calculate metrics according to scores.
apps/Graph4KG/utils.py
calculate_metrics
LemonNoel/PGL
python
def calculate_metrics(scores, corr_idxs, filter_list): '\n ' logs = [] for i in range(scores.shape[0]): rank = (scores[i] > scores[i][corr_idxs[i]]).astype('float32') if (filter_list is not None): mask = paddle.ones(rank.shape, dtype='float32') mask[filter_list[i]] = 0.0 rank = (rank * mask) rank = (paddle.sum(rank) + 1) logs.append({'MRR': (1.0 / rank), 'MR': float(rank), 'HITS@1': (1.0 if (rank <= 1) else 0.0), 'HITS@3': (1.0 if (rank <= 3) else 0.0), 'HITS@10': (1.0 if (rank <= 10) else 0.0)}) return logs
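The rank used above is 1 plus the number of candidates scoring strictly higher than the true entity, with filtered candidates masked out before counting. A numpy-only sketch of the same rule (rank_of_true is a hypothetical helper; the original operates on paddle tensors):

import numpy as np

def rank_of_true(scores, true_idx, filtered=None):
    # Candidates beating the true score; filtered ids never hurt the rank.
    higher = scores > scores[true_idx]
    if filtered is not None:
        higher[np.asarray(filtered)] = False
    return int(higher.sum()) + 1

scores = np.array([0.1, 0.9, 0.4, 0.7])
print(rank_of_true(scores, true_idx=2))                # 3: indices 1 and 3 score higher
print(rank_of_true(scores, true_idx=2, filtered=[1]))  # 2 once index 1 is filtered out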
@timer_wrapper('evaluation') def evaluate(model, loader, evaluate_mode='test', filter_dict=None, save_path='./tmp/', data_mode='hrt'): 'Evaluate given KGE model.\n ' if (data_mode == 'wikikg2'): evaluate_wikikg2(model, loader, evaluate_mode, save_path) elif (data_mode == 'wikikg90m'): evaluate_wikikg90m(model, loader, evaluate_mode, save_path) else: model.eval() with paddle.no_grad(): h_metrics = [] t_metrics = [] output = {'h,r->t': {}, 't,r->h': {}, 'average': {}} for (h, r, t) in tqdm(loader): t_score = model.predict(h, r, mode='tail') h_score = model.predict(t, r, mode='head') if (filter_dict is not None): h_filter_list = [filter_dict['head'][(ti, ri)] for (ti, ri) in zip(t.numpy(), r.numpy())] t_filter_list = [filter_dict['tail'][(hi, ri)] for (hi, ri) in zip(h.numpy(), r.numpy())] else: h_filter_list = None t_filter_list = None h_metrics += calculate_metrics(h_score, h, h_filter_list) t_metrics += calculate_metrics(t_score, t, t_filter_list) for metric in h_metrics[0].keys(): output['t,r->h'][metric] = np.mean([x[metric] for x in h_metrics]) output['h,r->t'][metric] = np.mean([x[metric] for x in t_metrics]) output['average'][metric] = ((output['t,r->h'][metric] + output['h,r->t'][metric]) / 2) logging.info(('-------------- %s result --------------' % evaluate_mode)) logging.info(('t,r->h |' + ' '.join(['{}: {}'.format(k, v) for (k, v) in output['t,r->h'].items()]))) logging.info(('h,r->t |' + ' '.join(['{}: {}'.format(k, v) for (k, v) in output['h,r->t'].items()]))) logging.info(('average |' + ' '.join(['{}: {}'.format(k, v) for (k, v) in output['average'].items()]))) logging.info('-----------------------------------------')
8,041,928,486,932,966,000
Evaluate given KGE model.
apps/Graph4KG/utils.py
evaluate
LemonNoel/PGL
python
@timer_wrapper('evaluation') def evaluate(model, loader, evaluate_mode='test', filter_dict=None, save_path='./tmp/', data_mode='hrt'): '\n ' if (data_mode == 'wikikg2'): evaluate_wikikg2(model, loader, evaluate_mode, save_path) elif (data_mode == 'wikikg90m'): evaluate_wikikg90m(model, loader, evaluate_mode, save_path) else: model.eval() with paddle.no_grad(): h_metrics = [] t_metrics = [] output = {'h,r->t': {}, 't,r->h': {}, 'average': {}} for (h, r, t) in tqdm(loader): t_score = model.predict(h, r, mode='tail') h_score = model.predict(t, r, mode='head') if (filter_dict is not None): h_filter_list = [filter_dict['head'][(ti, ri)] for (ti, ri) in zip(t.numpy(), r.numpy())] t_filter_list = [filter_dict['tail'][(hi, ri)] for (hi, ri) in zip(h.numpy(), r.numpy())] else: h_filter_list = None t_filter_list = None h_metrics += calculate_metrics(h_score, h, h_filter_list) t_metrics += calculate_metrics(t_score, t, t_filter_list) for metric in h_metrics[0].keys(): output['t,r->h'][metric] = np.mean([x[metric] for x in h_metrics]) output['h,r->t'][metric] = np.mean([x[metric] for x in t_metrics]) output['average'][metric] = ((output['t,r->h'][metric] + output['h,r->t'][metric]) / 2) logging.info(('-------------- %s result --------------' % evaluate_mode)) logging.info(('t,r->h |' + ' '.join(['{}: {}'.format(k, v) for (k, v) in output['t,r->h'].items()]))) logging.info(('h,r->t |' + ' '.join(['{}: {}'.format(k, v) for (k, v) in output['h,r->t'].items()]))) logging.info(('average |' + ' '.join(['{}: {}'.format(k, v) for (k, v) in output['average'].items()]))) logging.info('-----------------------------------------')
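For the filtered setting, evaluate() indexes filter_dict by (entity, relation) pairs; a sketch of the assumed layout with made-up ids:

filter_dict = {
    'head': {(5, 0): [1, 7]},  # known true heads for (tail=5, relation=0), masked out of head ranking
    'tail': {(1, 0): [5, 9]},  # known true tails for (head=1, relation=0), masked out of tail ranking
}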
def gram_schimidt_process(embeds, num_elem, use_scale): ' Orthogonalize embeddings.\n ' num_embed = embeds.shape[0] assert (embeds.shape[1] == num_elem) assert (embeds.shape[2] == (num_elem + int(use_scale))) if use_scale: scales = embeds[:, :, (- 1)] embeds = embeds[:, :, :num_elem] u = [embeds[:, 0]] uu = ([0] * num_elem) uu[0] = (u[0] * u[0]).sum(axis=(- 1)) u_d = embeds[:, 1:] ushape = (num_embed, 1, (- 1)) for i in range(1, num_elem): tmp_a = (embeds[:, i:] * u[(i - 1)].reshape(ushape)).sum(axis=(- 1)) tmp_b = uu[(i - 1)].reshape((num_embed, (- 1))) tmp_u = (tmp_a / tmp_b).reshape((num_embed, (- 1), 1)) u_d = (u_d - (u[(- 1)].reshape(ushape) * tmp_u)) u_i = u_d[:, 0] if (u_d.shape[1] > 1): u_d = u_d[:, 1:] uu[i] = (u_i * u_i).sum(axis=(- 1)) u.append(u_i) u = np.stack(u, axis=1) u_norm = np.linalg.norm(u, axis=(- 1), keepdims=True) u = (u / u_norm) if use_scale: u = np.concatenate([u, scales.reshape((num_embed, (- 1), 1))], axis=(- 1)) return u
8,071,425,646,455,714,000
Orthogonalize embeddings.
apps/Graph4KG/utils.py
gram_schimidt_process
LemonNoel/PGL
python
def gram_schimidt_process(embeds, num_elem, use_scale): ' \n ' num_embed = embeds.shape[0] assert (embeds.shape[1] == num_elem) assert (embeds.shape[2] == (num_elem + int(use_scale))) if use_scale: scales = embeds[:, :, (- 1)] embeds = embeds[:, :, :num_elem] u = [embeds[:, 0]] uu = ([0] * num_elem) uu[0] = (u[0] * u[0]).sum(axis=(- 1)) u_d = embeds[:, 1:] ushape = (num_embed, 1, (- 1)) for i in range(1, num_elem): tmp_a = (embeds[:, i:] * u[(i - 1)].reshape(ushape)).sum(axis=(- 1)) tmp_b = uu[(i - 1)].reshape((num_embed, (- 1))) tmp_u = (tmp_a / tmp_b).reshape((num_embed, (- 1), 1)) u_d = (u_d - (u[(- 1)].reshape(ushape) * tmp_u)) u_i = u_d[:, 0] if (u_d.shape[1] > 1): u_d = u_d[:, 1:] uu[i] = (u_i * u_i).sum(axis=(- 1)) u.append(u_i) u = np.stack(u, axis=1) u_norm = np.linalg.norm(u, axis=(- 1), keepdims=True) u = (u / u_norm) if use_scale: u = np.concatenate([u, scales.reshape((num_embed, (- 1), 1))], axis=(- 1)) return u
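The routine runs classical Gram-Schmidt over each embedding's num_elem vectors and normalises them ('schimidt' is the repository's own spelling), optionally carrying a trailing scale column through unchanged. A quick numpy check of the promised orthonormality:

>>> import numpy as np
>>> rng = np.random.default_rng(0)
>>> e = rng.standard_normal((4, 3, 3))  # 4 embeddings, 3 vectors of dimension 3
>>> u = gram_schimidt_process(e, 3, use_scale=False)
>>> bool(np.allclose(u[0] @ u[0].T, np.eye(3)))
True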
def decorate(func): 'decorate func\n ' @functools.wraps(func) def wrapper(*args, **kwargs): 'wrapper func\n ' logging.info(f'[{name}] start...') ts = time.time() result = func(*args, **kwargs) te = time.time() costs = (te - ts) if (costs < 0.0001): cost_str = ('%f sec' % costs) elif (costs > 3600): cost_str = ('%.4f sec (%.4f hours)' % (costs, (costs / 3600.0))) else: cost_str = ('%.4f sec' % costs) logging.info(f'[{name}] finished! It takes {cost_str} s') return result return wrapper
-8,385,293,892,817,383,000
decorate func
apps/Graph4KG/utils.py
decorate
LemonNoel/PGL
python
def decorate(func): '\n ' @functools.wraps(func) def wrapper(*args, **kwargs): 'wrapper func\n ' logging.info(f'[{name}] start...') ts = time.time() result = func(*args, **kwargs) te = time.time() costs = (te - ts) if (costs < 0.0001): cost_str = ('%f sec' % costs) elif (costs > 3600): cost_str = ('%.4f sec (%.4f hours)' % (costs, (costs / 3600.0))) else: cost_str = ('%.4f sec' % costs) logging.info(f'[{name}] finished! It takes {cost_str} s') return result return wrapper
@functools.wraps(func) def wrapper(*args, **kwargs): 'wrapper func\n ' logging.info(f'[{name}] start...') ts = time.time() result = func(*args, **kwargs) te = time.time() costs = (te - ts) if (costs < 0.0001): cost_str = ('%f sec' % costs) elif (costs > 3600): cost_str = ('%.4f sec (%.4f hours)' % (costs, (costs / 3600.0))) else: cost_str = ('%.4f sec' % costs) logging.info(f'[{name}] finished! It takes {cost_str} s') return result
5,612,435,469,472,388,000
wrapper func
apps/Graph4KG/utils.py
wrapper
LemonNoel/PGL
python
@functools.wraps(func) def wrapper(*args, **kwargs): '\n ' logging.info(f'[{name}] start...') ts = time.time() result = func(*args, **kwargs) te = time.time() costs = (te - ts) if (costs < 0.0001): cost_str = ('%f sec' % costs) elif (costs > 3600): cost_str = ('%.4f sec (%.4f hours)' % (costs, (costs / 3600.0))) else: cost_str = ('%.4f sec' % costs) logging.info(f'[{name}] finished! It takes {cost_str} s') return result
def test_address(self): 'Tests address makes an address that identifies as the correct AddressSpace' user_id = addresser.user.unique_id() user_address = addresser.user.address(user_id) self.assertIsAddress(user_address) self.assertEqual(addresser.get_address_type(user_address), addresser.AddressSpace.USER)
-6,768,783,794,536,796,000
Tests address makes an address that identifies as the correct AddressSpace
tests/rbac/common/addresser/user_test.py
test_address
kthblmfld/sawtooth-next-directory
python
def test_address(self): user_id = addresser.user.unique_id() user_address = addresser.user.address(user_id) self.assertIsAddress(user_address) self.assertEqual(addresser.get_address_type(user_address), addresser.AddressSpace.USER)
def test_unique_id(self): 'Tests that unique_id generates a unique identifier and is unique' id1 = addresser.user.unique_id() id2 = addresser.user.unique_id() self.assertIsIdentifier(id1) self.assertIsIdentifier(id2) self.assertNotEqual(id1, id2)
-7,196,710,362,681,734,000
Tests that unique_id generates a unique identifier and is unique
tests/rbac/common/addresser/user_test.py
test_unique_id
kthblmfld/sawtooth-next-directory
python
def test_unique_id(self): id1 = addresser.user.unique_id() id2 = addresser.user.unique_id() self.assertIsIdentifier(id1) self.assertIsIdentifier(id2) self.assertNotEqual(id1, id2)
def test_get_address_type(self): 'Tests that get_address_type returns AddressSpace.USER if it is a user\n address, and None if it is of another address type' user_address = addresser.user.address(addresser.user.unique_id()) role_address = addresser.role.address(addresser.role.unique_id()) self.assertEqual(addresser.get_address_type(user_address), addresser.AddressSpace.USER) self.assertEqual(addresser.user.get_address_type(user_address), addresser.AddressSpace.USER) self.assertIsNone(addresser.user.get_address_type(role_address)) self.assertEqual(addresser.get_address_type(role_address), addresser.AddressSpace.ROLES_ATTRIBUTES)
-2,365,047,442,419,383,000
Tests that get_address_type returns AddressSpace.USER if it is a user address, and None if it is of another address type
tests/rbac/common/addresser/user_test.py
test_get_address_type
kthblmfld/sawtooth-next-directory
python
def test_get_address_type(self): 'Tests that get_address_type returns AddressSpace.USER if it is a user\n address, and None if it is of another address type' user_address = addresser.user.address(addresser.user.unique_id()) role_address = addresser.role.address(addresser.role.unique_id()) self.assertEqual(addresser.get_address_type(user_address), addresser.AddressSpace.USER) self.assertEqual(addresser.user.get_address_type(user_address), addresser.AddressSpace.USER) self.assertIsNone(addresser.user.get_address_type(role_address)) self.assertEqual(addresser.get_address_type(role_address), addresser.AddressSpace.ROLES_ATTRIBUTES)
def test_get_addresser(self): 'Test that get_addresser returns the addresser class if it is a\n user address, and None if it is of another address type' user_address = addresser.user.address(addresser.user.unique_id()) other_address = addresser.role.address(addresser.role.unique_id()) self.assertIsInstance(addresser.get_addresser(user_address), type(addresser.user)) self.assertIsInstance(addresser.user.get_addresser(user_address), type(addresser.user)) self.assertIsNone(addresser.user.get_addresser(other_address))
-6,577,220,945,678,064,000
Test that get_addresser returns the addresser class if it is a user address, and None if it is of another address type
tests/rbac/common/addresser/user_test.py
test_get_addresser
kthblmfld/sawtooth-next-directory
python
def test_get_addresser(self): 'Test that get_addresser returns the addresser class if it is a\n user address, and None if it is of another address type' user_address = addresser.user.address(addresser.user.unique_id()) other_address = addresser.role.address(addresser.role.unique_id()) self.assertIsInstance(addresser.get_addresser(user_address), type(addresser.user)) self.assertIsInstance(addresser.user.get_addresser(user_address), type(addresser.user)) self.assertIsNone(addresser.user.get_addresser(other_address))
def test_user_parse(self): 'Test addresser.user.parse returns a parsed address if it is a user address' user_id = addresser.user.unique_id() user_address = addresser.user.address(user_id) parsed = addresser.user.parse(user_address) self.assertEqual(parsed.object_type, addresser.ObjectType.USER) self.assertEqual(parsed.related_type, addresser.ObjectType.NONE) self.assertEqual(parsed.relationship_type, addresser.RelationshipType.ATTRIBUTES) self.assertEqual(parsed.address_type, addresser.AddressSpace.USER) self.assertEqual(parsed.object_id, user_id) self.assertEqual(parsed.related_id, None)
433,924,049,134,503,230
Test addresser.user.parse returns a parsed address if it is a user address
tests/rbac/common/addresser/user_test.py
test_user_parse
kthblmfld/sawtooth-next-directory
python
def test_user_parse(self): user_id = addresser.user.unique_id() user_address = addresser.user.address(user_id) parsed = addresser.user.parse(user_address) self.assertEqual(parsed.object_type, addresser.ObjectType.USER) self.assertEqual(parsed.related_type, addresser.ObjectType.NONE) self.assertEqual(parsed.relationship_type, addresser.RelationshipType.ATTRIBUTES) self.assertEqual(parsed.address_type, addresser.AddressSpace.USER) self.assertEqual(parsed.object_id, user_id) self.assertEqual(parsed.related_id, None)
def test_addresser_parse(self): 'Test addresser.parse returns a parsed address' user_id = addresser.user.unique_id() user_address = addresser.user.address(user_id) parsed = addresser.parse(user_address) self.assertEqual(parsed.object_type, addresser.ObjectType.USER) self.assertEqual(parsed.related_type, addresser.ObjectType.NONE) self.assertEqual(parsed.relationship_type, addresser.RelationshipType.ATTRIBUTES) self.assertEqual(parsed.address_type, addresser.AddressSpace.USER) self.assertEqual(parsed.object_id, user_id) self.assertEqual(parsed.related_id, None)
7,162,493,697,817,564,000
Test addresser.parse returns a parsed address
tests/rbac/common/addresser/user_test.py
test_addresser_parse
kthblmfld/sawtooth-next-directory
python
def test_addresser_parse(self): user_id = addresser.user.unique_id() user_address = addresser.user.address(user_id) parsed = addresser.parse(user_address) self.assertEqual(parsed.object_type, addresser.ObjectType.USER) self.assertEqual(parsed.related_type, addresser.ObjectType.NONE) self.assertEqual(parsed.relationship_type, addresser.RelationshipType.ATTRIBUTES) self.assertEqual(parsed.address_type, addresser.AddressSpace.USER) self.assertEqual(parsed.object_id, user_id) self.assertEqual(parsed.related_id, None)
def test_parse_other(self): 'Test that parse returns None if it is not a user address' other_address = addresser.role.address(addresser.role.unique_id()) self.assertIsNone(addresser.user.parse(other_address))
-610,400,045,570,985,700
Test that parse returns None if it is not a user address
tests/rbac/common/addresser/user_test.py
test_parse_other
kthblmfld/sawtooth-next-directory
python
def test_parse_other(self): other_address = addresser.role.address(addresser.role.unique_id()) self.assertIsNone(addresser.user.parse(other_address))
def test_addresses_are(self): 'Test that addresses_are returns True if all addresses are user\n addresses, and False if any addresses are of a different address type' user_address1 = addresser.user.address(addresser.user.unique_id()) user_address2 = addresser.user.address(addresser.user.unique_id()) other_address = addresser.role.address(addresser.role.unique_id()) self.assertTrue(addresser.user.addresses_are([user_address1])) self.assertTrue(addresser.user.addresses_are([user_address1, user_address2])) self.assertFalse(addresser.user.addresses_are([other_address])) self.assertFalse(addresser.user.addresses_are([user_address1, other_address])) self.assertFalse(addresser.user.addresses_are([other_address, user_address1])) self.assertTrue(addresser.user.addresses_are([]))
779,494,870,157,806,300
Test that addresses_are returns True if all addresses are user addresses, and False if any addresses are of a different address type
tests/rbac/common/addresser/user_test.py
test_addresses_are
kthblmfld/sawtooth-next-directory
python
def test_addresses_are(self): 'Test that addresses_are returns True if all addresses are user\n addresses, and False if any addresses are of a different address type' user_address1 = addresser.user.address(addresser.user.unique_id()) user_address2 = addresser.user.address(addresser.user.unique_id()) other_address = addresser.role.address(addresser.role.unique_id()) self.assertTrue(addresser.user.addresses_are([user_address1])) self.assertTrue(addresser.user.addresses_are([user_address1, user_address2])) self.assertFalse(addresser.user.addresses_are([other_address])) self.assertFalse(addresser.user.addresses_are([user_address1, other_address])) self.assertFalse(addresser.user.addresses_are([other_address, user_address1])) self.assertTrue(addresser.user.addresses_are([]))
def test_address_deterministic(self): 'Tests that address generation is deterministic: the same ID always yields the same address' user_id1 = addresser.user.unique_id() user_address1 = addresser.user.address(user_id1) user_address2 = addresser.user.address(user_id1) self.assertIsAddress(user_address1) self.assertIsAddress(user_address2) self.assertEqual(user_address1, user_address2) self.assertEqual(addresser.get_address_type(user_address1), addresser.AddressSpace.USER)
507,684,526,630,221,630
Tests that address generation is deterministic: the same ID always yields the same address
tests/rbac/common/addresser/user_test.py
test_address_deterministic
kthblmfld/sawtooth-next-directory
python
def test_address_deterministic(self): user_id1 = addresser.user.unique_id() user_address1 = addresser.user.address(user_id1) user_address2 = addresser.user.address(user_id1) self.assertIsAddress(user_address1) self.assertIsAddress(user_address2) self.assertEqual(user_address1, user_address2) self.assertEqual(addresser.get_address_type(user_address1), addresser.AddressSpace.USER)
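Deterministic addressing of this kind is usually implemented by hashing the identifier under a fixed namespace prefix. The sketch below illustrates only the general pattern; the prefix value and address length are assumptions, not the actual sawtooth-next-directory scheme:

import hashlib
import uuid

NAMESPACE = '9f4448'  # hypothetical 6-hex-char namespace prefix (assumption)

def unique_id():
    return uuid.uuid4().hex

def address(object_id):
    # Hashing makes the mapping deterministic: same id, same address.
    return NAMESPACE + hashlib.sha512(object_id.encode()).hexdigest()[:64]

uid = unique_id()
assert address(uid) == address(uid)          # deterministic
assert address(uid) != address(unique_id())  # different ids differ (hash collisions aside)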
def test_address_random(self): 'Tests that address() makes unique addresses for different inputs' user_id1 = addresser.user.unique_id() user_id2 = addresser.user.unique_id() user_address1 = addresser.user.address(user_id1) user_address2 = addresser.user.address(user_id2) self.assertIsAddress(user_address1) self.assertIsAddress(user_address2) self.assertNotEqual(user_address1, user_address2) self.assertEqual(addresser.get_address_type(user_address1), addresser.AddressSpace.USER) self.assertEqual(addresser.get_address_type(user_address2), addresser.AddressSpace.USER)
-283,297,163,203,004,260
Tests that address() makes unique addresses for different inputs
tests/rbac/common/addresser/user_test.py
test_address_random
kthblmfld/sawtooth-next-directory
python
def test_address_random(self): user_id1 = addresser.user.unique_id() user_id2 = addresser.user.unique_id() user_address1 = addresser.user.address(user_id1) user_address2 = addresser.user.address(user_id2) self.assertIsAddress(user_address1) self.assertIsAddress(user_address2) self.assertNotEqual(user_address1, user_address2) self.assertEqual(addresser.get_address_type(user_address1), addresser.AddressSpace.USER) self.assertEqual(addresser.get_address_type(user_address2), addresser.AddressSpace.USER)
def tile_index(A, B): '\n Entrywise comparison index of tile index (column) vectors.\n ' (AA, BB) = broadcast_arrays(A, B) if DEBUGGING: shape = (max(A.shape[0], B.shape[0]), 1) _check_shape('AA', AA, shape) _check_shape('BB', BB, shape) return (AA, BB)
1,716,995,872,579,375,000
Entrywise comparison index of tile index (column) vectors.
neuroswarms/matrix.py
tile_index
jdmonaco/neuroswarms
python
def tile_index(A, B): '\n \n ' (AA, BB) = broadcast_arrays(A, B) if DEBUGGING: shape = (max(A.shape[0], B.shape[0]), 1) _check_shape('AA', AA, shape) _check_shape('BB', BB, shape) return (AA, BB)
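numpy's broadcast_arrays does the heavy lifting here: it expands the two column vectors to a common shape so they can be compared entrywise. A quick illustration:

import numpy as np

A = np.array([[0], [1], [2]])        # (3, 1) column of tile indices
B = np.array([[5]])                  # (1, 1) broadcasts against A
AA, BB = np.broadcast_arrays(A, B)
print(AA.shape, BB.shape)            # (3, 1) (3, 1)
print(BB.ravel())                    # [5 5 5]: B repeated entrywise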
def pairwise_tile_index(A, B): '\n Pairwise comparison index of tile index (column) vectors.\n ' (AA, BB) = broadcast_arrays(A, B.T) if DEBUGGING: shape = (len(A), len(B)) _check_shape('AA', AA, shape) _check_shape('BB', BB, shape) return (AA, BB)
-8,375,266,190,173,897,000
Pairwise comparison index of tile index (column) vectors.
neuroswarms/matrix.py
pairwise_tile_index
jdmonaco/neuroswarms
python
def pairwise_tile_index(A, B): '\n \n ' (AA, BB) = broadcast_arrays(A, B.T) if DEBUGGING: shape = (len(A), len(B)) _check_shape('AA', AA, shape) _check_shape('BB', BB, shape) return (AA, BB)
def pairwise_phasediffs(A, B): '\n Compute synchronizing phase differences between phase pairs.\n ' N_A = len(A) N_B = len(B) DD_shape = (N_A, N_B) if DEBUGGING: _check_ndim('A', A, 2) _check_ndim('B', B, 2) _check_shape('A', A, 1, axis=1) _check_shape('B', B, 1, axis=1) return (B.T - A)
3,069,492,397,436,846,000
Compute synchronizing phase differences between phase pairs.
neuroswarms/matrix.py
pairwise_phasediffs
jdmonaco/neuroswarms
python
def pairwise_phasediffs(A, B): '\n \n ' N_A = len(A) N_B = len(B) DD_shape = (N_A, N_B) if DEBUGGING: _check_ndim('A', A, 2) _check_ndim('B', B, 2) _check_shape('A', A, 1, axis=1) _check_shape('B', B, 1, axis=1) return (B.T - A)
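The B.T - A expression broadcasts an (N_A, 1) column against a (1, N_B) row, so entry (i, j) of the result is B[j] - A[i]. A toy check:

import numpy as np

A = np.array([[0.0], [np.pi / 2]])      # (2, 1) phases
B = np.array([[np.pi], [0.0], [1.0]])   # (3, 1) phases
D = B.T - A                             # (2, 3) phase-difference matrix
print(D.shape)                          # (2, 3)
print(np.isclose(D[0, 0], np.pi))       # True: B[0] - A[0]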
def distances(A, B): '\n Compute distances between points in entrywise order.\n ' (AA, BB) = broadcast_arrays(A, B) shape = AA.shape if DEBUGGING: _check_ndim('AA', AA, 2) _check_ndim('BB', BB, 2) _check_shape('AA', AA, 2, axis=1) _check_shape('BB', BB, 2, axis=1) return hypot((AA[:, 0] - BB[:, 0]), (AA[:, 1] - BB[:, 1]))[:, AX]
-7,030,238,118,766,872,000
Compute distances between points in entrywise order.
neuroswarms/matrix.py
distances
jdmonaco/neuroswarms
python
def distances(A, B): '\n \n ' (AA, BB) = broadcast_arrays(A, B) shape = AA.shape if DEBUGGING: _check_ndim('AA', AA, 2) _check_ndim('BB', BB, 2) _check_shape('AA', AA, 2, axis=1) _check_shape('BB', BB, 2, axis=1) return hypot((AA[:, 0] - BB[:, 0]), (AA[:, 1] - BB[:, 1]))[:, AX]
def pairwise_unit_diffs(A, B): '\n Compute attracting unit-vector differences between pairs of points.\n ' DD = pairwise_position_deltas(A, B) D_norm = hypot(DD[(..., 0)], DD[(..., 1)]) nz = D_norm.nonzero() DD[nz] /= D_norm[nz][(..., AX)] return DD
-424,086,748,636,339,500
Compute attracting unit-vector differences between pairs of points.
neuroswarms/matrix.py
pairwise_unit_diffs
jdmonaco/neuroswarms
python
def pairwise_unit_diffs(A, B): '\n \n ' DD = pairwise_position_deltas(A, B) D_norm = hypot(DD[(..., 0)], DD[(..., 1)]) nz = D_norm.nonzero() DD[nz] /= D_norm[nz][(..., AX)] return DD
def pairwise_distances(A, B): '\n Compute distances between pairs of points.\n ' DD = pairwise_position_deltas(A, B) return hypot(DD[(..., 0)], DD[(..., 1)])
721,351,548,684,608,900
Compute distances between pairs of points.
neuroswarms/matrix.py
pairwise_distances
jdmonaco/neuroswarms
python
def pairwise_distances(A, B): '\n \n ' DD = pairwise_position_deltas(A, B) return hypot(DD[(..., 0)], DD[(..., 1)])
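As a sanity check, the delta-then-hypot computation should agree with scipy's cdist on the same point sets (scipy is used here only for comparison and is an assumption, not a dependency of the original module):

import numpy as np
from scipy.spatial.distance import cdist

A = np.random.rand(4, 2)                 # 4 points in the plane
B = np.random.rand(6, 2)                 # 6 points
DD = B[None, ...] - A[:, None, :]        # (4, 6, 2) pairwise deltas, as above
D = np.hypot(DD[..., 0], DD[..., 1])     # (4, 6) Euclidean distances
assert np.allclose(D, cdist(A, B))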
def pairwise_position_deltas(A, B): '\n Compute attracting component deltas between pairs of points.\n ' N_A = len(A) N_B = len(B) if DEBUGGING: _check_ndim('A', A, 2) _check_ndim('B', B, 2) _check_shape('A', A, 2, axis=1) _check_shape('B', B, 2, axis=1) AA = empty((N_A, N_B, 2), DISTANCE_DTYPE) AA[:] = A[:, AX, :] return (B[(AX, ...)] - AA)
-5,928,309,153,118,387,000
Compute attracting component deltas between pairs of points.
neuroswarms/matrix.py
pairwise_position_deltas
jdmonaco/neuroswarms
python
def pairwise_position_deltas(A, B): '\n \n ' N_A = len(A) N_B = len(B) if DEBUGGING: _check_ndim('A', A, 2) _check_ndim('B', B, 2) _check_shape('A', A, 2, axis=1) _check_shape('B', B, 2, axis=1) AA = empty((N_A, N_B, 2), DISTANCE_DTYPE) AA[:] = A[:, AX, :] return (B[(AX, ...)] - AA)
def somatic_motion_update(D_up, D_cur, X, V): "\n Compute updated positions by averaging pairwise difference vectors for\n mutually visible pairs with equal bidirectional adjustments within each\n pair. The updated distance matrix does not need to be symmetric; it\n represents 'desired' updates based on recurrent learning.\n\n :D_up: R(N,N)-matrix of updated distances\n :D_cur: R(N,N)-matrix of current distances\n :X: R(N,2)-matrix of current positions\n :V: {0,1}(N,2)-matrix of current agent visibility\n :returns: R(N,2)-matrix of updated positions\n " N = len(X) D_shape = (N, N) if DEBUGGING: _check_ndim('X', X, 2) _check_shape('X', X, 2, axis=1) _check_shape('D_up', D_up, D_shape) _check_shape('D_cur', D_cur, D_shape) _check_shape('V', V, D_shape) XX = empty((N, N, 2)) XX[:] = X[:, AX, :] XT = swapaxes(XX, 0, 1) D_inf = (D_up == inf) norm = (V * (~ D_inf)) N = norm.sum(axis=1) valid = N.nonzero()[0] norm[valid] /= (2 * N[(valid, AX)]) D_up[D_inf] = D_cur[D_inf] = 0.0 DX = (XX - XT) DX_norm = hypot(DX[(..., 0)], DX[(..., 1)]) valid = DX_norm.nonzero() DX[valid] /= DX_norm[valid][:, AX] return ((norm[(..., AX)] * (D_up - D_cur)[(..., AX)]) * DX).sum(axis=1)
5,209,787,987,385,210,000
Compute updated positions by averaging pairwise difference vectors for mutually visible pairs with equal bidirectional adjustments within each pair. The updated distance matrix does not need to be symmetric; it represents 'desired' updates based on recurrent learning. :D_up: R(N,N)-matrix of updated distances :D_cur: R(N,N)-matrix of current distances :X: R(N,2)-matrix of current positions :V: {0,1}(N,2)-matrix of current agent visibility :returns: R(N,2)-matrix of updated positions
neuroswarms/matrix.py
somatic_motion_update
jdmonaco/neuroswarms
python
def somatic_motion_update(D_up, D_cur, X, V): "\n Compute updated positions by averaging pairwise difference vectors for\n mutually visible pairs with equal bidirectional adjustments within each\n pair. The updated distance matrix does not need to be symmetric; it\n represents 'desired' updates based on recurrent learning.\n\n :D_up: R(N,N)-matrix of updated distances\n :D_cur: R(N,N)-matrix of current distances\n :X: R(N,2)-matrix of current positions\n :V: {0,1}(N,2)-matrix of current agent visibility\n :returns: R(N,2)-matrix of updated positions\n " N = len(X) D_shape = (N, N) if DEBUGGING: _check_ndim('X', X, 2) _check_shape('X', X, 2, axis=1) _check_shape('D_up', D_up, D_shape) _check_shape('D_cur', D_cur, D_shape) _check_shape('V', V, D_shape) XX = empty((N, N, 2)) XX[:] = X[:, AX, :] XT = swapaxes(XX, 0, 1) D_inf = (D_up == inf) norm = (V * (~ D_inf)) N = norm.sum(axis=1) valid = N.nonzero()[0] norm[valid] /= (2 * N[(valid, AX)]) D_up[D_inf] = D_cur[D_inf] = 0.0 DX = (XX - XT) DX_norm = hypot(DX[(..., 0)], DX[(..., 1)]) valid = DX_norm.nonzero() DX[valid] /= DX_norm[valid][:, AX] return ((norm[(..., AX)] * (D_up - D_cur)[(..., AX)]) * DX).sum(axis=1)
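Shape-wise, the update maps (N, N) distance matrices plus (N, 2) positions to an (N, 2) displacement. A toy call, assuming the neuroswarms package and the functions above are importable:

import numpy as np
from neuroswarms.matrix import pairwise_distances, somatic_motion_update

N = 5
X = np.random.rand(N, 2)
D_cur = pairwise_distances(X, X)    # current pairwise distances
D_up = 0.9 * D_cur                  # 'desired' 10% contraction
V = np.ones((N, N))                 # every pair mutually visible
# Pass copies: the function writes into its distance arguments where entries are inf.
dX = somatic_motion_update(D_up.copy(), D_cur.copy(), X, V)
print(dX.shape)                     # (N, 2)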
def reward_motion_update(D_up, D_cur, X, R, V): "\n Compute updated positions by averaging reward-based unit vectors for\n adjustments of the point only. The updated distance matrix represents\n 'desired' updates based on reward learning.\n\n :D_up: R(N,N_R)-matrix of updated distances between points and rewards\n :D_cur: R(N,N_R)-matrix of current distances between points and rewards\n :X: R(N,2)-matrix of current point positions\n :R: R(N_R,2)-matrix of current reward positions\n :V: {0,1}(N_R,2)-matrix of current agent-reward visibility\n :returns: R(N,2)-matrix of updated positions\n " N = len(X) N_R = len(R) D_shape = (N, N_R) if DEBUGGING: _check_ndim('X', X, 2) _check_ndim('R', R, 2) _check_shape('X', X, 2, axis=1) _check_shape('R', R, 2, axis=1) _check_shape('D_up', D_up, D_shape) _check_shape('D_cur', D_cur, D_shape) _check_shape('V', V, D_shape) XX = empty((N, N_R, 2)) XX[:] = X[:, AX, :] D_inf = (D_up == inf) norm = (V * (~ D_inf)) N = norm.sum(axis=1) valid = N.nonzero()[0] norm[valid] /= N[(valid, AX)] D_up[D_inf] = D_cur[D_inf] = 0.0 DR = (XX - R[AX]) DR_norm = hypot(DR[(..., 0)], DR[(..., 1)]) valid = DR_norm.nonzero() DR[valid] /= DR_norm[valid][:, AX] return ((norm[(..., AX)] * (D_up - D_cur)[(..., AX)]) * DR).sum(axis=1)
7,204,605,029,253,445,000
Compute updated positions by averaging reward-based unit vectors for adjustments of the point only. The updated distance matrix represents 'desired' updates based on reward learning. :D_up: R(N,N_R)-matrix of updated distances between points and rewards :D_cur: R(N,N_R)-matrix of current distances between points and rewards :X: R(N,2)-matrix of current point positions :R: R(N_R,2)-matrix of current reward positions :V: {0,1}(N_R,2)-matrix of current agent-reward visibility :returns: R(N,2)-matrix of updated positions
neuroswarms/matrix.py
reward_motion_update
jdmonaco/neuroswarms
python
def reward_motion_update(D_up, D_cur, X, R, V): "\n Compute updated positions by averaging reward-based unit vectors for\n adjustments of the point only. The updated distance matrix represents\n 'desired' updates based on reward learning.\n\n :D_up: R(N,N_R)-matrix of updated distances between points and rewards\n :D_cur: R(N,N_R)-matrix of current distances between points and rewards\n :X: R(N,2)-matrix of current point positions\n :R: R(N_R,2)-matrix of current reward positions\n :V: {0,1}(N_R,2)-matrix of current agent-reward visibility\n :returns: R(N,2)-matrix of updated positions\n " N = len(X) N_R = len(R) D_shape = (N, N_R) if DEBUGGING: _check_ndim('X', X, 2) _check_ndim('R', R, 2) _check_shape('X', X, 2, axis=1) _check_shape('R', R, 2, axis=1) _check_shape('D_up', D_up, D_shape) _check_shape('D_cur', D_cur, D_shape) _check_shape('V', V, D_shape) XX = empty((N, N_R, 2)) XX[:] = X[:, AX, :] D_inf = (D_up == inf) norm = (V * (~ D_inf)) N = norm.sum(axis=1) valid = N.nonzero()[0] norm[valid] /= N[(valid, AX)] D_up[D_inf] = D_cur[D_inf] = 0.0 DR = (XX - R[AX]) DR_norm = hypot(DR[(..., 0)], DR[(..., 1)]) valid = DR_norm.nonzero() DR[valid] /= DR_norm[valid][:, AX] return ((norm[(..., AX)] * (D_up - D_cur)[(..., AX)]) * DR).sum(axis=1)
def run_shortcut(name: str): 'Runs a shortcut on macOS' pass
-6,645,128,257,056,837,000
Runs a shortcut on macOS
code/platforms/mac/user.py
run_shortcut
palexjo/pokey_talon
python
def run_shortcut(name: str): pass
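The body is a stub because Talon action declarations get their implementations from a matching context elsewhere. One plausible macOS implementation shells out to the shortcuts CLI (available since macOS 12); this is a sketch, not the module's actual context implementation:

import subprocess

def run_shortcut(name: str):
    # 'shortcuts run <name>' executes a Shortcuts automation by name.
    subprocess.run(['shortcuts', 'run', name], check=True)

# run_shortcut('Toggle Do Not Disturb')  # assumes a shortcut with this name exists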
def cleaner(dummy, value, *_): 'Cleans out unsafe HTML tags.\n\n Uses bleach and unescape until it reaches a fixed point.\n\n Args:\n dummy: unused, sqlalchemy will pass in the model class\n value: html (string) to be cleaned\n Returns:\n Html (string) without unsafe tags.\n ' if (value is None): return value if (not isinstance(value, basestring)): return value value = unicode(value) buggy_strings = re.finditer(BUGGY_STRINGS_PATTERN, PARSER.unescape(value)) while True: lastvalue = value value = PARSER.unescape(CLEANER.clean(value)) if (value == lastvalue): break if buggy_strings: backup_value = value updated_buggy_strings = re.finditer(BUGGY_STRINGS_PATTERN, value) for match in updated_buggy_strings: try: old_value = buggy_strings.next().group() (start, finish) = match.span() value = ((value[:start] + old_value) + value[finish:]) except StopIteration: return backup_value return value
6,719,119,775,714,724,000
Cleans out unsafe HTML tags. Uses bleach and unescape until it reaches a fixed point. Args: dummy: unused, sqlalchemy will pass in the model class value: html (string) to be cleaned Returns: Html (string) without unsafe tags.
src/ggrc/utils/html_cleaner.py
cleaner
VRolich/ggrc-core
python
def cleaner(dummy, value, *_): 'Cleans out unsafe HTML tags.\n\n Uses bleach and unescape until it reaches a fixed point.\n\n Args:\n dummy: unused, sqlalchemy will pass in the model class\n value: html (string) to be cleaned\n Returns:\n Html (string) without unsafe tags.\n ' if (value is None): return value if (not isinstance(value, basestring)): return value value = unicode(value) buggy_strings = re.finditer(BUGGY_STRINGS_PATTERN, PARSER.unescape(value)) while True: lastvalue = value value = PARSER.unescape(CLEANER.clean(value)) if (value == lastvalue): break if buggy_strings: backup_value = value updated_buggy_strings = re.finditer(BUGGY_STRINGS_PATTERN, value) for match in updated_buggy_strings: try: old_value = buggy_strings.next().group() (start, finish) = match.span() value = ((value[:start] + old_value) + value[finish:]) except StopIteration: return backup_value return value
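The core loop, alternating clean and unescape until the string stops changing, ports directly to Python 3 with html.unescape in place of the HTMLParser-based PARSER. A minimal sketch (the bleach configuration below is assumed, not ggrc's actual CLEANER):

import html
import bleach

def clean_to_fixpoint(value):
    if not isinstance(value, str):
        return value
    while True:
        lastvalue = value
        # Clean, then unescape; repeat until the output stops changing.
        value = html.unescape(bleach.clean(value, strip=True))
        if value == lastvalue:
            return value

# Entity-encoded markup is unescaped, re-cleaned, and converges:
print(clean_to_fixpoint('&lt;script&gt;alert(1)&lt;/script&gt;hi'))  # 'alert(1)hi'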
def predict(self, X): ' Predict the class index of the feature X ' Y_predict = self.clf.predict(self.pca.transform(X)) return Y_predict
4,818,342,988,168,666,000
Predict the class index of the feature X
utils/lib_classifier.py
predict
eddylamhw/trAIner24
python
def predict(self, X): ' ' Y_predict = self.clf.predict(self.pca.transform(X)) return Y_predict
def predict_and_evaluate(self, te_X, te_Y): ' Test model on test set and obtain accuracy ' te_Y_predict = self.predict(te_X) N = len(te_Y) n = sum((te_Y_predict == te_Y)) accu = (n / N) return (accu, te_Y_predict)
-3,017,998,082,432,039,000
Test model on test set and obtain accuracy
utils/lib_classifier.py
predict_and_evaluate
eddylamhw/trAIner24
python
def predict_and_evaluate(self, te_X, te_Y): ' ' te_Y_predict = self.predict(te_X) N = len(te_Y) n = sum((te_Y_predict == te_Y)) accu = (n / N) return (accu, te_Y_predict)
def train(self, X, Y): ' Train model. The result is saved into self.clf ' n_components = min(NUM_FEATURES_FROM_PCA, X.shape[1]) self.pca = PCA(n_components=n_components, whiten=True) self.pca.fit(X) print('Sum eig values:', np.sum(self.pca.explained_variance_ratio_)) X_new = self.pca.transform(X) print('After PCA, X.shape = ', X_new.shape) self.clf.fit(X_new, Y)
-6,929,529,958,043,339,000
Train model. The result is saved into self.clf
utils/lib_classifier.py
train
eddylamhw/trAIner24
python
def train(self, X, Y): ' ' n_components = min(NUM_FEATURES_FROM_PCA, X.shape[1]) self.pca = PCA(n_components=n_components, whiten=True) self.pca.fit(X) print('Sum eig values:', np.sum(self.pca.explained_variance_ratio_)) X_new = self.pca.transform(X) print('After PCA, X.shape = ', X_new.shape) self.clf.fit(X_new, Y)
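The pattern is the usual scikit-learn two-step: fit a whitening PCA, then fit the classifier on the transformed features. A self-contained sketch with synthetic data (the SVC stand-in and the feature counts are assumptions; the original's classifier is whatever self.clf was constructed as):

import numpy as np
from sklearn.decomposition import PCA
from sklearn.svm import SVC

NUM_FEATURES_FROM_PCA = 50                # assumed cap, mirroring the snippet above

X = np.random.rand(200, 120)              # 200 samples, 120 raw features
Y = np.random.randint(0, 5, size=200)     # 5 action classes

pca = PCA(n_components=min(NUM_FEATURES_FROM_PCA, X.shape[1]), whiten=True)
X_new = pca.fit_transform(X)
print('Sum eig values:', pca.explained_variance_ratio_.sum())

clf = SVC(probability=True)               # stand-in for self.clf
clf.fit(X_new, Y)
print(clf.predict(pca.transform(X[:3])))  # predicted class indices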
def _predict_proba(self, X): ' Predict the probability of feature X belonging to each of the class Y[i] ' Y_probs = self.clf.predict_proba(self.pca.transform(X)) return Y_probs
-5,710,001,211,008,620,000
Predict the probability of feature X belonging to each of the class Y[i]
utils/lib_classifier.py
_predict_proba
eddylamhw/trAIner24
python
def _predict_proba(self, X): ' ' Y_probs = self.clf.predict_proba(self.pca.transform(X)) return Y_probs
def predict(self, skeleton): ' Predict the class (string) of the input raw skeleton ' LABEL_UNKNOWN = '' (is_features_good, features) = self.feature_generator.add_cur_skeleton(skeleton) if is_features_good: features = features.reshape((- 1), features.shape[0]) curr_scores = self.model._predict_proba(features)[0] self.scores = self.smooth_scores(curr_scores) if (self.scores.max() < self.THRESHOLD_SCORE_FOR_DISP): prediced_label = LABEL_UNKNOWN else: predicted_idx = self.scores.argmax() prediced_label = self.action_labels[predicted_idx] else: prediced_label = LABEL_UNKNOWN return prediced_label
-2,700,110,827,794,370,600
Predict the class (string) of the input raw skeleton
utils/lib_classifier.py
predict
eddylamhw/trAIner24
python
def predict(self, skeleton): ' ' LABEL_UNKNOWN = '' (is_features_good, features) = self.feature_generator.add_cur_skeleton(skeleton) if is_features_good: features = features.reshape((- 1), features.shape[0]) curr_scores = self.model._predict_proba(features)[0] self.scores = self.smooth_scores(curr_scores) if (self.scores.max() < self.THRESHOLD_SCORE_FOR_DISP): predicted_label = LABEL_UNKNOWN else: predicted_idx = self.scores.argmax() predicted_label = self.action_labels[predicted_idx] else: predicted_label = LABEL_UNKNOWN return predicted_label
def smooth_scores(self, curr_scores): ' Smooth the current prediction score\n by taking the average with previous scores\n ' self.scores_hist.append(curr_scores) DEQUE_MAX_SIZE = 2 if (len(self.scores_hist) > DEQUE_MAX_SIZE): self.scores_hist.popleft() if 1: score_sums = np.zeros((len(self.action_labels),)) for score in self.scores_hist: score_sums += score score_sums /= len(self.scores_hist) print('\nMean score:\n', score_sums) return score_sums else: score_mul = np.ones((len(self.action_labels),)) for score in self.scores_hist: score_mul *= score return score_mul
-7,176,214,101,721,385,000
Smooth the current prediction score by taking the average with previous scores
utils/lib_classifier.py
smooth_scores
eddylamhw/trAIner24
python
def smooth_scores(self, curr_scores): ' Smooth the current prediction score\n by taking the average with previous scores\n ' self.scores_hist.append(curr_scores) DEQUE_MAX_SIZE = 2 if (len(self.scores_hist) > DEQUE_MAX_SIZE): self.scores_hist.popleft() if 1: score_sums = np.zeros((len(self.action_labels),)) for score in self.scores_hist: score_sums += score score_sums /= len(self.scores_hist) print('\nMean score:\n', score_sums) return score_sums else: score_mul = np.ones((len(self.action_labels),)) for score in self.scores_hist: score_mul *= score return score_mul
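The averaging branch is a short moving average over a deque of recent score vectors; deque(maxlen=...) makes the popleft bookkeeping implicit. A stripped-down version of the same idea:

from collections import deque
import numpy as np

scores_hist = deque(maxlen=2)  # same window as DEQUE_MAX_SIZE above

def smooth(curr_scores):
    scores_hist.append(curr_scores)
    # Mean over however many score vectors are currently in the window.
    return np.mean(scores_hist, axis=0)

print(smooth(np.array([0.9, 0.1])))  # [0.9 0.1]
print(smooth(np.array([0.5, 0.5])))  # [0.7 0.3]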
def args(*types, **ktypes): 'Allow testing of input types:\n argkey=(types) or argkey=type' def decorator(func): def modified(*args, **kargs): position = 1 for (arg, T) in zip(args, types): if (not isinstance(arg, T)): raise TypeError(('Positional arg (%d) should be of type(s) %s, got %s' % (position, T, type(arg)))) position += 1 for (key, arg) in kargs.items(): if (key in ktypes): T = ktypes[key] if (not isinstance(arg, T)): raise TypeError(("Keyworded arg '%s' should be of type(s) %s, got %s" % (key, T, type(arg)))) return func(*args, **kargs) return modified return decorator
8,286,300,610,226,825,000
Allow testing of input types: argkey=(types) or argkey=type
WolfEyes/Utils/TypeChecker.py
args
TBIproject/WolfEye
python
def args(*types, **ktypes): 'Allow testing of input types:\n argkey=(types) or argkey=type' def decorator(func): def modified(*args, **kargs): position = 1 for (arg, T) in zip(args, types): if (not isinstance(arg, T)): raise TypeError(('Positional arg (%d) should be of type(s) %s, got %s' % (position, T, type(arg)))) position += 1 for (key, arg) in kargs.items(): if (key in ktypes): T = ktypes[key] if (not isinstance(arg, T)): raise TypeError(("Keyworded arg '%s' should be of type(s) %s, got %s" % (key, T, type(arg)))) return func(*args, **kargs) return modified return decorator
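Applied to a function, the decorator raises TypeError as soon as an argument fails its check. A usage sketch, assuming the args decorator defined above is in scope:

@args(int, str, flag=bool)
def connect(port, host, flag=False):
    return port, host, flag

connect(8080, 'localhost', flag=True)     # passes both checks
try:
    connect('8080', 'localhost')          # positional arg 1 is not an int
except TypeError as exc:
    print(exc)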
def update_sonic_environment(bootloader, binary_image_version): 'Prepare sonic environment variable using incoming image template file. If incoming image template does not exist\n use current image template file.\n ' SONIC_ENV_TEMPLATE_FILE = os.path.join('usr', 'share', 'sonic', 'templates', 'sonic-environment.j2') SONIC_VERSION_YML_FILE = os.path.join('etc', 'sonic', 'sonic_version.yml') sonic_version = re.sub(IMAGE_PREFIX, '', binary_image_version) new_image_dir = bootloader.get_image_path(binary_image_version) new_image_mount = os.path.join('/', 'tmp', 'image-{0}-fs'.format(sonic_version)) env_dir = os.path.join(new_image_dir, 'sonic-config') env_file = os.path.join(env_dir, 'sonic-environment') with bootloader.get_rootfs_path(new_image_dir) as new_image_squashfs_path: try: mount_squash_fs(new_image_squashfs_path, new_image_mount) next_sonic_env_template_file = os.path.join(new_image_mount, SONIC_ENV_TEMPLATE_FILE) next_sonic_version_yml_file = os.path.join(new_image_mount, SONIC_VERSION_YML_FILE) sonic_env = run_command_or_raise(['sonic-cfggen', '-d', '-y', next_sonic_version_yml_file, '-t', next_sonic_env_template_file]) os.mkdir(env_dir, 493) with open(env_file, 'w+') as ef: print(sonic_env, file=ef) os.chmod(env_file, 420) except SonicRuntimeException as ex: echo_and_log('Warning: SONiC environment variables are not supported for this image: {0}'.format(str(ex)), LOG_ERR, fg='red') if os.path.exists(env_file): os.remove(env_file) os.rmdir(env_dir) finally: umount(new_image_mount)
-8,889,302,718,236,318,000
Prepare sonic environment variable using incoming image template file. If incoming image template does not exist use current image template file.
sonic_installer/main.py
update_sonic_environment
Cosmin-Jinga-MS/sonic-utilities
python
def update_sonic_environment(bootloader, binary_image_version): 'Prepare sonic environment variable using incoming image template file. If incoming image template does not exist\n use current image template file.\n ' SONIC_ENV_TEMPLATE_FILE = os.path.join('usr', 'share', 'sonic', 'templates', 'sonic-environment.j2') SONIC_VERSION_YML_FILE = os.path.join('etc', 'sonic', 'sonic_version.yml') sonic_version = re.sub(IMAGE_PREFIX, , binary_image_version) new_image_dir = bootloader.get_image_path(binary_image_version) new_image_mount = os.path.join('/', 'tmp', 'image-{0}-fs'.format(sonic_version)) env_dir = os.path.join(new_image_dir, 'sonic-config') env_file = os.path.join(env_dir, 'sonic-environment') with bootloader.get_rootfs_path(new_image_dir) as new_image_squashfs_path: try: mount_squash_fs(new_image_squashfs_path, new_image_mount) next_sonic_env_template_file = os.path.join(new_image_mount, SONIC_ENV_TEMPLATE_FILE) next_sonic_version_yml_file = os.path.join(new_image_mount, SONIC_VERSION_YML_FILE) sonic_env = run_command_or_raise(['sonic-cfggen', '-d', '-y', next_sonic_version_yml_file, '-t', next_sonic_env_template_file]) os.mkdir(env_dir, 493) with open(env_file, 'w+') as ef: print(sonic_env, file=ef) os.chmod(env_file, 420) except SonicRuntimeException as ex: echo_and_log('Warning: SONiC environment variables are not supported for this image: {0}'.format(str(ex)), LOG_ERR, fg='red') if os.path.exists(env_file): os.remove(env_file) os.rmdir(env_dir) finally: umount(new_image_mount)
def migrate_sonic_packages(bootloader, binary_image_version): ' Migrate SONiC packages to new SONiC image. ' SONIC_PACKAGE_MANAGER = 'sonic-package-manager' PACKAGE_MANAGER_DIR = '/var/lib/sonic-package-manager/' DOCKER_CTL_SCRIPT = '/usr/lib/docker/docker.sh' DOCKERD_SOCK = 'docker.sock' VAR_RUN_PATH = '/var/run/' tmp_dir = 'tmp' packages_file = 'packages.json' packages_path = os.path.join(PACKAGE_MANAGER_DIR, packages_file) sonic_version = re.sub(IMAGE_PREFIX, '', binary_image_version) new_image_dir = bootloader.get_image_path(binary_image_version) new_image_upper_dir = os.path.join(new_image_dir, UPPERDIR_NAME) new_image_work_dir = os.path.join(new_image_dir, WORKDIR_NAME) new_image_docker_dir = os.path.join(new_image_dir, DOCKERDIR_NAME) new_image_mount = os.path.join('/', tmp_dir, 'image-{0}-fs'.format(sonic_version)) new_image_docker_mount = os.path.join(new_image_mount, 'var', 'lib', 'docker') if (not os.path.isdir(new_image_docker_dir)): echo_and_log('Error: SONiC package migration cannot proceed due to missing docker folder', LOG_ERR, fg='red') return docker_started = False with bootloader.get_rootfs_path(new_image_dir) as new_image_squashfs_path: try: mount_squash_fs(new_image_squashfs_path, new_image_mount) run_command_or_raise(['mkdir', '-p', new_image_upper_dir]) run_command_or_raise(['mkdir', '-p', new_image_work_dir]) mount_overlay_fs(new_image_mount, new_image_upper_dir, new_image_work_dir, new_image_mount) mount_bind(new_image_docker_dir, new_image_docker_mount) mount_procfs_chroot(new_image_mount) mount_sysfs_chroot(new_image_mount) if (not os.path.exists(os.path.join(new_image_mount, os.path.relpath(DOCKER_CTL_SCRIPT, os.path.abspath(os.sep))))): echo_and_log('Warning: SONiC Application Extension is not supported in this image', LOG_WARN, fg='yellow') return run_command_or_raise(['chroot', new_image_mount, DOCKER_CTL_SCRIPT, 'start']) docker_started = True run_command_or_raise(['cp', packages_path, os.path.join(new_image_mount, tmp_dir, packages_file)]) run_command_or_raise(['touch', os.path.join(new_image_mount, 'tmp', DOCKERD_SOCK)]) run_command_or_raise(['mount', '--bind', os.path.join(VAR_RUN_PATH, DOCKERD_SOCK), os.path.join(new_image_mount, 'tmp', DOCKERD_SOCK)]) run_command_or_raise(['chroot', new_image_mount, 'sh', '-c', 'command -v {}'.format(SONIC_PACKAGE_MANAGER)]) run_command_or_raise(['chroot', new_image_mount, SONIC_PACKAGE_MANAGER, 'migrate', os.path.join('/', tmp_dir, packages_file), '--dockerd-socket', os.path.join('/', tmp_dir, DOCKERD_SOCK), '-y']) finally: if docker_started: run_command_or_raise(['chroot', new_image_mount, DOCKER_CTL_SCRIPT, 'stop'], raise_exception=False) umount(new_image_mount, recursive=True, read_only=False, remove_dir=False, raise_exception=False) umount(new_image_mount, raise_exception=False)
9,047,976,012,657,931,000
Migrate SONiC packages to new SONiC image.
sonic_installer/main.py
migrate_sonic_packages
Cosmin-Jinga-MS/sonic-utilities
python
def migrate_sonic_packages(bootloader, binary_image_version): ' ' SONIC_PACKAGE_MANAGER = 'sonic-package-manager' PACKAGE_MANAGER_DIR = '/var/lib/sonic-package-manager/' DOCKER_CTL_SCRIPT = '/usr/lib/docker/docker.sh' DOCKERD_SOCK = 'docker.sock' VAR_RUN_PATH = '/var/run/' tmp_dir = 'tmp' packages_file = 'packages.json' packages_path = os.path.join(PACKAGE_MANAGER_DIR, packages_file) sonic_version = re.sub(IMAGE_PREFIX, , binary_image_version) new_image_dir = bootloader.get_image_path(binary_image_version) new_image_upper_dir = os.path.join(new_image_dir, UPPERDIR_NAME) new_image_work_dir = os.path.join(new_image_dir, WORKDIR_NAME) new_image_docker_dir = os.path.join(new_image_dir, DOCKERDIR_NAME) new_image_mount = os.path.join('/', tmp_dir, 'image-{0}-fs'.format(sonic_version)) new_image_docker_mount = os.path.join(new_image_mount, 'var', 'lib', 'docker') if (not os.path.isdir(new_image_docker_dir)): echo_and_log('Error: SONiC package migration cannot proceed due to missing docker folder', LOG_ERR, fg='red') return docker_started = False with bootloader.get_rootfs_path(new_image_dir) as new_image_squashfs_path: try: mount_squash_fs(new_image_squashfs_path, new_image_mount) run_command_or_raise(['mkdir', '-p', new_image_upper_dir]) run_command_or_raise(['mkdir', '-p', new_image_work_dir]) mount_overlay_fs(new_image_mount, new_image_upper_dir, new_image_work_dir, new_image_mount) mount_bind(new_image_docker_dir, new_image_docker_mount) mount_procfs_chroot(new_image_mount) mount_sysfs_chroot(new_image_mount) if (not os.path.exists(os.path.join(new_image_mount, os.path.relpath(DOCKER_CTL_SCRIPT, os.path.abspath(os.sep))))): echo_and_log('Warning: SONiC Application Extension is not supported in this image', LOG_WARN, fg='yellow') return run_command_or_raise(['chroot', new_image_mount, DOCKER_CTL_SCRIPT, 'start']) docker_started = True run_command_or_raise(['cp', packages_path, os.path.join(new_image_mount, tmp_dir, packages_file)]) run_command_or_raise(['touch', os.path.join(new_image_mount, 'tmp', DOCKERD_SOCK)]) run_command_or_raise(['mount', '--bind', os.path.join(VAR_RUN_PATH, DOCKERD_SOCK), os.path.join(new_image_mount, 'tmp', DOCKERD_SOCK)]) run_command_or_raise(['chroot', new_image_mount, 'sh', '-c', 'command -v {}'.format(SONIC_PACKAGE_MANAGER)]) run_command_or_raise(['chroot', new_image_mount, SONIC_PACKAGE_MANAGER, 'migrate', os.path.join('/', tmp_dir, packages_file), '--dockerd-socket', os.path.join('/', tmp_dir, DOCKERD_SOCK), '-y']) finally: if docker_started: run_command_or_raise(['chroot', new_image_mount, DOCKER_CTL_SCRIPT, 'stop'], raise_exception=False) umount(new_image_mount, recursive=True, read_only=False, remove_dir=False, raise_exception=False) umount(new_image_mount, raise_exception=False)
def validate_positive_int(ctx, param, value): 'Callback to validate param passed is a positive integer.' if (isinstance(value, int) and (value > 0)): return value raise click.BadParameter('Must be a positive integer')
-7,279,381,408,099,939,000
Callback to validate param passed is a positive integer.
sonic_installer/main.py
validate_positive_int
Cosmin-Jinga-MS/sonic-utilities
python
def validate_positive_int(ctx, param, value): if (isinstance(value, int) and (value > 0)): return value raise click.BadParameter('Must be a positive integer')
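click calls such a callback with (ctx, param, value) after type conversion and before the command body runs. A minimal wiring sketch (the option name is illustrative):

import click

def validate_positive_int(ctx, param, value):
    if isinstance(value, int) and value > 0:
        return value
    raise click.BadParameter('Must be a positive integer')

@click.command()
@click.option('--swap-mem-size', type=int, default=1024,
              callback=validate_positive_int)
def demo(swap_mem_size):
    click.echo(swap_mem_size)

if __name__ == '__main__':
    demo()  # 'demo --swap-mem-size 0' aborts with 'Must be a positive integer'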
@click.group(cls=AliasedGroup) def sonic_installer(): ' SONiC image installation manager ' if (os.geteuid() != 0): exit('Root privileges required for this operation') if (os.path.basename(sys.argv[0]) == 'sonic_installer'): print_deprecation_warning('sonic_installer', 'sonic-installer')
-2,693,594,652,722,779,600
SONiC image installation manager
sonic_installer/main.py
sonic_installer
Cosmin-Jinga-MS/sonic-utilities
python
@click.group(cls=AliasedGroup) def sonic_installer(): ' ' if (os.geteuid() != 0): exit('Root privileges required for this operation') if (os.path.basename(sys.argv[0]) == 'sonic_installer'): print_deprecation_warning('sonic_installer', 'sonic-installer')
@sonic_installer.command('install') @click.option('-y', '--yes', is_flag=True, callback=abort_if_false, expose_value=False, prompt='New image will be installed, continue?') @click.option('-f', '--force', is_flag=True, help='Force installation of an image of a type which differs from that of the current running image') @click.option('--skip_migration', is_flag=True, help='Do not migrate current configuration to the newly installed image') @click.option('--skip-package-migration', is_flag=True, help='Do not migrate current packages to the newly installed image') @click.option('--skip-setup-swap', is_flag=True, help='Skip setup temporary SWAP memory used for installation') @click.option('--swap-mem-size', default=1024, type=int, show_default='1024 MiB', help='SWAP memory space size', callback=validate_positive_int, cls=clicommon.MutuallyExclusiveOption, mutually_exclusive=['skip_setup_swap']) @click.option('--total-mem-threshold', default=2048, type=int, show_default='2048 MiB', help='If system total memory is lower than threshold, setup SWAP memory', cls=clicommon.MutuallyExclusiveOption, mutually_exclusive=['skip_setup_swap'], callback=validate_positive_int) @click.option('--available-mem-threshold', default=1200, type=int, show_default='1200 MiB', help='If system available memory is lower than threshold, setup SWAP memory', cls=clicommon.MutuallyExclusiveOption, mutually_exclusive=['skip_setup_swap'], callback=validate_positive_int) @click.argument('url') def install(url, force, skip_migration=False, skip_package_migration=False, skip_setup_swap=False, swap_mem_size=None, total_mem_threshold=None, available_mem_threshold=None): ' Install image from local binary or URL' bootloader = get_bootloader() if (url.startswith('http://') or url.startswith('https://')): echo_and_log('Downloading image...') validate_url_or_abort(url) try: urlretrieve(url, bootloader.DEFAULT_IMAGE_PATH, reporthook) click.echo('') except Exception as e: echo_and_log('Download error', e) raise click.Abort() image_path = bootloader.DEFAULT_IMAGE_PATH else: image_path = os.path.join('./', url) binary_image_version = bootloader.get_binary_image_version(image_path) if (not binary_image_version): echo_and_log('Image file does not exist or is not a valid SONiC image file', LOG_ERR) raise click.Abort() if (binary_image_version in bootloader.get_installed_images()): echo_and_log('Image {} is already installed. Setting it as default...'.format(binary_image_version)) if (not bootloader.set_default_image(binary_image_version)): echo_and_log('Error: Failed to set image as default', LOG_ERR) raise click.Abort() else: if ((not bootloader.verify_binary_image(image_path)) and (not force)): echo_and_log((("Image file '{}' is of a different type than running image.\n".format(url) + 'If you are sure you want to install this image, use -f|--force.\n') + 'Aborting...'), LOG_ERR) raise click.Abort() echo_and_log('Installing image {} and setting it as default...'.format(binary_image_version)) with SWAPAllocator((not skip_setup_swap), swap_mem_size, total_mem_threshold, available_mem_threshold): bootloader.install_image(image_path) if skip_migration: echo_and_log('Skipping configuration migration as requested in the command option.') else: run_command('config-setup backup') update_sonic_environment(bootloader, binary_image_version) if ((not bootloader.supports_package_migration(binary_image_version)) and (not skip_package_migration)): echo_and_log('Warning: SONiC package migration is not supported for this bootloader/image', fg='yellow') skip_package_migration = True if (not skip_package_migration): migrate_sonic_packages(bootloader, binary_image_version) run_command('sync;sync;sync') run_command('sleep 3') echo_and_log('Done')
-8,643,368,970,254,067,000
Install image from local binary or URL
sonic_installer/main.py
install
Cosmin-Jinga-MS/sonic-utilities
python
@sonic_installer.command('install') @click.option('-y', '--yes', is_flag=True, callback=abort_if_false, expose_value=False, prompt='New image will be installed, continue?') @click.option('-f', '--force', is_flag=True, help='Force installation of an image of a type which differs from that of the current running image') @click.option('--skip_migration', is_flag=True, help='Do not migrate current configuration to the newly installed image') @click.option('--skip-package-migration', is_flag=True, help='Do not migrate current packages to the newly installed image') @click.option('--skip-setup-swap', is_flag=True, help='Skip setup temporary SWAP memory used for installation') @click.option('--swap-mem-size', default=1024, type=int, show_default='1024 MiB', help='SWAP memory space size', callback=validate_positive_int, cls=clicommon.MutuallyExclusiveOption, mutually_exclusive=['skip_setup_swap']) @click.option('--total-mem-threshold', default=2048, type=int, show_default='2048 MiB', help='If system total memory is lower than threshold, setup SWAP memory', cls=clicommon.MutuallyExclusiveOption, mutually_exclusive=['skip_setup_swap'], callback=validate_positive_int) @click.option('--available-mem-threshold', default=1200, type=int, show_default='1200 MiB', help='If system available memory is lower than threshold, setup SWAP memory', cls=clicommon.MutuallyExclusiveOption, mutually_exclusive=['skip_setup_swap'], callback=validate_positive_int) @click.argument('url') def install(url, force, skip_migration=False, skip_package_migration=False, skip_setup_swap=False, swap_mem_size=None, total_mem_threshold=None, available_mem_threshold=None): ' ' bootloader = get_bootloader() if (url.startswith('http://') or url.startswith('https://')): echo_and_log('Downloading image...') validate_url_or_abort(url) try: urlretrieve(url, bootloader.DEFAULT_IMAGE_PATH, reporthook) click.echo() except Exception as e: echo_and_log('Download error', e) raise click.Abort() image_path = bootloader.DEFAULT_IMAGE_PATH else: image_path = os.path.join('./', url) binary_image_version = bootloader.get_binary_image_version(image_path) if (not binary_image_version): echo_and_log('Image file does not exist or is not a valid SONiC image file', LOG_ERR) raise click.Abort() if (binary_image_version in bootloader.get_installed_images()): echo_and_log('Image {} is already installed. Setting it as default...'.format(binary_image_version)) if (not bootloader.set_default_image(binary_image_version)): echo_and_log('Error: Failed to set image as default', LOG_ERR) raise click.Abort() else: if ((not bootloader.verify_binary_image(image_path)) and (not force)): echo_and_log((("Image file '{}' is of a different type than running image.\n".format(url) + 'If you are sure you want to install this image, use -f|--force.\n') + 'Aborting...'), LOG_ERR) raise click.Abort() echo_and_log('Installing image {} and setting it as default...'.format(binary_image_version)) with SWAPAllocator((not skip_setup_swap), swap_mem_size, total_mem_threshold, available_mem_threshold): bootloader.install_image(image_path) if skip_migration: echo_and_log('Skipping configuration migration as requested in the command option.') else: run_command('config-setup backup') update_sonic_environment(bootloader, binary_image_version) if ((not bootloader.supports_package_migration(binary_image_version)) and (not skip_package_migration)): echo_and_log('Warning: SONiC package migration is not supported for this bootloader/image', fg='yellow') skip_package_migration = True if (not skip_package_migration): migrate_sonic_packages(bootloader, binary_image_version) run_command('sync;sync;sync') run_command('sleep 3') echo_and_log('Done')
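The reporthook passed to urlretrieve is called with (block_count, block_size, total_size) as data arrives. A minimal progress hook of the general shape (this sketch is not the hook sonic-utilities actually ships):

import sys
from urllib.request import urlretrieve

def reporthook(count, block_size, total_size):
    # Print an in-place percentage once the server reports a total size.
    if total_size > 0:
        percent = min(100, count * block_size * 100 // total_size)
        sys.stdout.write('\r...%d%%' % percent)
        sys.stdout.flush()

# urlretrieve(url, image_path, reporthook)  # as in install() above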
@sonic_installer.command('list') def list_command(): ' Print installed images ' bootloader = get_bootloader() images = bootloader.get_installed_images() curimage = bootloader.get_current_image() nextimage = bootloader.get_next_image() click.echo(('Current: ' + curimage)) click.echo(('Next: ' + nextimage)) click.echo('Available: ') for image in images: click.echo(image)
2,618,359,676,817,890,300
Print installed images
sonic_installer/main.py
list_command
Cosmin-Jinga-MS/sonic-utilities
python
@sonic_installer.command('list') def list_command(): ' ' bootloader = get_bootloader() images = bootloader.get_installed_images() curimage = bootloader.get_current_image() nextimage = bootloader.get_next_image() click.echo(('Current: ' + curimage)) click.echo(('Next: ' + nextimage)) click.echo('Available: ') for image in images: click.echo(image)
@sonic_installer.command('set-default') @click.argument('image') def set_default(image): ' Choose image to boot from by default ' if ('set_default' in sys.argv): print_deprecation_warning('set_default', 'set-default') bootloader = get_bootloader() if (image not in bootloader.get_installed_images()): echo_and_log('Error: Image does not exist', LOG_ERR) raise click.Abort() bootloader.set_default_image(image)
-7,517,770,441,170,818,000
Choose image to boot from by default
sonic_installer/main.py
set_default
Cosmin-Jinga-MS/sonic-utilities
python
@sonic_installer.command('set-default') @click.argument('image') def set_default(image): ' ' if ('set_default' in sys.argv): print_deprecation_warning('set_default', 'set-default') bootloader = get_bootloader() if (image not in bootloader.get_installed_images()): echo_and_log('Error: Image does not exist', LOG_ERR) raise click.Abort() bootloader.set_default_image(image)
@sonic_installer.command('set-next-boot') @click.argument('image') def set_next_boot(image): ' Choose image for next reboot (one time action) ' if ('set_next_boot' in sys.argv): print_deprecation_warning('set_next_boot', 'set-next-boot') bootloader = get_bootloader() if (image not in bootloader.get_installed_images()): echo_and_log('Error: Image does not exist', LOG_ERR) sys.exit(1) bootloader.set_next_image(image)
-6,706,689,704,284,489,000
Choose image for next reboot (one time action)
sonic_installer/main.py
set_next_boot
Cosmin-Jinga-MS/sonic-utilities
python
@sonic_installer.command('set-next-boot') @click.argument('image') def set_next_boot(image): ' ' if ('set_next_boot' in sys.argv): print_deprecation_warning('set_next_boot', 'set-next-boot') bootloader = get_bootloader() if (image not in bootloader.get_installed_images()): echo_and_log('Error: Image does not exist', LOG_ERR) sys.exit(1) bootloader.set_next_image(image)
@sonic_installer.command('remove') @click.option('-y', '--yes', is_flag=True, callback=abort_if_false, expose_value=False, prompt='Image will be removed, continue?') @click.argument('image') def remove(image): ' Uninstall image ' bootloader = get_bootloader() images = bootloader.get_installed_images() current = bootloader.get_current_image() if (image not in images): echo_and_log('Image does not exist', LOG_ERR) sys.exit(1) if (image == current): echo_and_log('Cannot remove current image', LOG_ERR) sys.exit(1) bootloader.remove_image(image)
-1,843,418,837,334,930,400
Uninstall image
sonic_installer/main.py
remove
Cosmin-Jinga-MS/sonic-utilities
python
@sonic_installer.command('remove') @click.option('-y', '--yes', is_flag=True, callback=abort_if_false, expose_value=False, prompt='Image will be removed, continue?') @click.argument('image') def remove(image): ' ' bootloader = get_bootloader() images = bootloader.get_installed_images() current = bootloader.get_current_image() if (image not in images): echo_and_log('Image does not exist', LOG_ERR) sys.exit(1) if (image == current): echo_and_log('Cannot remove current image', LOG_ERR) sys.exit(1) bootloader.remove_image(image)
@sonic_installer.command('binary-version') @click.argument('binary_image_path') def binary_version(binary_image_path): ' Get version from local binary image file ' if ('binary_version' in sys.argv): print_deprecation_warning('binary_version', 'binary-version') bootloader = get_bootloader() version = bootloader.get_binary_image_version(binary_image_path) if (not version): click.echo('Image file does not exist or is not a valid SONiC image file') sys.exit(1) else: click.echo(version)
3,922,998,441,759,064,600
Get version from local binary image file
sonic_installer/main.py
binary_version
Cosmin-Jinga-MS/sonic-utilities
python
@sonic_installer.command('binary-version') @click.argument('binary_image_path') def binary_version(binary_image_path): ' ' if ('binary_version' in sys.argv): print_deprecation_warning('binary_version', 'binary-version') bootloader = get_bootloader() version = bootloader.get_binary_image_version(binary_image_path) if (not version): click.echo('Image file does not exist or is not a valid SONiC image file') sys.exit(1) else: click.echo(version)
@sonic_installer.command('cleanup') @click.option('-y', '--yes', is_flag=True, callback=abort_if_false, expose_value=False, prompt='Remove images which are not current and next, continue?') def cleanup(): ' Remove installed images which are not current and next ' bootloader = get_bootloader() images = bootloader.get_installed_images() curimage = bootloader.get_current_image() nextimage = bootloader.get_next_image() image_removed = 0 for image in images: if ((image != curimage) and (image != nextimage)): echo_and_log(('Removing image %s' % image)) bootloader.remove_image(image) image_removed += 1 if (image_removed == 0): echo_and_log('No image(s) to remove')
-158,115,806,792,518,000
Remove installed images which are not current and next
sonic_installer/main.py
cleanup
Cosmin-Jinga-MS/sonic-utilities
python
@sonic_installer.command('cleanup') @click.option('-y', '--yes', is_flag=True, callback=abort_if_false, expose_value=False, prompt='Remove images which are not current and next, continue?') def cleanup(): ' ' bootloader = get_bootloader() images = bootloader.get_installed_images() curimage = bootloader.get_current_image() nextimage = bootloader.get_next_image() image_removed = 0 for image in images: if ((image != curimage) and (image != nextimage)): echo_and_log(('Removing image %s' % image)) bootloader.remove_image(image) image_removed += 1 if (image_removed == 0): echo_and_log('No image(s) to remove')
@sonic_installer.command('upgrade-docker') @click.option('-y', '--yes', is_flag=True, callback=abort_if_false, expose_value=False, prompt='New docker image will be installed, continue?') @click.option('--cleanup_image', is_flag=True, help='Clean up old docker image') @click.option('--skip_check', is_flag=True, help='Skip task check for docker upgrade') @click.option('--tag', type=str, help='Tag for the new docker image') @click.option('--warm', is_flag=True, help='Perform warm upgrade') @click.argument('container_name', metavar='<container_name>', required=True, type=click.Choice(DOCKER_CONTAINER_LIST)) @click.argument('url') def upgrade_docker(container_name, url, cleanup_image, skip_check, tag, warm): ' Upgrade docker image from local binary or URL' if ('upgrade_docker' in sys.argv): print_deprecation_warning('upgrade_docker', 'upgrade-docker') image_name = get_container_image_name(container_name) image_latest = (image_name + ':latest') image_id_previous = get_container_image_id(image_latest) DEFAULT_IMAGE_PATH = os.path.join('/tmp/', image_name) if (url.startswith('http://') or url.startswith('https://')): echo_and_log('Downloading image...') validate_url_or_abort(url) try: urlretrieve(url, DEFAULT_IMAGE_PATH, reporthook) except Exception as e: echo_and_log('Download error: {}'.format(e), LOG_ERR) raise click.Abort() image_path = DEFAULT_IMAGE_PATH else: image_path = os.path.join('./', url) if (not os.path.isfile(image_path)): echo_and_log("Image file '{}' does not exist or is not a regular file. Aborting...".format(image_path), LOG_ERR) raise click.Abort() warm_configured = False state_db = SonicV2Connector(host='127.0.0.1') state_db.connect(state_db.STATE_DB, False) TABLE_NAME_SEPARATOR = '|' prefix = ('WARM_RESTART_ENABLE_TABLE' + TABLE_NAME_SEPARATOR) _hash = '{}{}'.format(prefix, container_name) if (state_db.get(state_db.STATE_DB, _hash, 'enable') == 'true'): warm_configured = True state_db.close(state_db.STATE_DB) if ((container_name == 'swss') or (container_name == 'bgp') or (container_name == 'teamd')): if ((warm_configured is False) and warm): run_command(('config warm_restart enable %s' % container_name)) tag_previous = get_docker_tag_name(image_latest) run_command(('docker load < %s' % image_path)) warm_app_names = [] if ((warm_configured is True) or warm): if (container_name == 'swss'): skipPendingTaskCheck = '' if skip_check: skipPendingTaskCheck = ' -s' cmd = ('docker exec -i swss orchagent_restart_check -w 2000 -r 5 ' + skipPendingTaskCheck) proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True, text=True) (out, err) = proc.communicate() if (proc.returncode != 0): if (not skip_check): echo_and_log('Orchagent is not in clean state, RESTARTCHECK failed', LOG_ERR) if ((warm_configured is False) and warm): run_command(('config warm_restart disable %s' % container_name)) image_id_latest = get_container_image_id(image_latest) run_command(('docker rmi -f %s' % image_id_latest)) run_command(('docker tag %s:%s %s' % (image_name, tag_previous, image_latest))) sys.exit(proc.returncode) else: echo_and_log('Orchagent is not in clean state, upgrading it anyway') else: echo_and_log('Orchagent is in clean state and frozen for warm upgrade') warm_app_names = ['orchagent', 'neighsyncd'] elif (container_name == 'bgp'): echo_and_log('Stopping bgp ...') run_command('docker exec -i bgp pkill -9 zebra') run_command('docker exec -i bgp pkill -9 bgpd') warm_app_names = ['bgp'] echo_and_log('Stopped bgp ...') elif (container_name == 'teamd'): echo_and_log('Stopping teamd ...') run_command('docker exec -i teamd pkill -USR1 teamd > /dev/null') warm_app_names = ['teamsyncd'] echo_and_log('Stopped teamd ...') for warm_app_name in warm_app_names: hdel_warm_restart_table('STATE_DB', 'WARM_RESTART_TABLE', warm_app_name, 'state') run_command(('docker kill %s > /dev/null' % container_name)) run_command(('docker rm %s ' % container_name)) if (tag is None): tag = get_docker_tag_name(image_latest) run_command(('docker tag %s:latest %s:%s' % (image_name, image_name, tag))) run_command(('systemctl restart %s' % container_name)) image_id_all = get_container_image_id_all(image_name) image_id_latest = get_container_image_id(image_latest) for id in image_id_all: if (id != image_id_latest): if ((not cleanup_image) and (id == image_id_previous)): continue run_command(('docker rmi -f %s' % id)) exp_state = 'reconciled' state = '' if ((warm_configured is True) or warm): count = 0 for warm_app_name in warm_app_names: state = '' while ((state != exp_state) and (count < 90)): sys.stdout.write('\r {}: '.format(warm_app_name)) sys.stdout.write(('[%-s' % ('=' * count))) sys.stdout.flush() count += 1 time.sleep(2) state = hget_warm_restart_table('STATE_DB', 'WARM_RESTART_TABLE', warm_app_name, 'state') log.log_notice(('%s reached %s state' % (warm_app_name, state))) sys.stdout.write(']\n\r') if (state != exp_state): echo_and_log(('%s failed to reach %s state' % (warm_app_name, exp_state)), LOG_ERR) else: exp_state = '' if ((warm_configured is False) and warm): if ((container_name == 'swss') or (container_name == 'bgp') or (container_name == 'teamd')): run_command(('config warm_restart disable %s' % container_name)) if (state == exp_state): echo_and_log('Done') else: echo_and_log('Failed', LOG_ERR) sys.exit(1)
-6,863,555,456,993,927,000
Upgrade docker image from local binary or URL
sonic_installer/main.py
upgrade_docker
Cosmin-Jinga-MS/sonic-utilities
python
@sonic_installer.command('upgrade-docker')
@click.option('-y', '--yes', is_flag=True, callback=abort_if_false, expose_value=False, prompt='New docker image will be installed, continue?')
@click.option('--cleanup_image', is_flag=True, help='Clean up old docker image')
@click.option('--skip_check', is_flag=True, help='Skip task check for docker upgrade')
@click.option('--tag', type=str, help='Tag for the new docker image')
@click.option('--warm', is_flag=True, help='Perform warm upgrade')
@click.argument('container_name', metavar='<container_name>', required=True, type=click.Choice(DOCKER_CONTAINER_LIST))
@click.argument('url')
def upgrade_docker(container_name, url, cleanup_image, skip_check, tag, warm):
    ' '
    if ('upgrade_docker' in sys.argv):
        print_deprecation_warning('upgrade_docker', 'upgrade-docker')
    image_name = get_container_image_name(container_name)
    image_latest = (image_name + ':latest')
    image_id_previous = get_container_image_id(image_latest)
    DEFAULT_IMAGE_PATH = os.path.join('/tmp/', image_name)
    if (url.startswith('http://') or url.startswith('https://')):
        echo_and_log('Downloading image...')
        validate_url_or_abort(url)
        try:
            urlretrieve(url, DEFAULT_IMAGE_PATH, reporthook)
        except Exception as e:
            echo_and_log('Download error: {}'.format(e), LOG_ERR)
            raise click.Abort()
        image_path = DEFAULT_IMAGE_PATH
    else:
        image_path = os.path.join('./', url)
    if (not os.path.isfile(image_path)):
        echo_and_log("Image file '{}' does not exist or is not a regular file. Aborting...".format(image_path), LOG_ERR)
        raise click.Abort()
    warm_configured = False
    state_db = SonicV2Connector(host='127.0.0.1')
    state_db.connect(state_db.STATE_DB, False)
    TABLE_NAME_SEPARATOR = '|'
    prefix = ('WARM_RESTART_ENABLE_TABLE' + TABLE_NAME_SEPARATOR)
    _hash = '{}{}'.format(prefix, container_name)
    if (state_db.get(state_db.STATE_DB, _hash, 'enable') == 'true'):
        warm_configured = True
    state_db.close(state_db.STATE_DB)
    if ((container_name == 'swss') or (container_name == 'bgp') or (container_name == 'teamd')):
        if ((warm_configured is False) and warm):
            run_command(('config warm_restart enable %s' % container_name))
    tag_previous = get_docker_tag_name(image_latest)
    run_command(('docker load < %s' % image_path))
    warm_app_names = []
    if ((warm_configured is True) or warm):
        if (container_name == 'swss'):
            skipPendingTaskCheck = ''
            if skip_check:
                skipPendingTaskCheck = ' -s'
            cmd = ('docker exec -i swss orchagent_restart_check -w 2000 -r 5 ' + skipPendingTaskCheck)
            proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True, text=True)
            (out, err) = proc.communicate()
            if (proc.returncode != 0):
                if (not skip_check):
                    echo_and_log('Orchagent is not in clean state, RESTARTCHECK failed', LOG_ERR)
                    if ((warm_configured is False) and warm):
                        run_command(('config warm_restart disable %s' % container_name))
                    image_id_latest = get_container_image_id(image_latest)
                    run_command(('docker rmi -f %s' % image_id_latest))
                    run_command(('docker tag %s:%s %s' % (image_name, tag_previous, image_latest)))
                    sys.exit(proc.returncode)
                else:
                    echo_and_log('Orchagent is not in clean state, upgrading it anyway')
            else:
                echo_and_log('Orchagent is in clean state and frozen for warm upgrade')
            warm_app_names = ['orchagent', 'neighsyncd']
        elif (container_name == 'bgp'):
            echo_and_log('Stopping bgp ...')
            run_command('docker exec -i bgp pkill -9 zebra')
            run_command('docker exec -i bgp pkill -9 bgpd')
            warm_app_names = ['bgp']
            echo_and_log('Stopped bgp ...')
        elif (container_name == 'teamd'):
            echo_and_log('Stopping teamd ...')
            run_command('docker exec -i teamd pkill -USR1 teamd > /dev/null')
            warm_app_names = ['teamsyncd']
            echo_and_log('Stopped teamd ...')
        for warm_app_name in warm_app_names:
            hdel_warm_restart_table('STATE_DB', 'WARM_RESTART_TABLE', warm_app_name, 'state')
    run_command(('docker kill %s > /dev/null' % container_name))
    run_command(('docker rm %s ' % container_name))
    if (tag is None):
        tag = get_docker_tag_name(image_latest)
    run_command(('docker tag %s:latest %s:%s' % (image_name, image_name, tag)))
    run_command(('systemctl restart %s' % container_name))
    image_id_all = get_container_image_id_all(image_name)
    image_id_latest = get_container_image_id(image_latest)
    for id in image_id_all:
        if (id != image_id_latest):
            if ((not cleanup_image) and (id == image_id_previous)):
                continue
            run_command(('docker rmi -f %s' % id))
    exp_state = 'reconciled'
    state = ''
    if ((warm_configured is True) or warm):
        count = 0
        for warm_app_name in warm_app_names:
            state = ''
            while ((state != exp_state) and (count < 90)):
                sys.stdout.write('\r {}: '.format(warm_app_name))
                sys.stdout.write(('[%-s' % ('=' * count)))
                sys.stdout.flush()
                count += 1
                time.sleep(2)
                state = hget_warm_restart_table('STATE_DB', 'WARM_RESTART_TABLE', warm_app_name, 'state')
                log.log_notice(('%s reached %s state' % (warm_app_name, state)))
            sys.stdout.write(']\n\r')
            if (state != exp_state):
                echo_and_log(('%s failed to reach %s state' % (warm_app_name, exp_state)), LOG_ERR)
    else:
        exp_state = ''
    if ((warm_configured is False) and warm):
        if ((container_name == 'swss') or (container_name == 'bgp') or (container_name == 'teamd')):
            run_command(('config warm_restart disable %s' % container_name))
    if (state == exp_state):
        echo_and_log('Done')
    else:
        echo_and_log('Failed', LOG_ERR)
        sys.exit(1)
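For reference, the warm-upgrade wait in upgrade_docker reduces to a bounded polling loop: read the app's state from the WARM_RESTART_TABLE every 2 seconds until it reports 'reconciled' or the shared 90-poll budget runs out. Below is a minimal, self-contained sketch of that pattern; the read_state callable is a hypothetical stand-in for the hget_warm_restart_table lookup and is not part of the sonic_installer API.

import time

def wait_for_state(read_state, exp_state='reconciled', max_polls=90, interval=2.0):
    # Poll read_state() until it returns exp_state or the poll budget runs out.
    # read_state is a zero-argument placeholder for the STATE_DB lookup.
    state = None
    for _ in range(max_polls):
        time.sleep(interval)
        state = read_state()
        if state == exp_state:
            return True
    return False

# Demo with a fake state source that reconciles on the third poll:
polls = iter(['init', 'restored', 'reconciled'])
print(wait_for_state(lambda: next(polls, 'reconciled'), interval=0.01))  # True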
@sonic_installer.command('rollback-docker')
@click.option('-y', '--yes', is_flag=True, callback=abort_if_false, expose_value=False, prompt='Docker image will be rolled back, continue?')
@click.argument('container_name', metavar='<container_name>', required=True, type=click.Choice(DOCKER_CONTAINER_LIST))
def rollback_docker(container_name):
    ' Rollback docker image to previous version'
    if ('rollback_docker' in sys.argv):
        print_deprecation_warning('rollback_docker', 'rollback-docker')
    image_name = get_container_image_name(container_name)
    image_id_all = get_container_image_id_all(image_name)
    if (len(image_id_all) != 2):
        echo_and_log("Two images required, but there are '{}' images for '{}'. Aborting...".format(len(image_id_all), image_name), LOG_ERR)
        raise click.Abort()
    image_latest = (image_name + ':latest')
    image_id_previous = get_container_image_id(image_latest)
    version_tag = ''
    for id in image_id_all:
        if (id != image_id_previous):
            version_tag = get_docker_tag_name(id)
    run_command(('docker tag %s:%s %s:latest' % (image_name, version_tag, image_name)))
    if ((container_name == 'swss') or (container_name == 'bgp') or (container_name == 'teamd')):
        echo_and_log("Cold reboot is required to restore system state after '{}' rollback !!".format(container_name), LOG_ERR)
    else:
        run_command(('systemctl restart %s' % container_name))
    echo_and_log('Done')
3,255,627,671,178,643,500
Rollback docker image to previous version
sonic_installer/main.py
rollback_docker
Cosmin-Jinga-MS/sonic-utilities
python
@sonic_installer.command('rollback-docker')
@click.option('-y', '--yes', is_flag=True, callback=abort_if_false, expose_value=False, prompt='Docker image will be rolled back, continue?')
@click.argument('container_name', metavar='<container_name>', required=True, type=click.Choice(DOCKER_CONTAINER_LIST))
def rollback_docker(container_name):
    ' '
    if ('rollback_docker' in sys.argv):
        print_deprecation_warning('rollback_docker', 'rollback-docker')
    image_name = get_container_image_name(container_name)
    image_id_all = get_container_image_id_all(image_name)
    if (len(image_id_all) != 2):
        echo_and_log("Two images required, but there are '{}' images for '{}'. Aborting...".format(len(image_id_all), image_name), LOG_ERR)
        raise click.Abort()
    image_latest = (image_name + ':latest')
    image_id_previous = get_container_image_id(image_latest)
    version_tag = ''
    for id in image_id_all:
        if (id != image_id_previous):
            version_tag = get_docker_tag_name(id)
    run_command(('docker tag %s:%s %s:latest' % (image_name, version_tag, image_name)))
    if ((container_name == 'swss') or (container_name == 'bgp') or (container_name == 'teamd')):
        echo_and_log("Cold reboot is required to restore system state after '{}' rollback !!".format(container_name), LOG_ERR)
    else:
        run_command(('systemctl restart %s' % container_name))
    echo_and_log('Done')
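The rollback above requires exactly two local images for the container and re-tags the one that is not currently ':latest'. A hedged, standalone sketch of that selection step, using a hypothetical list of (image_id, tag) pairs rather than real docker metadata:

def pick_rollback_tag(images, latest_id):
    # images: list of (image_id, tag) pairs -- an illustrative format,
    # not how docker or sonic_installer actually report local images.
    if len(images) != 2:
        raise ValueError('exactly two images are required for a rollback')
    for image_id, tag in images:
        if image_id != latest_id:
            return tag
    raise ValueError('no non-latest image found')

print(pick_rollback_tag([('abc123', '202311.1'), ('def456', '202311.2')], 'def456'))  # 202311.1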
@sonic_installer.command('verify-next-image')
def verify_next_image():
    ' Verify the next image for reboot'
    bootloader = get_bootloader()
    if (not bootloader.verify_next_image()):
        echo_and_log('Image verification failed', LOG_ERR)
        sys.exit(1)
    click.echo('Image successfully verified')
-3,142,566,014,996,444,000
Verify the next image for reboot
sonic_installer/main.py
verify_next_image
Cosmin-Jinga-MS/sonic-utilities
python
@sonic_installer.command('verify-next-image')
def verify_next_image():
    ' '
    bootloader = get_bootloader()
    if (not bootloader.verify_next_image()):
        echo_and_log('Image verification failed', LOG_ERR)
        sys.exit(1)
    click.echo('Image successfully verified')
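Because verify-next-image exits non-zero on failure, scripts can branch on its return code before rebooting. A usage sketch, assuming the sonic-installer entry point is on PATH (true on a SONiC host, where this would actually run):

import subprocess

# Hypothetical caller: 'sonic-installer verify-next-image' is assumed to
# exit 0 on success and 1 on failure, per the command body above.
result = subprocess.run(['sonic-installer', 'verify-next-image'])
if result.returncode == 0:
    print('next image verified, safe to reboot')
else:
    print('verification failed, do not reboot')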
def __init__(self, allocate, swap_mem_size=None, total_mem_threshold=None, available_mem_threshold=None):
    '\n Initialize the SWAP memory allocator.\n The allocator will try to setup SWAP memory only if all the below conditions are met:\n - allocate evaluates to True\n - disk has enough space(> DISK_MEM_THRESHOLD)\n - either system total memory < total_mem_threshold or system available memory < available_mem_threshold\n\n @param allocate: True to allocate SWAP memory if necessary\n @param swap_mem_size: the size of SWAP memory to allocate(in MiB)\n @param total_mem_threshold: the system total memory threshold(in MiB)\n @param available_mem_threshold: the system available memory threshold(in MiB)\n '
    self.allocate = allocate
    self.swap_mem_size = (SWAPAllocator.SWAP_MEM_SIZE if (swap_mem_size is None) else swap_mem_size)
    self.total_mem_threshold = (SWAPAllocator.TOTAL_MEM_THRESHOLD if (total_mem_threshold is None) else total_mem_threshold)
    self.available_mem_threshold = (SWAPAllocator.AVAILABLE_MEM_THRESHOLD if (available_mem_threshold is None) else available_mem_threshold)
    self.is_allocated = False
5,742,194,480,738,586,000
Initialize the SWAP memory allocator.
The allocator will try to setup SWAP memory only if all the below conditions are met:
- allocate evaluates to True
- disk has enough space(> DISK_MEM_THRESHOLD)
- either system total memory < total_mem_threshold or system available memory < available_mem_threshold

@param allocate: True to allocate SWAP memory if necessary
@param swap_mem_size: the size of SWAP memory to allocate(in MiB)
@param total_mem_threshold: the system total memory threshold(in MiB)
@param available_mem_threshold: the system available memory threshold(in MiB)
sonic_installer/main.py
__init__
Cosmin-Jinga-MS/sonic-utilities
python
def __init__(self, allocate, swap_mem_size=None, total_mem_threshold=None, available_mem_threshold=None):
    '\n Initialize the SWAP memory allocator.\n The allocator will try to setup SWAP memory only if all the below conditions are met:\n - allocate evaluates to True\n - disk has enough space(> DISK_MEM_THRESHOLD)\n - either system total memory < total_mem_threshold or system available memory < available_mem_threshold\n\n @param allocate: True to allocate SWAP memory if necessary\n @param swap_mem_size: the size of SWAP memory to allocate(in MiB)\n @param total_mem_threshold: the system total memory threshold(in MiB)\n @param available_mem_threshold: the system available memory threshold(in MiB)\n '
    self.allocate = allocate
    self.swap_mem_size = (SWAPAllocator.SWAP_MEM_SIZE if (swap_mem_size is None) else swap_mem_size)
    self.total_mem_threshold = (SWAPAllocator.TOTAL_MEM_THRESHOLD if (total_mem_threshold is None) else total_mem_threshold)
    self.available_mem_threshold = (SWAPAllocator.AVAILABLE_MEM_THRESHOLD if (available_mem_threshold is None) else available_mem_threshold)
    self.is_allocated = False
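The conditions listed in the docstring combine into a single gating decision. A sketch of that logic under stated assumptions: the threshold defaults below are illustrative guesses, not the real SWAPAllocator class constants, whose values are not shown in this record.

def should_allocate_swap(total_mib, available_mib, disk_free_mib,
                         total_threshold_mib=2048, available_threshold_mib=1200,
                         disk_threshold_mib=4096):
    # Gate 1: the disk must have enough free space to back a swap file.
    if disk_free_mib <= disk_threshold_mib:
        return False
    # Gate 2: memory is tight by either measure (total or available).
    return (total_mib < total_threshold_mib
            or available_mib < available_threshold_mib)

print(should_allocate_swap(total_mib=3500, available_mib=900, disk_free_mib=10240))  # True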
@staticmethod
def get_disk_freespace(path):
    'Return free disk space in bytes.'
    fs_stats = os.statvfs(path)
    return (fs_stats.f_bsize * fs_stats.f_bavail)
-6,005,371,022,862,676,000
Return free disk space in bytes.
sonic_installer/main.py
get_disk_freespace
Cosmin-Jinga-MS/sonic-utilities
python
@staticmethod
def get_disk_freespace(path):
    fs_stats = os.statvfs(path)
    return (fs_stats.f_bsize * fs_stats.f_bavail)
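A standalone illustration of the statvfs arithmetic used above. This is POSIX-only; note that f_bavail counts blocks available to unprivileged users, whereas f_bfree would also include root-reserved blocks.

import os

stats = os.statvfs('/')  # POSIX-only; raises AttributeError on Windows
free_bytes = stats.f_bsize * stats.f_bavail
print('free on /: {:.1f} MiB'.format(free_bytes / (1 << 20)))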
@staticmethod
def read_from_meminfo():
    'Read information from /proc/meminfo.'
    meminfo = {}
    with open('/proc/meminfo') as fd:
        for line in fd.readlines():
            if line:
                fields = line.split()
                if ((len(fields) >= 2) and fields[1].isdigit()):
                    meminfo[fields[0].rstrip(':')] = int(fields[1])
    return meminfo
5,057,307,792,526,469,000
Read information from /proc/meminfo.
sonic_installer/main.py
read_from_meminfo
Cosmin-Jinga-MS/sonic-utilities
python
@staticmethod
def read_from_meminfo():
    meminfo = {}
    with open('/proc/meminfo') as fd:
        for line in fd.readlines():
            if line:
                fields = line.split()
                if ((len(fields) >= 2) and fields[1].isdigit()):
                    meminfo[fields[0].rstrip(':')] = int(fields[1])
    return meminfo
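A usage sketch built on the same parsing idea: /proc/meminfo reports sizes in kB, so callers typically convert to MiB. Linux-only, and the helper name mem_mib is illustrative, not part of the module.

def mem_mib(field):
    # /proc/meminfo lines look like 'MemTotal:  16314828 kB'.
    with open('/proc/meminfo') as fd:
        for line in fd:
            parts = line.split()
            if len(parts) >= 2 and parts[0].rstrip(':') == field and parts[1].isdigit():
                return int(parts[1]) // 1024  # kB -> MiB
    raise KeyError(field)

print('MemTotal:', mem_mib('MemTotal'), 'MiB')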