Dataset columns:
content: string (lengths 35 to 416k)
sha1: string (length 40)
id: int64 (values 0 to 710k)
import multiprocessing


def _get_thread_count():
    """Gets a thread_count based on the multiprocessing.cpu_count()."""
    try:
        thread_count = multiprocessing.cpu_count()
        # cpu_count only gets the physical core count. There doesn't appear to be a
        # simple way of determining whether a CPU supports simultaneous
        # multithreading in Python, so assume that anything with 6 or more cores
        # supports it.
        if thread_count >= 6:
            thread_count *= 2
    except NotImplementedError:
        # Assume a quad core if we can't get the actual core count.
        thread_count = 4
    return thread_count
f7c4959734e49a70412d87ebc1f03b811b600600
706,781
def weighted_avg(x, weights):  # used in lego_reader.py
    """
    x = batch * len * d
    weights = batch * len
    """
    return weights.unsqueeze(1).bmm(x).squeeze(1)
efa08d9719ccbcc727cb7349888f0a26140521e9
706,783
from bs4 import BeautifulSoup
import re


def scrape_urls(html_text, pattern):
    """Extract URLs from raw html based on regex pattern"""
    soup = BeautifulSoup(html_text, "html.parser")
    anchors = soup.find_all("a")
    urls = [a.get("href") for a in anchors]
    return [url for url in urls if re.match(pattern, url) is not None]
dfba40df7894db91575b51a82d89fef0f824d362
706,786
def _try_type(value, dtype):
    """
    Examples
    --------
    >>> _try_type("1", int)
    1
    >>> _try_type(1.0, int)
    1
    >>> _try_type("ab", float)
    'ab'
    """
    try:
        return dtype(value)
    except ValueError:
        return value
4a188e57dfafca96e6cd8a815dbbb162c74df01b
706,788
def _gen_parabola(phase: float, start: float, mid: float, end: float) -> float:
    """Gets a point on a parabola y = a x^2 + b x + c.

    The Parabola is determined by three points (0, start), (0.5, mid), (1, end)
    in the plane.

    Args:
      phase: Normalized to [0, 1]. A point on the x-axis of the parabola.
      start: The y value at x == 0.
      mid: The y value at x == 0.5.
      end: The y value at x == 1.

    Returns:
      The y value at x == phase.
    """
    mid_phase = 0.5
    delta_1 = mid - start
    delta_2 = end - start
    delta_3 = mid_phase ** 2 - mid_phase
    coef_a = (delta_1 - delta_2 * mid_phase) / delta_3
    coef_b = (delta_2 * mid_phase ** 2 - delta_1) / delta_3
    coef_c = start

    return coef_a * phase ** 2 + coef_b * phase + coef_c
bdd808339e808a26dd1a4bf22552a1d32244bb02
706,789
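A quick sanity check of _gen_parabola above, using made-up start/mid/end values: the fitted parabola must pass through its three defining points.

# Hypothetical values: start=1.0, mid=3.0, end=2.0.
assert abs(_gen_parabola(0.0, 1.0, 3.0, 2.0) - 1.0) < 1e-9
assert abs(_gen_parabola(0.5, 1.0, 3.0, 2.0) - 3.0) < 1e-9
assert abs(_gen_parabola(1.0, 1.0, 3.0, 2.0) - 2.0) < 1e-9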
def pkgdir(tmpdir, monkeypatch):
    """ temp directory fixture containing a readable/writable ./debian/changelog. """
    cfile = tmpdir.mkdir('debian').join('changelog')
    text = """
testpkg (1.1.0-1) stable; urgency=medium

  * update to 1.1.0
  * other rad packaging updates
  * even more cool packaging updates that take a lot of text to describe so
    the change wraps on multiple lines

 -- Ken Dreyer <[email protected]>  Tue, 06 Jun 2017 14:46:37 -0600

testpkg (1.0.0-2redhat1) stable; urgency=medium

  * update to 1.0.0 (rhbz#123)

 -- Ken Dreyer <[email protected]>  Mon, 05 Jun 2017 13:45:36 -0600
""".lstrip("\n")
    cfile.write(text)
    monkeypatch.chdir(tmpdir)
    return tmpdir
0717aba1d5181e48eb11fa1e91b72933cda1af14
706,790
import os


def get_namespace_from_path(path):
    """get namespace from file path

    Args:
        path (unicode): file path

    Returns:
        unicode: namespace
    """
    return os.path.splitext(os.path.basename(path))[0]
5ca9bdde1dbe3e845a7d8e64ca0813e215014efd
706,791
import calendar


def validate_days(year, month, day):
    """validate no of days in given month and year

    >>> validate_days(2012, 8, 31)
    31
    >>> validate_days(2012, 8, 32)
    31
    """
    total_days = calendar.monthrange(year, month)
    return (total_days[1] if (day > total_days[1]) else day)
7499dc9654ec9ffd7f534cf27444a3236dd82e81
706,793
import os
import base64


def _pic_download(url, type):
    """
    Download an image: decode the base64 payload and save it as a .jpg file.
    :param url:
    :param type:
    :return:
    """
    save_path = os.path.abspath('...') + '\\' + 'images'
    if not os.path.exists(save_path):
        os.mkdir(save_path)
    img_path = save_path + '\\' + '{}.jpg'.format(type)
    img_data = base64.b64decode(url)
    with open(img_path, 'wb') as f:
        f.write(img_data)
    return img_path
05541a9991b4b3042c84d4e1b5188def365cc4fc
706,794
def get_targets(args):
    """
    Gets the list of targets for cmake and kernel/build.sh

    :param args: The args variable generated by parse_parameters
    :return: A string of targets suitable for cmake or kernel/build.sh
    """
    if args.targets:
        targets = args.targets
    elif args.full_toolchain:
        targets = "all"
    else:
        targets = "AArch64;ARM;BPF;Hexagon;Mips;PowerPC;RISCV;SystemZ;X86"
    return targets
81eb31fe416303bc7e881ec2c10cfeeea4fdab05
706,795
def _format_warning(message, category, filename, lineno, line=None):  # noqa: U100, E501
    """
    Simple format for warnings issued by ProPlot. See the
    `internal warning call signature \
    <https://docs.python.org/3/library/warnings.html#warnings.showwarning>`__
    and the `default warning source code \
    <https://github.com/python/cpython/blob/master/Lib/warnings.py>`__.
    """
    return f'{filename}:{lineno}: ProPlotWarning: {message}\n'
f5709df0a84d9479d6b895dccb3eae8292791f74
706,796
def piocheCarte(liste_pioche, x):
    """
    This function returns the first x cards from the draw pile.

    Args:
        x (int): Number of cards to draw.

    Returns:
        list: The x cards that were drawn.
    """
    liste_carte = []
    for i in range(x):
        # Always take the top card, since the pile shrinks as cards are removed.
        liste_carte.append(liste_pioche[0])
        del liste_pioche[0]
    return liste_carte
ed31c47d699447870207a4066a3da9c35333ada8
706,797
import os


def is_process_running(pid):
    """Returns true if a process with pid is running, false otherwise."""
    # from
    # http://stackoverflow.com/questions/568271/how-to-check-if-there-exists-a-process-with-a-given-pid
    try:
        os.kill(pid, 0)
    except OSError:
        return False
    else:
        return True
a05cbeb84b5d6f3d6d7a06ab3d14702742b2a289
706,798
import os


def no_holders(disk):
    """Return true if the disk has no holders."""
    holders = os.listdir('/sys/class/block/' + disk + '/holders/')
    return len(holders) == 0
3ef1b7754cde64f248ca9da747adb398aaefd878
706,799
import inspect
import sys


def main(fn):
    """Call fn with command line arguments.  Used as a decorator.

    The main decorator marks the function that starts a program. For example,

    @main
    def my_run_function():
        # function body

    Use this instead of the typical __name__ == "__main__" predicate.
    """
    if inspect.stack()[1][0].f_locals['__name__'] == '__main__':
        args = sys.argv[1:]  # Discard the script name from command line
        fn(*args)  # Call the main function
    return fn
6c87ac44dc8422b8d4f02685a764f0cccffbc215
706,800
def _extract_options(config, options, *args):
    """Extract options values from a configparser, optparse pair.

    Options given on command line take precedence over options
    read in the configuration file.

    Args:
        config (dict): option values read from a config file through
            configparser
        options (optparse.Options): optparse 'options' object containing
            options values from the command line
        *args (str tuple): name of the options to extract
    """
    extract = {}
    for key in args:
        # Skip options that are not present in the configuration file.
        if key not in config:
            continue
        extract[key] = config[key]
        option = getattr(options, key, None)
        if option is not None:
            extract[key] = option
    return extract
3d74857b3dcdd242950a35b84d3bcaae557a390b
706,801
def statRobustness(compromised, status):
    """produce data for robustness stats"""
    rob = {0: {"empty": 0, "login based": 0, "top 10 common": 0, "company name": 0},
           1: {"top 1000 common": 0, "login extrapolation": 0, "company context related": 0, "4 char or less": 0},
           2: {"top 1M common": 0, "6 char or less": 0, "2 charsets or less": 0},
           3: {"present in attack wordlist": 0, "present in locale attack wordlist": 0, "leaked": 0, "undetermined": 0}}
    for acc in compromised:
        if status == 'all' or 'account_disabled' not in compromised[acc]["status"]:
            rob[compromised[acc]["robustness"]][compromised[acc]["reason"]] += 1
    return rob
46920b466b96fa37a94888e788104c1d901a9227
706,802
def buscaBinariaIterativa(alvo, array):
    """ Returns the index of the array at which the target element is stored.

    Given the collection received as a parameter, this function finds and
    returns the index at which the specified element is stored. If the
    element is not present in the collection, it returns -1. Uses an
    iterative approach.

    Parameters
    ----------
    alvo : ?
        Element whose index is being searched for
    array : list
        The list in which the element's index is to be found

    Return
    ------
    index : int
        The index at which the target element is stored
    """
    min = 0
    max = len(array) - 1
    while (min <= max):
        mid = (min + max) // 2
        if (array[mid] == alvo):
            return mid
        else:
            if (array[mid] < alvo):
                min = mid + 1
            else:
                max = mid - 1
    return -1
e74fed0781b3c1bed7f5f57713a06c58bcbde107
706,803
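A minimal usage sketch for buscaBinariaIterativa, assuming a sorted input list (made-up values):

values = [2, 5, 8, 13, 21, 34]
assert buscaBinariaIterativa(13, values) == 3   # found at index 3
assert buscaBinariaIterativa(7, values) == -1   # absent element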
import re


def remove_extended(text):
    """ remove Chinese punctuation and Latin Supplement.
    https://en.wikipedia.org/wiki/Latin-1_Supplement_(Unicode_block)
    """
    # latin supplement: \u00A0-\u00FF
    # notice: nbsp is removed here
    lsp_pattern = re.compile(r'[\x80-\xFF]')
    text = lsp_pattern.sub('', text)
    # chinese special character
    # chc_pattern = re.compile(r'[\r\t\n\.\!\/_,$%^*(+\"\')]|[+——()?【】“”!,。?、~@#¥%……&*()]')
    # text = chc_pattern.sub('',text)
    return text
52d0f5082b519d06f7dd20ba3d755790b1f3166d
706,804
def sum_num(n1, n2):
    """
    Get sum of two numbers
    :param n1:
    :param n2:
    :return:
    """
    return (n1 + n2)
08477e596317f6b8750debd39b5cf0aa56da857c
706,805
from pathlib import Path
import json


def get_reference_data(fname):
    """
    Load JSON reference data.

    :param fname: Filename without extension.
    :type fname: str
    """
    base_dir = Path(__file__).resolve().parent
    fpath = base_dir.joinpath('reference', 'data', fname + '.json')
    with fpath.open() as f:
        return json.load(f)
73880586393ce9463a356d69880f2f285058637f
706,807
import struct


def little_endian_uint32(i):
    """Return the 32 bit unsigned integer little-endian representation of i"""
    s = struct.pack('<I', i)
    return struct.unpack('=I', s)[0]
07f72baaf8f7143c732fd5b9e56b0b7d02d531bd
706,808
def new_automation_jobs(issues):
    """
    :param issues: issues object pulled from Redmine API
    :return: returns a new subset of issues that are Status: NEW and match a term in AUTOMATOR_KEYWORDS)
    """
    new_jobs = {}
    for issue in issues:
        # Only new issues
        if issue.status.name == 'New':
            # Strip whitespace and make lowercase ('subject' is the job type i.e. Diversitree)
            subject = issue.subject.lower().replace(' ', '')
            # Check for presence of an automator keyword in subject line
            if subject == 'iridaretrieve':
                new_jobs[issue] = subject
    return new_jobs
74c9c96aeeea1d15384d617c266daa4d49f3a203
706,809
import os


def resolve_test_data_path(test_data_file):
    """
    helper function to ensure filepath is valid
    for different testing context (setuptools, directly, etc.)

    :param test_data_file: Relative path to an input file.
    :returns: Full path to the input file.
    """
    if os.path.exists(test_data_file):
        return test_data_file
    else:
        path = os.path.join('woudc_data_registry', 'tests', test_data_file)
        if os.path.exists(path):
            return path
d124bcbc36b48fd6572697c9a5211f794c3dce19
706,810
def init(param_test):
    """
    Initialize class: param_test
    """
    # initialization
    param_test.default_args_values = {'di': 6.85, 'da': 7.65, 'db': 7.02}
    default_args = ['-di 6.85 -da 7.65 -db 7.02']  # default parameters
    param_test.default_result = 6.612133606

    # assign default params
    if not param_test.args:
        param_test.args = default_args

    return param_test
d86cd246d4beb5aa267d222bb12f9637f001032d
706,811
def checkSeconds(seconds, timestamp):
    """
    Return a string depending on the value of seconds
    If the block is mined since one hour ago, return timestamp
    """
    if 3600 > seconds > 60:
        minute = int(seconds / 60)
        if minute == 1:
            return '{} minute ago'.format(minute)
        return '{} minutes ago'.format(minute)
    else:
        return 'Since {} sec'.format(seconds)
2d07657a14300793a116d28e7c9495ae4a1b61ed
706,812
import base64


def base64_encode(text):
    """<string> -- Encode <string> with base64."""
    return base64.b64encode(text.encode()).decode()
ce837abde42e9a00268e14cfbd2bd4fd3cf16208
706,813
def _filter_out_variables_not_in_dataframe(X, variables):
    """Filter out variables that are not present in the dataframe.

    Function removes variables that the user defines in the argument `variables`
    but that are not present in the input dataframe.

    Useful when using several feature selection procedures in a row. The
    dataframe input to the first selection algorithm likely contains more
    variables than the input dataframe to subsequent selection algorithms, and
    it is not possible a priori, to say which variable will be dropped.

    Parameters
    ----------
    X: pandas DataFrame
    variables: string, int or list of (strings or int).

    Returns
    -------
    filtered_variables: List of variables present in `variables` and in the
        input dataframe.
    """
    # When variables is not defined, keep it like this and return None.
    if variables is None:
        return None

    # If an integer or a string is provided, convert to a list.
    if not isinstance(variables, list):
        variables = [variables]

    # Filter out elements of variables that are not in the dataframe.
    filtered_variables = [var for var in variables if var in X.columns]

    # Raise an error if no column is left to work with.
    if len(filtered_variables) == 0:
        raise ValueError(
            "After filtering no variable remaining. At least 1 is required."
        )

    return filtered_variables
63b4cce75741a5d246f40c5b88cfebaf818b3482
706,814
import gzip


def file_format(input_files):
    """
    Takes all input files and checks their first character to assess the file
    format. Three lists are returned: one containing all fasta files, one
    containing all fastq files, and one containing all invalid files.
    """
    fasta_files = []
    fastq_files = []
    invalid_files = []

    # Open all input files and get the first character
    for infile in input_files:
        try:
            f = gzip.open(infile, "rb")
            fst_char = f.read(1)
        except OSError:
            f = open(infile, "rb")
            fst_char = f.read(1)
        f.close()
        #fst_char = f.readline().decode("ascii")[0]
        #print(fst_char)
        # Return file format based in first char
        if fst_char == b'@':
            fastq_files.append(infile)
        elif fst_char == b'>':
            fasta_files.append(infile)
        else:
            invalid_files.append(infile)
    return (fasta_files, fastq_files, invalid_files)
acd9a0f7b49884d611d0ac65b43407a323a6588b
706,815
def analyse_latency(cid):
    """
    Parse the resolve_time and download_time info from cid_latency.txt

    :param cid: cid of the object
    :return: time to resolve the source of the content and time to download the content
    """
    resolve_time = 0
    download_time = 0
    with open(f'{cid}_latency.txt', 'r') as stdin:
        for line in stdin.readlines():
            """
            The output of the ipfs get <cid> command is in the form of:
            Started: 02-19-2022 01:51:16
            Resolve Ended: 02-19-2022 01:51:16
            Resolve Duraution: 0.049049
            Download Ended: 02-19-2022 01:51:16
            Download Duraution: 0.006891
            Total Duraution: 0.055940
            """
            if "Resolve Duraution:" in line:
                resolve_time = line.split(": ")[1]
                resolve_time = resolve_time.split("\n")[0]
            if "Download Duraution:" in line:
                download_time = line.split(": ")[1]
                download_time = download_time.split("\n")[0]
    return resolve_time, download_time
806a9969cc934faeea842901442ecececfdde232
706,816
import re


def process_ref(paper_id):
    """Attempt to extract arxiv id from a string"""
    # if user entered a whole url, extract only the arxiv id part
    paper_id = re.sub("https?://arxiv\.org/(abs|pdf|ps)/", "", paper_id)
    paper_id = re.sub("\.pdf$", "", paper_id)
    # strip version
    paper_id = re.sub("v[0-9]+$", "", paper_id)
    # remove leading arxiv, i.e., such that paper_id=' arXiv: 2001.1234' is still valid
    paper_id = re.sub("^\s*arxiv[:\- ]", "", paper_id, flags=re.IGNORECASE)
    return paper_id
a1c817f1ae7b211973efd6c201b5c13e1a91b57b
706,817
def open_file(path):
    """more robust open function"""
    return open(path, encoding='utf-8')
785ab196756365d1f27ce3fcd69d0ba2867887a9
706,819
def del_none(dictionary):
    """
    Recursively delete from the dictionary all entries whose values are None.

    Args:
        dictionary (dict): input dictionary

    Returns:
        dict: output dictionary

    Note:
        This function changes the input parameter in place.
    """
    for key, value in list(dictionary.items()):
        if value is None:
            del dictionary[key]
        elif isinstance(value, dict):
            del_none(value)
    return dictionary
48b76272ed20bbee38b5293ede9f5d824950aec5
706,820
def test_true() -> None:
    """This is a test that should always pass.

    This is just a default test to make sure tests run.

    Parameters
    ----------
    None

    Returns
    -------
    None
    """
    # Always true test.
    assert_message = "This test should always pass."
    assert True, assert_message
    return None
f08cb5feb4e450b10b58fe32d751bf45985df84c
706,821
def match_patterns(name, name_w_pattern, patterns):
    """Match patterns to filename.

    Given a SPICE kernel name, a SPICE Kernel name with patterns, and the
    possible patterns, provide a dictionary with the patterns as keys and the
    patterns values as value after matching it between the SPICE Kernel name
    with patterns and without patterns.

    For example, given the following arguments:

     * name: ``insight_v01.tm``
     * name_w_pattern: ``insight_v$VERSION.tm``

    The function will return: ``{VERSION: '01'}``

    :param name: Name of the SPICE Kernel
    :type name: str
    :param name_w_pattern: Name of the SPICE Kernel with patterns
    :type name_w_pattern: str
    :param patterns: List of the possible patterns present in the SPICE Kernel
        name with patterns
    :type patterns: list
    :return: Dictionary providing the patterns and their value as defined by
        the SPICE kernel
    :rtype: dict
    """
    #
    # This list will help us determine the order of the patterns in the file
    # name because later on the patterns need to be correlated with the
    # pattern values.
    #
    pattern_name_order = {}
    name_check = name_w_pattern

    for pattern in patterns:
        pattern_name_order[pattern["#text"]] = name_w_pattern.find(pattern["#text"])
        name_check = name_check.replace(
            "$" + pattern["#text"], "$" * int(pattern["@length"])
        )

    #
    # Convert the pattern_name_order dictionary into an ordered list
    #
    pattern_name_order = list(
        {
            k: v
            for k, v in sorted(pattern_name_order.items(), key=lambda item: item[1])
        }.keys()
    )

    #
    # Generate a list of values extracted from the comparison of the
    # original file and the file with patterns.
    #
    values_list = []
    value = ""
    value_bool = False

    for i in range(len(name_check)):
        if (name_check[i] == name[i]) and (not value_bool):
            continue
        if (name_check[i] == name[i]) and value_bool:
            value_bool = False
            values_list.append(value)
            value = ""
        elif (name_check[i] == "$") and (not value_bool):
            value_bool = True
            value += name[i]
        elif (name_check[i] == "$") and value_bool:
            value += name[i]
        else:
            raise

    #
    # Correlate the values with their position in the file name with
    # patterns.
    #
    values = {}
    for i in range(len(values_list)):
        values[pattern_name_order[i]] = values_list[i]

    return values
a54b7f1fcda67b5649f92a21f4711874dd226ee9
706,822
def survival_df(data, t_col="t", e_col="e", label_col="Y", exclude_col=[]):
    """
    Transform original DataFrame to survival dataframe that would be used
    in model training or predicting.

    Parameters
    ----------
    data: DataFrame
        Survival data to be transformed.
    t_col: str
        Column name of data indicating time.
    e_col: str
        Column name of data indicating events or status.
    label_col: str
        Name of new label in transformed survival data.
    exclude_col: list
        Columns to be excluded.

    Returns
    -------
    DataFrame:
        Transformed survival data. Negative values in label are taken as
        right censored.
    """
    x_cols = [c for c in data.columns if c not in [t_col, e_col] + exclude_col]

    # Negative values are taken as right censored
    data.loc[:, label_col] = data.loc[:, t_col]
    data.loc[data[e_col] == 0, label_col] = - data.loc[data[e_col] == 0, label_col]

    return data[x_cols + [label_col]]
8d35c27a75340d5c6535727e0e419fc0548d6094
706,823
from datetime import datetime


def get_date_today():
    """Get date today in str format such as 20201119.
    """
    return datetime.today().strftime('%Y%m%d')
d5e69607dbf4b8c829cfe30ea0335f46c7d2512a
706,824
def input_output_details(interpreter):
    """
    input_output_details: Used to get the details from the interpreter
    """
    input_details = interpreter.get_input_details()
    output_details = interpreter.get_output_details()

    return input_details, output_details
3024f2a6c91a533c3aff858ee3a1db11d360bb25
706,825
def fetchPackageNames(graphJson):
    """Parses serialized graph and returns all package names it uses

    :param graphJson: Serialized graph
    :type graphJson: dict
    :rtype: list(str)
    """
    packages = set()

    def worker(graphData):
        for node in graphData["nodes"]:
            packages.add(node["package"])

            for inpJson in node["inputs"]:
                packages.add(inpJson['package'])

            for outJson in node["outputs"]:
                packages.add(outJson['package'])

            if "graphData" in node:
                worker(node["graphData"])

    worker(graphJson)
    return packages
ccac1cfa1305d5d318cf3e2e3ed85d00fff7e56b
706,826
def types_and_shorthands():
    """a mapping from type names in the json doc to their one letter
    short hands in the output of 'attr'
    """
    return {
        'int': 'i',
        'uint': 'u',
        'bool': 'b',
        'decimal': 'd',
        'color': 'c',
        'string': 's',
        'regex': 'r',
        'SplitAlign': 'n',
        'LayoutAlgorithm': 'n',
        'font': 'f',
        'Rectangle': 'R',
        'WindowID': 'w',
    }
39f364677a8e2ee1d459599ba2574a8a4f4cd49e
706,827
def CreateRootRelativePath(self, path):
    """
    Generate a path relative from the root
    """
    result_path = self.engine_node.make_node(path)
    return result_path.abspath()
79053bb1bcb724e8ddf9bfc4b5b13b67be9227f0
706,828
def _escape_pgpass(txt):
    """
    Escape a fragment of a PostgreSQL .pgpass file.
    """
    return txt.replace('\\', '\\\\').replace(':', '\\:')
3926f683a2715ff1d41d8433b525793e8214f7a9
706,829
from typing import OrderedDict


def arr_to_dict(arr, ref_dict):
    """
    Transform an array of data into a dictionary keyed by the same keys in
    ref_dict, with data divided into chunks of the same length as in ref_dict.
    Requires that the length of the array is the sum of the lengths of the
    arrays in each entry of ref_dict. The other dimensions of the input array
    and reference dict can differ.

    Arguments
    ---------
    arr : array
        Input array to be transformed into dictionary.
    ref_dict : dict
        Reference dictionary containing the keys used to construct the output
        dictionary.

    Returns
    -------
    out : dict
        Dictionary of values from arr keyed with keys from ref_dict.
    """
    out = OrderedDict()
    idx = 0
    assert len(arr) == sum([len(v) for v in ref_dict.values()])
    for k, bd in ref_dict.items():
        out[k] = arr[idx : idx + len(bd)]
        idx += len(bd)
    return out
55339447226cdd2adafe714fa12e144c6b38faa2
706,830
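A small usage sketch for arr_to_dict with made-up data: the flat list is split into chunks whose lengths match the reference entries.

ref = {"a": [0, 0], "b": [0, 0, 0]}
out = arr_to_dict([1, 2, 3, 4, 5], ref)
assert out == {"a": [1, 2], "b": [3, 4, 5]}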
def run_pipeline(context, func, ast, func_signature, pipeline=None, **kwargs):
    """
    Run a bunch of AST transformers and visitors on the AST.
    """
    # print __import__('ast').dump(ast)
    pipeline = pipeline or context.numba_pipeline(context, func, ast,
                                                  func_signature, **kwargs)
    return pipeline, pipeline.run_pipeline()
559d9c44ae143e49ff505fd76df0393bae56f012
706,831
def replace_by_one_rule(specific_rule: dict, sentence: str):
    """
    This function replaces a sentence with the given specific replacement dict.

    :param specific_rule: A dict containing the replacement rule, where the keys are the words to use,
        the values will be replaced by the keys.
    :param sentence: A string to be replaced by the dict and given rule.
    :return: The string after being replaced by the rules.
    """
    original = sentence.lower()
    for key in specific_rule.keys():
        for word in specific_rule[key]:
            original = original.replace(word, key)
    original = " ".join([i if i != 'be' else 'is' for i in original.split(' ')])
    return original.replace('(s)', '').replace('is at there', 'been there').replace('(es)', ''). \
        replace('is in there', 'been there').replace('is there', 'been there').replace('possess', 'have')
31a5bd58ef77d76c968c353dd493ba3357d5b506
706,832
def to_module_name(field):
    """_to_module_name(self, field: str) -> str

    Convert module name to match syntax used in
    https://github.com/brendangregg/FlameGraph

    Examples:
        [unknown]        -> [unknown]
        /usr/bin/firefox -> [firefox]
    """
    if field != '[unknown]':
        field = '[{}]'.format(field.split('/')[-1])
    return field
75e3fbb9a45710ea6dacecf5ecc34a5b9409606a
706,833
def traitement(l):
    """Removes the unnecessary blank cards"""
    while l[-1][1] == 'nan':
        del l[-1]
    return l
d21a7d493a35fc53195315da9b824b0ca3c8ba25
706,834
def chunker(file_path):
    """
    Read a block of lines from a file
    :param file_path:
    :return:
    """
    words = []
    with open(file_path, 'r') as file_object:
        for word in file_object:
            word = word.strip()
            if word:
                words.append(word)
    return words
a60b6f3cc7003955ae6acd8ac5e74574cdbd5976
706,835
import subprocess


def c2c_dist(commande, octree_lvl=0):
    """ CloudCompare (CC) cloud2cloud distance command """
    if octree_lvl == 0:
        commande += " -C2C_DIST -split_xyz -save_clouds"
    else:
        commande += " -C2C_DIST -split_xyz -octree_level " + str(octree_lvl) + " -save_clouds"
    subprocess.call(commande)
    return True
723082644cb6d8b24cc27b634a3bca2b8caabe4a
706,836
def legalize_names(varnames):
    """returns a dictionary for conversion of variable names to legal
    parameter names.
    """
    var_map = {}
    for var in varnames:
        new_name = var.replace("_", "__").replace("$", "_").replace(".", "_")
        assert new_name not in var_map
        var_map[var] = new_name
    return var_map
ad8e9ef3394d4ac3cfa80198f488c1834bd227fc
706,837
import argparse
from textwrap import dedent


def arg_parser() -> argparse.Namespace:
    """
    Reads command line arguments.

    :returns: Values of accepted command line arguments.
    """
    _parser = argparse.ArgumentParser(
        description=dedent(
            """Find all recipes in a directory, build them and push all their images to an sregistry.
            Recipes are identified by the suffix ".recipe".
            The image name will be taken from the recipe name using everything from the first character till the first "." occurrence.
            The version will be taken from the recipe name using everything from the first "." till the suffix ".recipe".
            The collection name will be taken from the recipes parent folder.
            """
        )
    )
    _parser.add_argument(
        '--path',
        '-p',
        type=str,
        help="Base path to search recipes.",
        required=True
    )
    _parser.add_argument(
        '--image_type',
        '-i',
        type=str,
        help="The type of image to be build."
    )
    _parser.add_argument(
        '--build_log_dir',
        '-b',
        type=str,
        help="The directory, that should contain the build logs. Will be created if not existent."
    )
    return _parser.parse_args()
6bf2f99cbab0674fb5b127b58cdb0e28ad87ef0a
706,838
from typing import Any
from typing import Set
from typing import KeysView


def to_set(data: Any) -> Set[Any]:
    """Convert data to a set. A single None value will be converted to the empty set.

    ```python
    x = fe.util.to_set(None)  # set()
    x = fe.util.to_set([None])  # {None}
    x = fe.util.to_set(7)  # {7}
    x = fe.util.to_set([7, 8])  # {7,8}
    x = fe.util.to_set({7})  # {7}
    x = fe.util.to_set((7))  # {7}
    ```

    Args:
        data: Input data, within or without a python container. The `data` must be hashable.

    Returns:
        The input `data` but inside a set instead of whatever other container type used to hold it.
    """
    if data is None:
        return set()
    if not isinstance(data, set):
        if isinstance(data, (tuple, list, KeysView)):
            data = set(data)
        else:
            data = {data}
    return data
df2649d0b7c7c2323984edd3eeea76eff0eab4d2
706,839
import mimetypes


def get_mimetype(path):
    """
    Get (guess) the mimetype of a file.
    """
    mimetype, _ = mimetypes.guess_type(path)
    return mimetype
7677259fcdf052f9647fe41e4b4cb71d83ea50cd
706,840
def id_number_checksum(gd):
    """
    Calculates a Swedish ID number checksum, using the Luhn algorithm
    """
    n = s = 0
    for c in (gd['year'] + gd['month'] + gd['day'] + gd['serial']):
        # Letter? It's an interimspersonnummer and we substitute the letter
        # with 1.
        if c.isalpha():
            c = 1

        tmp = ((n % 2) and 1 or 2) * int(c)
        if tmp > 9:
            tmp = sum([int(i) for i in str(tmp)])

        s += tmp
        n += 1

    if (s % 10) == 0:
        return 0

    return (((s // 10) + 1) * 10) - s
bbf0a9fa7f6ed2c2bfc414173fd2ac9e9c1d8835
706,841
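A usage sketch for id_number_checksum with made-up birth digits; under the Luhn weighting used above, these groups work out to a check digit of 6.

gd = {'year': '81', 'month': '12', 'day': '18', 'serial': '987'}
assert id_number_checksum(gd) == 6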
def del_none(d):
    """
    Delete dict keys with None values, and empty lists, recursively.
    """
    # Iterate over a copy of the items so keys can be deleted while looping.
    for key, value in list(d.items()):
        if value is None or (isinstance(value, list) and len(value) == 0):
            del d[key]
        elif isinstance(value, dict):
            del_none(value)
    return d
46cf9e331c633f5f69b980f3b10c96306d3478c2
706,842
def merge_s2_threshold(log_area, gap_thresholds):
    """Return gap threshold for log_area of the merged S2
    with linear interpolation given the points in gap_thresholds

    :param log_area: Log 10 area of the merged S2
    :param gap_thresholds: tuple (n, 2) of fix points for interpolation
    """
    for i, (a1, g1) in enumerate(gap_thresholds):
        if log_area < a1:
            if i == 0:
                return g1
            a0, g0 = gap_thresholds[i - 1]
            return (log_area - a0) * (g1 - g0) / (a1 - a0) + g0
    return gap_thresholds[-1][1]
36dd06c8af828e3dc2ef5f1048046039feaa6c21
706,843
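A usage sketch for merge_s2_threshold with hypothetical fix points: values below the first point or above the last are clamped, anything in between is linearly interpolated.

thresholds = ((1.0, 100.0), (2.0, 50.0), (3.0, 10.0))
assert merge_s2_threshold(0.5, thresholds) == 100.0   # clamped to the first point
assert merge_s2_threshold(1.5, thresholds) == 75.0    # interpolated between (1, 100) and (2, 50)
assert merge_s2_threshold(4.0, thresholds) == 10.0    # clamped to the last point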
def rename_indatabet_cols(df_orig):
    """ """
    df = df_orig.copy(deep=True)

    odds_cols = {'odds_awin_pinn': 'awinOddsPinnIndatabet',
                 'odds_draw_pinn': 'drawOddsPinnIndatabet',
                 'odds_hwin_pinn': 'hwinOddsPinnIndatabet',
                 'odds_awin_bet365': 'awinOddsBet365Indatabet',
                 'odds_draw_bet365': 'drawOddsBet365Indatabet',
                 'odds_hwin_bet365': 'hwinOddsBet365Indatabet',
                 'odds_ftgoalso2.5_bet365': 'ftGoalsO2.5OddsBet365Indatabet',
                 'odds_ftgoalsu2.5_bet365': 'ftGoalsU2.5OddsBet365Indatabet',
                 'odds_ftgoalso2.5_pinn': 'ftGoalsO2.5OddsPinnIndatabet',
                 'odds_ftgoalsu2.5_pinn': 'ftGoalsU2.5OddsPinnIndatabet'}

    df.rename(columns=odds_cols, inplace=True)

    return df
a07e7c9757e1b207528f7b7fda63e06a1dced47a
706,844
def unadmin(bot, input):
    """Removes person from admins list, owner only"""
    if not input.owner:
        return False
    bot.config.set_del('admins', input.group(2).lower())
    bot.reply("Unadmin'd {0}".format(input.group(2)))
1a74ab0a3d3d1b41dd6d1f065b71a48557af84ed
706,845
def get_strategy_name():
    """Return strategy module name."""
    return 'store_type'
bbf1ed9f43f492561ee5c595061f74bea0f5e464
706,846
def get_last_position(fit, warmup=False):
    """Parse last position from fit object

    Parameters
    ----------
    fit : StanFit4Model
    warmup : bool
        If True, returns the last warmup position, when warmup
        has been done. Otherwise function returns the first sample
        position.

    Returns
    -------
    list
        list contains a dictionary of last draw from each chain.
    """
    fit._verify_has_samples()
    positions = []
    extracted = fit.extract(permuted=False, pars=fit.model_pars, inc_warmup=warmup)
    draw_location = -1
    if warmup:
        draw_location += max(1, fit.sim["warmup"])
    chains = fit.sim["chains"]
    for i in range(chains):
        extract_pos = {key: values[draw_location, i] for key, values in extracted.items()}
        positions.append(extract_pos)
    return positions
28ec10c4f90ac786053334f593ffd3ade27b1fc5
706,847
import os
import re


def get_version(filename="telepresence/__init__.py"):
    """Parse out version info"""
    base_dir = os.path.dirname(os.path.abspath(__file__))
    with open(os.path.join(base_dir, filename)) as initfile:
        for line in initfile.readlines():
            match = re.match("__version__ *= *['\"](.*)['\"]", line)
            if match:
                return match.group(1)
829c2aad31bffc820a204110306cbbe92feb017b
706,848
import os


def normalize_path(path):
    """Normalize and return absolute path.

    Expand user symbols like ~ and resolve relative paths.
    """
    return os.path.abspath(os.path.expanduser(os.path.normpath(path)))
108a820df621cca2238feadf8a45eef59e9aa883
706,849
import importlib


def _version(lib_name):
    """
    Returns the version of a package.
    If version cannot be determined returns "available"
    """
    lib = importlib.import_module(lib_name)
    if hasattr(lib, "__version__"):
        return lib.__version__
    else:
        return "available"
cec49d2de66d2fc3a7ed3c89259711bdf40bbe8e
706,850
from pathlib import Path


def mkdir(path_str):
    """
    Method to create a new directory or directories recursively.
    """
    return Path(path_str).mkdir(parents=True, exist_ok=True)
1621fd5f4d74b739de0b17933c1804faabf44a2f
706,851
def horner(n, c, x0):
    """
    Parameters
    ----------
    n : integer
        degree of the polynomial.
    c : float
        coefficients of the polynomial.
    x0 : float
        where we are evaluating the polynomial.

    Returns
    -------
    y : float
        the value of the function evaluated at x0.
    z : float
        the value of the derivative evaluated at x0.
    """
    y = c[n]
    z = c[n]
    for i in range(n - 1, 0, -1):
        y = x0 * y + c[i]
        z = x0 * z + y
    y = x0 * y + c[0]  # this computes the b0
    return y, z
adf3f3772d12d5bed0158045ad480cee8454cb5c
706,852
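A quick check of the Horner evaluation above, using the made-up polynomial p(x) = 2x^2 + 3x + 5 (so c = [5, 3, 2], with c[i] multiplying x^i): at x0 = 2, p(2) = 19 and p'(2) = 11.

assert horner(2, [5, 3, 2], 2.0) == (19.0, 11.0)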
import gzip


def _compression_safe_opener(fname):
    """Determine whether to use *open* or *gzip.open* to read
    the input file, depending on whether or not the file is compressed.
    """
    f = gzip.open(fname, "r")
    try:
        f.read(1)
        opener = gzip.open
    except IOError:
        opener = open
    finally:
        f.close()
    return opener
4c44da2ae15c63ccd6467e6e893a3c590c20a7e9
706,854
import sys


def gen_headers(value_type, value, header_type="PacketFilter", direction=None, notFilter=False):
    """ helper function constructs json header format

    value: a STRING corresponding to value_type
    direction: "src" or "dst"

    Parameters
    ----------
    value_type : string
        a string of header formats. Most commonly used are:
        ipv4_src | ipv4_dst
        ipv6_src | ipv6_dst
        mac_src | mac_dst
        tp_src | tp_dst | eth_type | vlan_vid | ip_proto
    value : string
        the value of the corresponding value_type.
    header_type : string, optional
        DESCRIPTION. The default is "PacketFilter".
        "PacketAliasFilter" needs corresponding alias set
    direction : string, optional
        DESCRIPTION. Either "src" or "dst"
    notFilter : boolean, optional
        DESCRIPTION. The default is False. If set to True negates the header
        value_type and value.

    Returns
    -------
    dict
        constructed header dict usable for fwdApi.
    """
    header = {}
    header['type'] = header_type
    if header_type == "PacketFilter":
        header['values'] = {str(value_type): [str(value)]}
    elif header_type == "PacketAliasFilter":
        header['value'] = value
    else:
        sys.exit("header_type is either 'PacketFilter' or 'PacketAliasFilter'")
    if direction:
        header['direction'] = direction
    if notFilter == True:
        notHeader = {}
        notHeader['type'] = "NotFilter"
        notHeader['clause'] = header
        return notHeader
    return header
292d34bc44d51685633d7772439fc90d5e92edaf
706,855
from typing import Iterable
from typing import Any
from typing import Iterator
import itertools


def prepend(
    iterable: Iterable[Any],
    value: Any,
    *,
    times: int = 1,
) -> Iterator[Any]:
    """Return an iterator with a specified value prepended.

    Arguments:
        iterable: the iterable to which the value is to be prepended
        value: the value to prepend to the iterable

    Keyword Arguments:
        times: number of times to prepend the value (optional; default is 1)

    Returns:
        iterator prepending the specified value(s) to the items of the iterable

    Examples:
        >>> list(prepend(range(5), -1))
        [-1, 0, 1, 2, 3, 4]
        >>> list(prepend(['off to work we go'], 'hi ho', times=2))
        ['hi ho', 'hi ho', 'off to work we go']
    """
    return itertools.chain([value] * times, iterable)
659bc3616238f5e40865505c006c1369f20e33d3
706,856
def _with_extension(base: str, extension: str) -> str:
    """ Adds an extension to a base name """
    if "sus" in base:
        return f"{extension}{base}"
    else:
        return f"{base}{extension}"
5a1253763808127f296c3bcb04c07562346dea2d
706,857
def GuessLanguage(filename):
    """ Attempts to Guess Language of `filename`.  Essentially, we do a
    filename.rsplit('.', 1), and a lookup into a dictionary of extensions."""
    try:
        (_, extension) = filename.rsplit('.', 1)
    except ValueError:
        raise ValueError("Could not guess language as '%s' does not have an \
extension" % filename)
    return {'c': 'c', 'py': 'python'}[extension]
3cd1289ab3140256dfbeb3718f30a3ac3ffca6f2
706,858
import numpy


def extract_data_size(series, *names):
    """
    Determines series data size from the first available property, which
    provides direct values as list, tuple or NumPy array.

    Args:
        series: perrot.Series
            Series from which to extract data size.

        names: (str,)
            Sequence of property names to check.

    Returns:
        int or None
            Determined data size.
    """
    # get size
    for name in names:

        # check property
        if not series.has_property(name):
            continue

        # get property
        prop = series.get_property(name, native=True)

        # get size
        if isinstance(prop, (list, tuple, numpy.ndarray)):
            return len(prop)

    # no data
    return None
39d503b359318d9dc118481baa7f99a43b926711
706,859
def fake_execute_default_reply_handler(*ignore_args, **ignore_kwargs):
    """A reply handler for commands that haven't been added to the reply list.

    Returns empty strings for stdout and stderr.
    """
    return '', ''
e73bd970030c4f78aebf2913b1540fc1b370d906
706,860
def empty_items(item_list, total):
    """
    Returns a list of null objects. Useful when you want to always show n
    results and you have a list of < n.
    """
    list_length = len(item_list)
    expected_total = int(total)
    if list_length != expected_total:
        return range(0, expected_total - list_length)
    return ''
12848fe61457b2d138a2fcd074fb6ec6d09cbaf5
706,861
import struct


def _read_string(fp):
    """Read the next sigproc-format string in the file.

    Parameters
    ----------
    fp : file
        file object to read from.

    Returns
    -------
    str
        read value from the file
    """
    strlen = struct.unpack("I", fp.read(struct.calcsize("I")))[0]
    return fp.read(strlen).decode()
346a65e6be15f593c91dde34cb45c53cb5731877
706,862
def make_attrstring(attr):
    """Returns an attribute string in the form key="val" """
    attrstring = ' '.join(['%s="%s"' % (k, v) for k, v in attr.items()])
    return '%s%s' % (' ' if attrstring != '' else '', attrstring)
fbaf2b763b4b1f4399c45c3a19698d0602f0b224
706,863
from typing import Callable
from typing import Iterable
from typing import List


def get_index_where(condition: Callable[..., bool], iterable: Iterable) -> List[int]:
    """Return index values where `condition` is `True`."""
    return [idx for idx, item in enumerate(iterable) if condition(item)]
6f99086730dfc2ab1f87df90632bc637fc6f2b93
706,864
import shutil
import os


def missing_toolchain(triplet: str) -> bool:
    """
    Checks whether gcc, g++ and binutils are installed and in the path for the
    current triplet

    :param triplet: a triplet in the form riscv64-linux-gnu
    :return: True if some part of the toolchain is missing, False otherwise
    """
    toolchain_expected = {"ar", "as", "gcc", "g++", "ld", "ranlib", "strip"}
    retval = False
    for tool in toolchain_expected:
        retval |= shutil.which(cmd=triplet + "-" + tool, mode=os.X_OK) is None
    return retval
9fafae1a4bd5ce781183f28d098580624b6ba1ef
706,865
from typing import Sequence


def argmax(sequence: Sequence) -> int:
    """Find the argmax of a sequence."""
    return max(range(len(sequence)), key=lambda i: sequence[i])
58cc1d0e952a7f15ff3fca721f43c4c658c41de1
706,866
def read_data_from_device(device, location):
    """ Reads text data from device and returns it as output

        Args:
            location ('str'): Path to the text file

        Raises:
            FileNotFoundError: File Does not Exist

        Returns:
            Data ('str'): Text data read from the device
    """
    # IMPORTANT
    # =========
    # This API does not require the device to have network connection
    # copy_from_device is the other API that behaves similar to this one,
    # but it requires network connection since it uses SCP

    try:
        return device.execute("cat {}".format(location))
    except Exception:
        # Throw file not found error when encounter generic error
        raise FileNotFoundError("File {} does not exist.".format(location))
f6895d25f9f9e68ec33bb2d8f693999a7e3a2812
706,867
def _client_ip(client):
    """Compatibility layer for Flask<0.12."""
    return getattr(client, 'environ_base', {}).get('REMOTE_ADDR')
1bd110563c5e7165ec795d16e0f0d7be6d053db1
706,868
from typing import Set


def tagify(tail=u'', head=u'', sep=u'.'):
    """
    Returns namespaced event tag string.
    Tag generated by joining with sep the head and tail in that order
    head and tail may be a string or a list, tuple, or Set of strings

    If head is a list, tuple or Set Then
        join with sep all elements of head individually
    Else
        join in whole as string prefix

    If tail is a list, tuple or Set Then
        join with sep all elements of tail individually
    Else
        join in whole as string suffix

    If either head or tail is empty then do not exhibit in tag
    """
    if isinstance(head, (list, tuple, Set)):  # list like so expand
        parts = list(head)
    else:  # string like so put in list
        parts = [head]

    if isinstance(tail, (list, tuple, Set)):  # listlike so extend parts
        parts.extend(tail)
    else:  # string like so append
        parts.append(tail)

    return sep.join([part for part in parts if part])
ddebdc0c4224db428a4338fd1e4c61137ac2d5c5
706,869
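A few usage sketches for tagify with made-up heads and tails; empty parts are dropped from the joined tag.

assert tagify('blow', 'fish') == 'fish.blow'
assert tagify(tail=['leak', 'detected'], head='alarm') == 'alarm.leak.detected'
assert tagify('ping') == 'ping'   # empty head is omitted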
def classify_design_space(action: str) -> int:
    """
    The returning index corresponds to the list stored in "count":
    [sketching, 3D features, mating, visualizing, browsing, other organizing]

    Formulas for each design space action:
        sketching = "Add or modify a sketch" + "Copy paste sketch"
        3D features = "Commit add or edit of part studio feature" +
                      "Delete part studio feature" - "Add or modify a sketch"
        mating = "Add assembly feature" + "Delete assembly feature" +
                 "Add assembly instance" + "Delete assembly instance"
        visualizing = "Start assembly drag" + "Animate action called"
        browsing = Opening a tab + Creating a tab + Deleting a tab + Renaming a tab
        other organizing = "Create version" + "Cancel Operation" +
                           "Undo Redo Operation" + "Merge branch" +
                           "Branch workspace" + "Update version"

    :param action: the action to be classified
    :return: the index of the action type that this action is accounted for;
             if the action does not belong to any category, return -1

    Note: "Add or modify a sketch" is special (+1 for sketching and -1 for 3D features), return -10
    """
    # Creating a sketch is special as it affects both the sketching and the 3D features counts
    if action == "Add or modify a sketch":
        return -10
    # Sketching
    elif action == "Copy paste sketch":
        return 0
    # 3D features
    elif action in ["Commit add or edit of part studio feature", "Delete part studio feature"]:
        return 1
    # Mating
    elif action in ["Add assembly feature", "Delete assembly feature",
                    "Add assembly instance", "Delete assembly instance"]:
        return 2
    # Visualizing
    elif action in ["Start assembly drag", "Animate action called"]:
        return 3
    # Browsing
    elif "Tab" in action and ("opened" in action or "created" in action or
                              "deleted" in action or "renamed" in action):
        return 4
    # Other organizing
    elif action in ["Create version", "Cancel Operation", "Undo Redo Operation",
                    "Merge branch", "Branch workspace", "Update version"]:
        return 5
    # Not classified (Optional: print out the unclassified actions)
    else:
        return -1
22dc68aa23258691b0d4b9f1b27a9e8451b275d9
706,870
import hashlib


def get_sha256_hash(plaintext):
    """
    Hashes an object using SHA256. Usually used to generate hash of chat ID for lookup

    Parameters
    ----------
    plaintext: int or str
        Item to hash

    Returns
    -------
    str
        Hash of the item
    """
    hasher = hashlib.sha256()
    string_to_hash = str(plaintext)
    hasher.update(string_to_hash.encode('utf-8'))
    hash = hasher.hexdigest()
    return hash
79735973b8ad73823662cc428513ef393952b681
706,871
def get_bit_coords(dtype_size):
    """Get coordinates for bits assuming float dtypes."""
    if dtype_size == 16:
        coords = (
            ["±"]
            + [f"e{int(i)}" for i in range(1, 6)]
            + [f"m{int(i-5)}" for i in range(6, 16)]
        )
    elif dtype_size == 32:
        coords = (
            ["±"]
            + [f"e{int(i)}" for i in range(1, 9)]
            + [f"m{int(i-8)}" for i in range(9, 32)]
        )
    elif dtype_size == 64:
        coords = (
            ["±"]
            + [f"e{int(i)}" for i in range(1, 12)]
            + [f"m{int(i-11)}" for i in range(12, 64)]
        )
    else:
        raise ValueError(f"dtype of size {dtype_size} neither known nor implemented.")
    return coords
6400017e47506613cf15162425843ce2b19eed3e
706,872
import sys
import os


def want_color_output():
    """Return ``True`` if colored output is possible/requested and not running in GUI.

    Colored output can be explicitly requested by setting :envvar:`COCOTB_ANSI_OUTPUT` to ``1``.
    """
    want_color = sys.stdout.isatty()  # default to color for TTYs
    if os.getenv("NO_COLOR") is not None:
        want_color = False
    if os.getenv("COCOTB_ANSI_OUTPUT", default="0") == "1":
        want_color = True
    if os.getenv("GUI", default="0") == "1":
        want_color = False
    return want_color
bda881ef70cfdb9bbb1eb1b81958f837f8bd92ed
706,873
def jaccard(list1, list2):
    """calculates Jaccard distance from two networks\n
    | Arguments:
    | :-
    | list1 (list or networkx graph): list containing objects to compare
    | list2 (list or networkx graph): list containing objects to compare\n
    | Returns:
    | :-
    | Returns Jaccard distance between list1 and list2
    """
    intersection = len(list(set(list1).intersection(list2)))
    union = (len(list1) + len(list2)) - intersection
    return 1 - float(intersection) / union
1056c3d5a592bea9a575c24e947a91968b931000
706,874
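A quick check of the jaccard helper above with made-up lists: two of the four distinct elements are shared, so the distance is 0.5.

assert jaccard([1, 2, 3], [2, 3, 4]) == 0.5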
def default_argument_preprocessor(args):
    """Return unmodified args and an empty dict for extras"""
    extras = {}
    return args, extras
2031dde70dbe54beb933e744e711a0bf8ecaed99
706,875
def parse_cmd(script, *args):
    """Returns a one line version of a bat script
    """
    if args:
        raise Exception('Args for cmd not implemented')
    # http://www.microsoft.com/resources/documentation/windows/xp/all/proddocs/en-us/cmd.mspx?mfr=true
    oneline_cmd = '&&'.join(script.split('\n'))
    oneline_cmd = 'cmd.exe /c "%s"' % oneline_cmd
    return oneline_cmd
b3355b20af2ca1ab2e996643ae0918a2d387760f
706,876
def expected_inheritance(variant_obj):
    """Gather information from common gene information."""
    manual_models = set()
    for gene in variant_obj.get('genes', []):
        manual_models.update(gene.get('manual_inheritance', []))
    return list(manual_models)
29bf223249e29942803cef8468dbd8bd04979e81
706,877
def by_label(move_data, value, label_name, filter_out=False, inplace=False):
    """
    Filters trajectories points according to specified value and column label.

    Parameters
    ----------
    move_data : dataframe
        The input trajectory data
    value : The type of the feature values to be used to filter the trajectories
        Specifies the value used to filter the trajectories points
    label_name : String
        Specifies the label of the column used in the filtering
    filter_out : boolean, optional(false by default)
        If set to True, it will return trajectory points with feature value
        different from the value specified in the parameters, otherwise the
        trajectories points with the same feature value as the one specified
        in the parameters.
    inplace : boolean, optional(false by default)
        if set to true the original dataframe will be altered to contain
        the result of the filtering, otherwise a copy will be returned.

    Returns
    -------
    move_data : dataframe or None
        Returns dataframe with trajectories points filtered by label.
    """
    try:
        filter_ = move_data[label_name] == value
        if filter_out:
            filter_ = ~filter_
        return move_data.drop(index=move_data[~filter_].index, inplace=inplace)
    except Exception as e:
        raise e
3d772f741539009b756744539f4a524e6ad402ea
706,878
import time


def task_dosomething(storage):
    """ Task that gets launched to handle something in the background until it
    is completed and then terminates.

    Note that this task doesn't return until it is finished, so it won't be
    listening for Threadify pause or kill requests.
    """
    # An important task that we want to run in the background.
    for i in range(10):
        print(i, end="")
        time.sleep(1)
    return False
9eabf3977c53932de8d775c21e4a1209003e0892
706,879
def _path(path):
    """Helper to build an OWFS path from a list"""
    path = "/" + "/".join(str(x) for x in path)
    return path.encode("utf-8") + b"\0"
d38937deb459bb9bf393402efc31a90a285d4a6d
706,880
def readFPs(filepath):
    """Reads a list of fingerprints from a file"""
    try:
        myfile = open(filepath, "r")
    except:
        raise IOError("file does not exist:", filepath)
    else:
        fps = []
        for line in myfile:
            if line[0] != "#":  # ignore comments
                line = line.rstrip().split()
                fps.append(line[0])
    return fps
96d483360c411a27a3b570875f61344ef4dae573
706,883
def findScanNumberString(s):
    """If s contains 'NNNN', where N stands for any digit, return the string
    beginning with 'NNNN' and extending to the end of s.
    If 'NNNN' is not found, return ''."""
    n = 0
    for i in range(len(s)):
        if s[i].isdigit():
            n += 1
        else:
            n = 0
        if n == 4:
            return s[i-3:]
    return ''
fd5973383bcf8b74573408d95d4f0065dfbda32f
706,884
import math


def point_in_ellipse(origin, point, a, b, pa_rad, verbose=False):
    """
    Identify if the point is inside the ellipse.

    :param origin A SkyCoord defining the centre of the ellipse.
    :param point A SkyCoord defining the point to be checked.
    :param a The semi-major axis in arcsec of the ellipse
    :param b The semi-minor axis in arcsec of the ellipse
    :param pa_rad The position angle of the ellipse. This is the angle of the
        major axis measured in radians East of North (or CCW from the y axis).
    """
    # Convert point to be in plane of the ellipse, accounting for distortions at high declinations
    p_ra_dist = (point.icrs.ra.degree - origin.icrs.ra.degree) * math.cos(origin.icrs.dec.rad)
    p_dec_dist = point.icrs.dec.degree - origin.icrs.dec.degree

    # Calculate the angle and radius of the test point relative to the centre of the ellipse
    # Note that we reverse the ra direction to reflect the CCW direction
    radius = math.sqrt(p_ra_dist**2 + p_dec_dist**2)
    diff_angle = (math.pi/2 + pa_rad) if p_dec_dist == 0 else math.atan(p_ra_dist / p_dec_dist) - pa_rad

    # Obtain the point position in terms of the ellipse major and minor axes
    minor = radius * math.sin(diff_angle)
    major = radius * math.cos(diff_angle)
    if verbose:
        print('point relative to ellipse centre angle:{} deg radius:{:.4f}" maj:{:.2f}" min:{:.2f}"'.format(
            math.degrees(diff_angle), radius*3600, major*3600, minor*3600))

    a_deg = a / 3600.0
    b_deg = b / 3600.0

    # Calc distance from origin relative to a and b
    dist = math.sqrt((major / a_deg) ** 2 + (minor / b_deg) ** 2)
    if verbose:
        print("Point %s is %f from ellipse %f, %f, %f at %s." % (point, dist, a, b, math.degrees(pa_rad), origin))
    return round(dist, 3) <= 1.0
9c4b056c205b8d25e80211adb0eeb1cdfaf4c11c
706,885
def isNumberString(value):
    """ Checks if value is a string that has only digits - possibly with leading '+' or '-' """
    if not value:
        return False
    sign = value[0]
    if (sign == '+') or (sign == '-'):
        if len(value) <= 1:
            return False
        absValue = value[1:]
        return absValue.isdigit()
    else:
        if len(value) <= 0:
            return False
        else:
            return value.isdigit()
06feaab112e184e6a01c2b300d0e4f1a88f2250e
706,886
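A few checks of isNumberString with made-up inputs: signed integers pass, a bare sign or a decimal point does not.

assert isNumberString("-42") is True
assert isNumberString("+") is False
assert isNumberString("3.14") is False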
from typing import Union
from typing import Dict
from typing import Any
from typing import List


def _func_length(target_attr: Union[Dict[str, Any], List[Any]], *_: Any) -> int:
    """Function for returning the length of a dictionary or list."""
    return len(target_attr)
b66a883c763c93d9a62a7c09324ab8671d325d05
706,887
def configuration_filename(feature_dir, proposed_splits, split, generalized):
    """Calculates configuration specific filenames.

    Args:
        feature_dir (`str`): directory of features wrt to dataset directory.
        proposed_splits (`bool`): whether using proposed splits.
        split (`str`): train split.
        generalized (`bool`): whether GZSL setting.

    Returns:
        `str` containing arguments in appropriate form.
    """
    return '{}{}_{}{}.pt'.format(
        feature_dir,
        ('_proposed_splits' if proposed_splits else ''),
        split,
        '_generalized' if generalized else '',
    )
a3fc2c23746be7ed17f91820dd30a8156f91940c
706,888
import re


def getAllNumbers(text):
    """
    This function is a copy of systemtools.basics.getAllNumbers
    """
    if text is None:
        return None
    allNumbers = []
    if len(text) > 0:
        # Remove space between digits :
        spaceNumberExists = True
        while spaceNumberExists:
            text = re.sub('(([^.,0-9]|^)[0-9]+) ([0-9])', '\\1\\3', text, flags=re.UNICODE)
            if re.search('([^.,0-9]|^)[0-9]+ [0-9]', text) is None:
                spaceNumberExists = False
        numberRegex = '[-+]?[0-9]+[.,][0-9]+|[0-9]+'
        allMatchIter = re.finditer(numberRegex, text)
        if allMatchIter is not None:
            for current in allMatchIter:
                currentFloat = current.group()
                currentFloat = re.sub("\s", "", currentFloat)
                currentFloat = re.sub(",", ".", currentFloat)
                currentFloat = float(currentFloat)
                if currentFloat.is_integer():
                    allNumbers.append(int(currentFloat))
                else:
                    allNumbers.append(currentFloat)
    return allNumbers
42d45d6bb7a5ae1b25d2da6eadb318c3388923d6
706,889
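A usage sketch for getAllNumbers with a made-up sentence: decimals come back as float, whole numbers as int.

assert getAllNumbers("It costs 12.5 dollars and 3 cents") == [12.5, 3]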
def _fileobj_to_fd(fileobj):
    """Return a file descriptor from a file object.

    Parameters:
    fileobj -- file object or file descriptor

    Returns:
    corresponding file descriptor

    Raises:
    ValueError if the object is invalid
    """
    if isinstance(fileobj, int):
        fd = fileobj
    else:
        try:
            fd = int(fileobj.fileno())
        except (AttributeError, TypeError, ValueError):
            raise ValueError('Invalid file object: '
                             '{!r}'.format(fileobj)) from None
    if fd < 0:
        raise ValueError('Invalid file descriptor: {}'.format(fd))
    return fd
8b1bea4083c0ecf481c712c8b06c76257cea43db
706,890