Dataset columns: content: string (39 to 14.9k chars), sha1: string (40 chars), id: int64 (0 to 710k)
def get_data_field(thing_description, data_field_list):
    """Get the field specified by 'data_field_list' from a thing description.

    Args:
        data_field_list (list): list of str specifying the hierarchical field
            names. For example, if the parameter value is
            ['foo', 'bar', 'foobar'], then this function will try to get
            thing_description['foo']['bar']['foobar'] and return the value.
            If any of the fields does not exist, an error will occur.

    Returns:
        object: the content specified by the data field
    """
    for data_field in data_field_list:
        thing_description = thing_description[data_field]
    return thing_description
f0ef0a46fbcafa993e01f59349c1283b51bbd393
701,382
def get_speed_formatted_str(speed):
    """Returns the speed with always two whole-number digits and two decimal
    places. Example: 03.45

    Args:
        speed (float): The actual speed of the car

    Returns:
        str: The text format of the speed
    """
    speed_str = "{:0.2f}".format(round(speed, 2))
    return speed_str.zfill(5)
c05d20f568950f8236f9e46e90387e3a71090589
701,384
def ccw(A, B, C):
    """Check if a point C is counter-clockwise to AB."""
    return (C[1] - A[1]) * (B[0] - A[0]) > (B[1] - A[1]) * (C[0] - A[0])
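A common use of this predicate is the classic segment-intersection test; the sketch below assumes `ccw` above is in scope and that segments are given as pairs of (x, y) tuples:

def segments_intersect(p1, p2, p3, p4):
    # Segments (p1, p2) and (p3, p4) properly intersect iff the endpoints
    # of each segment lie on opposite sides of the other segment.
    return (ccw(p1, p3, p4) != ccw(p2, p3, p4)
            and ccw(p1, p2, p3) != ccw(p1, p2, p4))

assert segments_intersect((0, 0), (2, 2), (0, 2), (2, 0))      # crossing diagonals
assert not segments_intersect((0, 0), (1, 1), (2, 2), (3, 3))  # disjoint collinear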
c1afb4e510be6a85ad7de1aa924917e37b227dbe
701,385
from collections import Counter


def check_cardinality(df, cat_cols, threshold=8):
    """Check categorical cardinality.

    Checks the cardinality of categorical features of a given dataset.
    Returns two dictionaries, one for features with low cardinality and
    another for features with high cardinality. The low/high cardinality
    criteria can be tuned with the `threshold` parameter.

    Parameters
    ----------
    df : pandas.DataFrame
        Dataset whose categorical features will be analyzed.
    cat_cols : list of str
        List of column names. The columns must be all categorical.
    threshold : int, optional
        Numeric criteria to separate low cardinality features from high
        cardinality ones. Default value is 8.

    Returns
    -------
    low_card : dict
        Dictionary containing the name of the low cardinality features as
        keys and their cardinality as values.
    high_card : dict
        Dictionary containing the name of the high cardinality features as
        keys and their cardinality as values.
    """
    high_card = {}
    low_card = {}
    for col in cat_cols:
        rank = len(Counter(df[col]))
        if rank <= threshold:
            low_card[col] = rank
        else:
            high_card[col] = rank
    return low_card, high_card
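A minimal usage sketch, assuming pandas is installed and the function above is in scope; the column names and values are made up for illustration:

import pandas as pd

df = pd.DataFrame({
    "color": ["red", "green", "blue", "red"],  # 3 distinct values
    "id": ["a1", "b2", "c3", "d4"],            # all distinct
})
low, high = check_cardinality(df, ["color", "id"], threshold=3)
print(low)   # {'color': 3}
print(high)  # {'id': 4}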
5b33a39c1da007de46ca409c8fb531b3b3600a7b
701,388
def parse_veh_comp(xmldoc):
    """parses the vehicle composition from the VISSIM data

    :param xmldoc: input VISSIM xml
    :type xmldoc: xml.dom.minidom.Document
    :return: relevant VISSIM vehicleComposition data
    :rtype: dict of list of dict
    """
    veh_cmp_d = dict()  # local vehicle compositions' dict
    for vehicle_comp in xmldoc.getElementsByTagName('vehicleComposition'):
        rel_flows = vehicle_comp.getElementsByTagName(
            'vehicleCompositionRelativeFlow')
        flow_l = []
        for flow in rel_flows:
            flw_d = {
                'desSpeedDistr': flow.getAttribute('desSpeedDistr'),
                'rel_flow': flow.getAttribute('relFlow'),
                'vehType': flow.getAttribute('vehType'),
            }
            flow_l.append(flw_d)  # list of dictionaries
        veh_cmp_d[vehicle_comp.getAttribute('no')] = flow_l
    return veh_cmp_d
195b2c8dcbd055d5c8e8fdb4f6b68f360c60c961
701,390
def validate_enum(datum, schema, **kwargs):
    """
    Check that the data value matches one of the enum symbols.

    i.e. "blue" in ["red", "green", "blue"]

    Parameters
    ----------
    datum: Any
        Data being validated
    schema: dict
        Schema
    kwargs: Any
        Unused kwargs
    """
    return datum in schema["symbols"]
689fef653b757435d45e76afbf16245d0d53839f
701,396
import re


def get_valid_filename(s):
    """Sanitize string to make it reasonable to use as a filename.

    From https://github.com/django/django/blob/master/django/utils/text.py

    Parameters
    ----------
    s : string

    Examples
    --------
    >>> get_valid_filename(r'A,bCd $%#^#*!()"\' .ext ')
    'a_bcd__.ext'
    """
    s = re.sub(r'[ ,;\t]', '_', s.strip().lower())
    return re.sub(r'(?u)[^-\w.]', '', s)
a8161a16d0bd8ad0c5d9ff20c56b52fbdba2d859
701,397
from typing import Literal
from typing import List


def get_foot_marker(foot: Literal["left", "right"]) -> List[str]:
    """Get the names of all markers that are attached to a foot (left or right)."""
    sensors = ["{}_fcc", "{}_toe", "{}_fm5", "{}_fm1"]
    return [s.format(foot[0]) for s in sensors]
518fbb3f68cbf8622b2bf1fa85f9ecae8008c456
701,401
def is_config_or_test(example, scan_width=5, coeff=0.05):
    """Check if file is a configuration file or a unit test by:
    1- looking for keywords in the first few lines of the file.
    2- counting the number of occurrences of the words 'config' and 'test'
       with respect to the number of lines.
    """
    keywords = ["unit tests", "test file", "configuration file"]
    lines = example["content"].splitlines()
    count_config = 0
    count_test = 0
    # first test
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"config_or_test": True}
    # second test
    nlines = example["content"].count("\n")
    threshold = int(coeff * nlines)
    for line in lines:
        count_config += line.lower().count("config")
        count_test += line.lower().count("test")
        if count_config > threshold or count_test > threshold:
            return {"config_or_test": True}
    return {"config_or_test": False}
0e2823897b72a916afd9672beed904190bb2c1c2
701,404
def has_shape(data, shape, allow_empty=False):
    """
    Determine if a data object has the provided shape

    At any level, the object in `data` and in `shape` must have the same
    type. A dict is the same shape if all its keys and values have the same
    shape as the key/value in `shape`. The number of keys/values is not
    relevant. A list is the same shape if all its items have the same shape
    as the value in `shape`. A tuple is the same shape if it has the same
    length as `shape` and all the values have the same shape as the
    corresponding value in `shape`. Any other object simply has to have the
    same type. If `allow_empty` is set, lists and dicts in `data` will pass
    even if they are empty.
    """
    if not isinstance(data, type(shape)):
        return False
    if isinstance(data, dict):
        # next(iter(...)) instead of keys()[0]: dict views are not
        # subscriptable in Python 3.
        key_shape = next(iter(shape.keys()))
        value_shape = next(iter(shape.values()))
        return (allow_empty or len(data) > 0) and \
            all(has_shape(k, key_shape) for k in data.keys()) and \
            all(has_shape(v, value_shape) for v in data.values())
    elif isinstance(data, list):
        return (allow_empty or len(data) > 0) and \
            all(has_shape(v, shape[0]) for v in data)
    elif isinstance(data, tuple):
        return len(data) == len(shape) and all(
            has_shape(data[i], shape[i]) for i in range(len(data)))
    else:
        return True
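A quick sketch of how the shape templates read, assuming `has_shape` above is in scope; the template values only carry type information:

assert has_shape({"a": "x", "b": "y"}, {"": ""})  # any str -> str dict
assert has_shape([1, 2, 3], [0])                  # non-empty list of ints
assert not has_shape([1, "two"], [0])             # mixed item types fail
assert has_shape((1, "a"), (0, ""))               # tuples match position-wise
assert has_shape([], [0], allow_empty=True)       # empty passes when allowed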
f04add860bb6b886bb693ddc85b3d4877245d749
701,406
def RR_calc(classes, TOP):
    """
    Calculate Global performance index (RR).

    :param classes: confusion matrix classes
    :type classes: list
    :param TOP: number of positives in predict vector per class
    :type TOP: dict
    :return: RR as float
    """
    try:
        class_number = len(classes)
        result = sum(list(TOP.values()))
        return result / class_number
    except Exception:
        return "None"
814a11c339b25dc687d537efd3244ddad9c0f8fd
701,408
import torch


def view_complex_native(x: torch.FloatTensor) -> torch.Tensor:
    """Convert a PyKEEN complex tensor representation into a torch one using
    :func:`torch.view_as_complex`."""
    return torch.view_as_complex(x.view(*x.shape[:-1], -1, 2))
14e74f1c8b5e6de673c962e4381e74026d3d3db2
701,410
def cross_corr_norm(patch_0, patch_1):
    """
    Returns the normalized cross-correlation between two same-sized image
    patches.

    Parameters :
        patch_0, patch_1 : image patches
    """
    n = patch_0.shape[0] * patch_0.shape[1]
    # Mean intensities
    mu_0, mu_1 = patch_0.mean(), patch_1.mean()
    # Standard deviations
    sigma_0, sigma_1 = patch_0.std(), patch_1.std()
    return (1.0 / (n - 1)
            * (((patch_0 - mu_0) / sigma_0)
               * ((patch_1 - mu_1) / sigma_1)).sum())
213100b174993baa07ea685b23541d3dfe49ace8
701,411
def site_stat_stmt(table, site_col, values_col, fun):
    """
    Function to produce an SQL statement to make a basic summary grouped by
    a sites column.

    Parameters
    ----------
    table : str
        The database table.
    site_col : str
        The column containing the sites.
    values_col : str
        The column containing the values to be summarised.
    fun : str
        The function to apply.

    Returns
    -------
    str
        SQL statement.
    """
    fun_dict = {'mean': 'avg', 'sum': 'sum', 'count': 'count', 'min': 'min',
                'max': 'max'}
    cols_str = ', '.join([site_col, fun_dict[fun] + '(' + values_col
                          + ') as ' + values_col])
    stmt1 = "SELECT " + cols_str + " FROM " + table + " GROUP BY " + site_col
    return stmt1
c704d5687effd3c12abb3feecde9041eb88aae7a
701,413
def _format_source_error(filename, lineno, block):
    """A helper function which generates an error string.

    This function handles the work of reading the lines of the file which
    bracket the error, and formatting a string which points to the
    offending line. The output is similar to:

    File "foo.py", line 42, in bar()
           41 def bar():
    ----> 42     a = a + 1
           43     return a

    Parameters
    ----------
    filename : str
        The full path to the offending file.
    lineno : int
        The line number of the offending line.
    block : str
        The name of the block scope in which the error occurred. In the
        sample above, the block scope is 'bar'.

    Returns
    -------
    result : str
        A nicely formatted string for including in an exception. If the
        file cannot be opened, the source lines will not be included.
    """
    text = 'File "%s", line %d, in %s()' % (filename, lineno, block)
    start_lineno = max(0, lineno - 1)
    end_lineno = start_lineno + 2
    lines = []
    try:
        with open(filename, 'r') as f:
            for idx, line in enumerate(f, 1):
                if idx >= start_lineno and idx <= end_lineno:
                    lines.append((idx, line))
                elif idx > end_lineno:
                    break
    except IOError:
        pass
    if len(lines) > 0:
        digits = str(len(str(end_lineno)))
        line_templ = '\n----> %' + digits + 'd %s'
        other_templ = '\n      %' + digits + 'd %s'
        for lno, line in lines:
            line = line.rstrip()
            if lno == lineno:
                text += line_templ % (lno, line)
            else:
                text += other_templ % (lno, line)
    return text
32d093e53811415338877349ca8e64b0e9261b1d
701,414
def calc_exposure(k, src_rate, bgd_rate, read_noise, neff):
    """
    Compute the time to get to a given significance (k) given the source
    rate, the background rate, the read noise, and the number of effective
    background pixels

    -----
    time = calc_exposure(k, src_rate, bgd_rate, read_noise, neff)
    """
    denom = 2 * src_rate**2
    nom1 = (k**2) * (src_rate + neff * bgd_rate)
    nom2 = (k**4 * (src_rate + neff * bgd_rate)**2
            + 4 * k**2 * src_rate**2 * neff * read_noise**2)**(0.5)
    exposure = (nom1 + nom2) / denom
    return exposure
993853d244cfa5c6619300def02294a2497d78df
701,415
def type_or_null(names):
    """Return, for each type in `names`, the type-or-null union
    `[name, 'null']`."""
    return [[name, 'null'] for name in names]
72cbefcbba08c98d3c4c11a126e22b6f83f4175b
701,416
import torch


def quadratic_matmul(x: torch.Tensor, A: torch.Tensor) -> torch.Tensor:
    """Matrix quadratic multiplication.

    Parameters
    ----------
    x : torch.Tensor, shape=(..., X)
        A batch of vectors.
    A : torch.Tensor, shape=(..., X, X)
        A batch of square matrices.

    Returns
    -------
    torch.Tensor, shape=(...,)
        Batched scalar result of quadratic multiplication.
    """
    assert x.shape[-1] == A.shape[-1] == A.shape[-2]
    x_T = x.unsqueeze(-2)     # shape=(..., 1, X)
    x_ = x.unsqueeze(-1)      # shape=(..., X, 1)
    quadratic = x_T @ A @ x_  # shape=(..., 1, 1)
    return quadratic.squeeze(-1).squeeze(-1)
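A small sanity check, assuming torch and the function above are in scope; it verifies the batched x^T A x against an einsum formulation:

x = torch.randn(8, 3)     # batch of 8 vectors of size 3
A = torch.randn(8, 3, 3)  # batch of 8 3x3 matrices
out = quadratic_matmul(x, A)  # shape=(8,)
ref = torch.einsum("bi,bij,bj->b", x, A, x)
assert torch.allclose(out, ref, atol=1e-5)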
78335f6a57f34701f3f1fe9b8dd74e9b8be686a3
701,418
def filter_linksearchtotals(queryset, filter_dict):
    """
    Adds filter conditions to a LinkSearchTotal queryset based on form
    results.

    queryset -- a LinkSearchTotal queryset
    filter_dict -- a dictionary of data from the user filter form

    Returns a queryset
    """
    if "start_date" in filter_dict:
        start_date = filter_dict["start_date"]
        if start_date:
            queryset = queryset.filter(date__gte=start_date)
    if "end_date" in filter_dict:
        end_date = filter_dict["end_date"]
        if end_date:
            queryset = queryset.filter(date__lte=end_date)
    return queryset
96a7e816e7e2d6632db6e6fb20dc50a56a273be9
701,424
def find_in_list(list_one, list_two):
    """Find and return an element from list_one that is in list_two, or
    None otherwise."""
    for element in list_one:
        if element in list_two:
            return element
    return None
9376b38a06cadbb3e06c19cc895eff46fd09f5c1
701,427
def _real_freq_filter(rfft_signal, filters):
    """Helper function to apply a full filterbank to a rfft signal."""
    nr = rfft_signal.shape[0]
    subbands = filters[:, :nr] * rfft_signal
    return subbands
0bee4822ac1d6b5672e4ad89bb59f03d72828244
701,430
def _strip_extension(name, ext):
    """Remove trailing extension from name."""
    ext_len = len(ext)
    if name[-ext_len:] == ext:
        name = name[:-ext_len]
    return name
aa1e6f8c68e09597e2566ecd96c70d2c748ac600
701,431
from typing import List
from typing import Optional


def span_to_label(tokens: List[str],
                  labeled_spans: dict,
                  scheme: Optional[str] = 'BIO') -> List[str]:
    """
    Convert spans to token labels.

    :param tokens: a list of tokens
    :param labeled_spans: a dict mapping (start_idx, end_idx) tuples to labels
    :param scheme: labeling scheme, in ['BIO', 'BILOU'].
    :return: a list of string labels
    """
    assert scheme in ['BIO', 'BILOU'], ValueError("unknown labeling scheme")
    if labeled_spans:
        assert list(labeled_spans.keys())[-1][1] <= len(tokens), \
            ValueError("label spans out of scope!")

    labels = ['O'] * len(tokens)
    for (start, end), label in labeled_spans.items():
        if scheme == 'BIO':
            labels[start] = 'B-' + label
            if end - start > 1:
                labels[start + 1: end] = ['I-' + label] * (end - start - 1)
        elif scheme == 'BILOU':
            if end - start == 1:
                labels[start] = 'U-' + label
            else:
                labels[start] = 'B-' + label
                labels[end - 1] = 'L-' + label
                if end - start > 2:
                    labels[start + 1: end - 1] = \
                        ['I-' + label] * (end - start - 2)
    return labels
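A short usage sketch, assuming the function above is in scope; span keys are (start, end) with an exclusive end index:

tokens = ["John", "lives", "in", "New", "York", "City"]
spans = {(0, 1): "PER", (3, 6): "LOC"}
print(span_to_label(tokens, spans))
# ['B-PER', 'O', 'O', 'B-LOC', 'I-LOC', 'I-LOC']
print(span_to_label(tokens, spans, scheme='BILOU'))
# ['U-PER', 'O', 'O', 'B-LOC', 'I-LOC', 'L-LOC']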
dbd572d4c306f31202c93b5983f5dd4cdd237074
701,435
def convert_ids_to_tokens(inv_vocab, ids):
    """Converts a sequence of ids into tokens using the vocab."""
    output = []
    for item in ids:
        output.append(inv_vocab[item])
    return output
da1aa84d271fe46cedf530c2871ee54c57e676e2
701,438
import importlib


def _check_import(package_name):
    """Import a package, or give a useful error message if it's not there."""
    try:
        return importlib.import_module(package_name)
    except ImportError:
        err_msg = (
            f"{package_name} is not installed. "
            "It may be an optional powersimdata requirement."
        )
        raise ImportError(err_msg)
c4cb7c5a49071663d23e9530155bdee3304a5f72
701,441
import getpass


def prompt(identifier) -> tuple:
    """Credential entry helper.

    Returns:
        Tuple of login_id, key
    """
    login_id = input(f"API Login ID for {identifier}: ")
    key = getpass.getpass(f"API Transaction Key for {identifier}: ")
    return (login_id, key)
be0ed9be1a60c2c29753d6a9ca8b3f12294f183b
701,442
import time


def generate_timestamp(expire_after: float = 30) -> int:
    """
    :param expire_after: expires in seconds.
    :return: expiry timestamp in milliseconds.
    """
    return int(time.time() * 1000 + expire_after * 1000)
16f2fcd77de9edb1e167f1288e37a10491469c22
701,445
import base64


def b64e(s):
    """b64e(s) -> str

    Base64 encodes a string

    Example:
        >>> b64e("test")
        'dGVzdA=='
    """
    # Encode str input to bytes first; base64.b64encode only accepts
    # bytes-like objects on Python 3. Decode so the result is str, as the
    # docstring promises.
    if isinstance(s, str):
        s = s.encode()
    return base64.b64encode(s).decode('ascii')
2562f5d18ac59bbe4e8a28ee4033eaa0f10fc641
701,446
import random
import string

import yaml


def tmp_config_file(dict_: dict) -> str:
    """
    Dumps dict into a yaml file that is saved in a randomly named file.
    Used as a config file to create an ObservatoryConfig instance.

    :param dict_: config dict
    :return: path of temporary file
    """
    content = yaml.safe_dump(dict_).replace("'!", "!").replace("':", ":")
    file_name = "".join(random.choices(string.ascii_lowercase, k=10))
    with open(file_name, "w") as f:
        f.write(content)
    return file_name
d4ea42a8dc1824757df7f9823f44f7fc181b29aa
701,447
import pprint


def check_overlapping(features):
    """Check for elements of `features` with overlapping ranges.

    In the case of overlap, raise a ValueError whose message names the
    positions of the overlapping features.
    """
    features = features[:]
    overlapping = []
    for i in range(len(features) - 1):
        prev_name, prev_start, prev_end = features[i]
        name, start, end = features[i + 1]
        if prev_end >= start:
            overlap = ((prev_name, prev_start, prev_end), (name, start, end))
            overlapping.append(overlap)
            raise ValueError('overlapping features: '
                             + pprint.pformat(overlap))
    return overlapping
6a246aca29c01b32091d7890b6a55d66367e8e14
701,450
def compute_level(id, tree):
    """Compute the level of an id in a tree."""
    topic = tree[id]
    level = 0
    while id != 0:
        level += 1
        id = topic['parent']
        topic = tree[id]
    return level
fb7fbc1c1f97e85c03abdd453a3deb3411960e45
701,451
def none_or_valid_float_value_as_string(str_to_check):
    """
    Unless a string is "none", tries to convert it to a float and back to
    check that it represents a valid float value. Throws ValueError if type
    conversion fails.

    This function is only needed because the MATLAB scripts take some
    arguments either as a float value in string form or as "none".

    :param str_to_check: string to validate
    :return: string which is either "none" or represents a valid float value
    """
    return str_to_check if str_to_check == "none" else str(float(str_to_check))
54f3c63fab0752678cb5a69723aa7790ab11a624
701,453
from datetime import datetime


def convert_time(time_str):
    """Convert iso string to date time object

    :param time_str: String time to convert
    """
    try:
        dt = datetime.strptime(time_str, "%Y-%m-%dT%H:%Mz")
        return dt
    except Exception:
        return time_str
4ba3d5b8af4305cc44afb60d02eeb1b1d041fab9
701,456
def suck_out_formats(reporters):
    """Builds a dictionary mapping edition keys to their cite_format if any.

    The dictionary takes the form of:
        {
            'T.C. Summary Opinion': '{reporter} {volume}-{page}',
            'T.C. Memo.': '{reporter} {volume}-{page}'
            ...
        }

    In other words, this lets you go from an edition match to its parent
    key.
    """
    formats_out = {}
    for reporter_key, data_list in reporters.items():
        # For each reporter key...
        for data in data_list:
            # Map the cite_format if it exists
            for edition_key, edition_value in data["editions"].items():
                try:
                    formats_out[edition_key] = data["cite_format"]
                except KeyError:
                    # This reporter has no cite_format; skip it.
                    pass
    return formats_out
a0db907839573ca53f7c96c326afe1eac5491c63
701,457
def cast_bytes(data, encoding='utf8'):
    """Cast str, int, float to bytes."""
    if isinstance(data, str):
        return data.encode(encoding)
    elif isinstance(data, int):
        return str(data).encode(encoding)
    elif isinstance(data, float):
        return str(data).encode(encoding)
    elif isinstance(data, bytes):
        return data
    elif data is None:
        return None
    else:
        raise TypeError("Expected unicode or bytes, got %r" % data)
01ac5d7cd4a728e401075334900808a6a579deef
701,458
def B(i, j, k):
    """
    Tensor B used in constructing ROMs.

    Parameters
    ----------
    i : int
    j : int
    k : int
        Indices in the tensor.

    Returns
    -------
    int
        Tensor output.
    """
    if i == j + k:
        return -1
    elif j == i + k or k == i + j:
        return 1
    else:
        msg = "Possible Error: Indices ({},{},{})".format(i, j, k)
        print(msg)
        return 0
b4969759fd2f07bd2bd2baed48a2adfd8669987a
701,459
import io


def format_data(data, indent):
    """Format a bytestring as a C string literal.

    Arguments:
      data: Bytestring to write
      indent: Indentation for each line, a string
    Returns:
      A multiline string containing the code, with indentation before every
      line including the first. There is no final newline.
    """
    fp = io.StringIO()
    fp.write(indent)
    fp.write('"')
    rem = 80 - 3 - len(indent)

    def advance(n):
        nonlocal rem
        if rem < n:
            fp.write('"\n')
            fp.write(indent)
            fp.write('"')
            rem = 80 - 3 - len(indent)
        rem -= n

    for n, c in enumerate(data.rstrip(b"\0")):
        if 32 <= c <= 126:
            if c in b'\\"':
                advance(2)
                fp.write("\\")
            else:
                advance(1)
            fp.write(chr(c))
        elif c == 0 and (n == len(data) - 1
                         or not b"0" <= data[n + 1:n + 2] <= b"7"):
            advance(2)
            fp.write("\\0")
        else:
            advance(4)
            fp.write("\\{:03o}".format(c))
    fp.write('"')
    return fp.getvalue()
260e2b296addeb1113d657b086302197a8e365bb
701,460
def psf_to_inhg(psf):
    """Convert lb/ft^2 to inches of mercury."""
    return psf * 0.014139030952735
c1a482c71ad86ae31efece5f1a395fa354db8c3e
701,462
def get_version(v):
    """
    Generate a PEP386 compliant version
    Stolen from django.utils.version.get_version

    :param v tuple: A five part tuple indicating the version
    :returns str: Compliant version
    """
    assert isinstance(v, tuple)
    assert len(v) == 5
    assert v[3] in ('alpha', 'beta', 'rc', 'final')

    parts = 2 if v[2] == 0 else 3
    main = '.'.join(str(i) for i in v[:parts])

    sub = ''
    if v[3] != 'final':
        mapping = {'alpha': 'a', 'beta': 'b', 'rc': 'c'}
        sub = mapping[v[3]] + str(v[4])

    return str(main + sub)
946c9ea382ac7da0da1c74373cf981df174737c1
701,463
def tadsize_chart(genome_name):
    """
    Determine the distance thresholds to build coverage tracks.

    Args:
        genome_name (string): name of the reference genome;
            ex: mammals, drosophila, c_elegans, s_pombe, c_crescentus

    Returns:
        tuple of int: (lower bound, upper bound, typical resolution)
        distance thresholds in basepairs
    """
    low_bound = {
        "mammals": 100000,
        "drosophila": 10000,
        "c_elegans": 1000000,
        "s_pombe": 50000,
        "c_crescentus": 30000
    }
    upp_bound = {
        "mammals": 2000000,
        "drosophila": 100000,
        "c_elegans": 2000000,
        "s_pombe": 100000,
        "c_crescentus": 400000
    }
    typ_res = {
        "mammals": 1000000,
        "drosophila": 250000,
        "c_elegans": 3000000,
        "s_pombe": 300000,
        "c_crescentus": 250000
    }
    return low_bound[genome_name], upp_bound[genome_name], typ_res[genome_name]
844744424845a1d240fa93023b9786a7ed2cc12c
701,465
def convert_results_to_table(results, aggregation="average"):
    """
    Convert results to table

    Args:
        results (dict): results dictionary
        aggregation (str): aggregation method, either average or sum
    """
    headers = []
    columns = []
    for target_task, source_tasks in results.items():
        headers.append(target_task)
        column = []
        for _, metrics in source_tasks.items():
            if metrics:
                aggregate_value = (sum(metrics.values())
                                   if aggregation == "sum"
                                   else sum(metrics.values()) / len(metrics))
                aggregate_value = round(aggregate_value, 4)
            else:
                aggregate_value = "N/A"
            column.append(aggregate_value)
        columns.append(column)
    return columns, headers
51d38a52cb5428568c89e518df86624c5f438cf6
701,466
def get_service_type(f):
    """Retrieves service type from function."""
    return getattr(f, 'service_type', None)
fb4d98a4b4db0d10ab97d94d98ccfe21cea05fe9
701,472
def compute_error(model_data, reference_data):
    """Returns the sum of the squared differences between model and
    reference data."""
    error = ((model_data - reference_data) ** 2).sum()
    return error
66e80326b85eed67008b517dfeff99cc8352bffd
701,474
import json


def dj(_dict):
    """Converts dicts to JSON and safely handles non-serializable items."""
    return json.dumps(
        _dict,
        default=lambda o: 'ERROR: Item not JSON serializable',
        sort_keys=True,
        indent=3)
042fdc731a084e1d74175a1ac22bc5b4204050c6
701,477
def split_path(path):
    """
    Normalise S3 path string into bucket and key.

    Parameters
    ----------
    path : string
        Input path, like `s3://mybucket/path/to/file`

    Examples
    --------
    >>> split_path("s3://mybucket/path/to/file")
    ['mybucket', 'path/to/file']
    """
    if path.startswith('s3://'):
        path = path[5:]
    if '/' not in path:
        return path, ""
    else:
        return path.split('/', 1)
446f7643066864937e11b915d4ff842f21c65dd6
701,479
def unixtime2mjd(unixtime):
    """
    Converts a UNIX time stamp to Modified Julian Day

    Input:  time in UNIX seconds
    Output: time in MJD (fraction of a day)
    """
    # unixtime gives seconds passed since "The Epoch": 1.1.1970 00:00
    # MJD at that time was 40587.0
    result = 40587.0 + unixtime / (24. * 60. * 60.)
    return result
670e915b7a5de8cd9ced28e6b4d32c51ac916d54
701,480
def num_active_calls(log, ad):
    """Get the count of current active calls.

    Args:
        log: Log object.
        ad: Android Device Object.

    Returns:
        Count of current active calls.
    """
    calls = ad.droid.telecomCallGetCallIds()
    return len(calls) if calls else 0
a6674df1e8e539478db6ab1a640fbce1cf0b6b4c
701,482
def density_standard(components):
    """
    Natural gas density at standard temperature, kg/m3

    :param components: (list) List of gas components.
        Each item is an object of class GasComponent
    :return: (float) The density of natural gas at standard conditions,
        kg/m3
    """
    return sum([component.density_standard * component.volume_percentage * 0.01
                for component in components])
c087ce6ae1a3486dd092341286023c56606380a3
701,485
def generate_test_uuid(tail_value=0):
    """Returns a blank uuid with the given value added to the end segment."""
    return '00000000-0000-0000-0000-{value:0>{pad}}'.format(value=tail_value,
                                                            pad=12)
f113eef54eba9d8d1fb5234c87af3cb6290ea25e
701,487
from typing import Dict


async def total() -> Dict:
    """
    Sum of a list of numbers
    ---
    tags:
    - Total
    get:
        parameters:
        - N/A
        response:
            200:
                description: returns a dictionary with a total sum of a
                    list of numbers
    """
    return {"total": sum(list(range(10000001)))}
67c1d1abf6c76c533d8ea776dbb46a4184b0fca5
701,489
def extractAliasFromContainerName(containerName):
    """Take a compose created container name and extract the alias to
    which it will be referred. For example bddtests_vp1_0 will return
    vp1.
    """
    return containerName.split("_")[1]
a5ab9487ae31ee1a4b2ed9b67062817488107983
701,491
def get_string(request, key):
    """Returns the first value in the request args for a given key."""
    if not request.args:
        return None
    if type(key) is not bytes:
        key = key.encode()
    if key not in request.args:
        return None
    val = request.args[key][0]
    if val is not None and type(val) is bytes:
        val = val.decode()
    return val
ae43bb3e11cf21deb8f726ed6a2321c51099e4f3
701,494
def _get_maxmem(profile_df):
    """
    Get current peak memory

    :param pandas.core.frame.DataFrame profile_df: a data frame representing
        the current profile.tsv for a sample
    :return str: max memory
    """
    return "{} GB".format(str(max(profile_df['mem'])
                              if not profile_df['mem'].empty else 0))
2e628d48f7b4e0e3c1465f09da7aa795d2954a06
701,497
async def process_headers(headers):
    """Filter out unwanted headers and return as a dictionary."""
    headers = dict(headers)
    header_keys = (
        "user-agent",
        "referer",
        "accept-encoding",
        "accept-language",
        "x-real-ip",
        "x-forwarded-for",
    )
    return {k: headers.get(k) for k in header_keys}
32feeb40c12c4b69d65da1c178e396e85fc9e557
701,500
def by_circ(x, y):
    """Sort circRNAs by the start and end position."""
    return x.end - y.end if x.start == y.start else x.start - y.start
5d8205389960b92f10c450fdb6385678a279406b
701,503
def _normalize_longitude(lon: float) -> float:
    """Normalize longitudes between [-180, 180]."""
    return ((lon + 180.0) % 360.0) - 180.0
e50dc8fee9a0499a2e32f3ccf8b2e9a634581bba
701,507
def get_tf_tensor_shape(tensor):
    """Get tensor shape; if any dimension is unknown, return None."""
    shape = []
    try:
        shape = tensor.get_shape().as_list()
        if any(s is None for s in shape):
            return None
        return shape
    except Exception:  # pylint: disable=broad-except
        shape = None
    return shape
33c7e17102ad2f7d407c1f86b13c7cdfa61ca677
701,508
def _wrapped_value_and_num(value):
    """Returns `value` wrapped as a list, together with its length."""
    if isinstance(value, (list, tuple)):
        return value, len(value)
    else:
        return [value], 1
811521a18dffd9ee046751c74d4d8a097662c8cd
701,514
def perform_fit(cfmclient, fabric_uuid, name, description):
    """
    Request a full fit across managed Composable Fabrics.

    :param cfmclient: CFM Client object
    :param fabric_uuid: Valid Fabric UUID of an existing fabric
    :param name: Simple name of the fit
    :param description: Longer description of the fitting request
    :return:
    """
    data = {
        'fabric_uuid': '{}'.format(fabric_uuid),
        'name': '{}'.format(name),
        'description': '{}'.format(description)
    }
    path = 'v1/fits'
    return cfmclient.post(path, data=data)
66d6462c97b1354ef11b6378b82912030ed40a94
701,515
from pathlib import Path


def parent(path: str):
    """Returns the parent `Path` of the given path."""
    return Path(path).parent.resolve()
d86b37bc8310b024eb0a78c1b1de404cf6c2c85a
701,517
import base64


def b64encode(value):
    """Encode a value in base64."""
    return base64.b64encode(value)
988abf5a9d2c0c1f38f16fbf8f80fd43aa115223
701,520
def zip_tasks_verbose_output(table, stdstreams):
    """Zip a list of strings (table) with a list of lists (stdstreams).

    :param table: a formatted list of tasks
    :param stdstreams: for each task, a list of lines from stdout/stderr tail
    """
    if len(table) != len(stdstreams):
        raise ValueError('Can only zip same-length lists')
    output = []
    for i in range(len(table)):
        output.append(table[i])
        output.extend([line for line in stdstreams[i]])
    return output
33d74cd274ec39330cbc127a3088a430b80d234a
701,523
import pickle


def try_deserialize_handler(serialized_handler):
    """Reverse function of try_serialize_handler.

    Args:
        serialized_handler: serialized handler str or None.

    Returns:
        handler instance or None.
    """
    if serialized_handler:
        return pickle.loads(serialized_handler)
bc91e26c65add4e74affd148b8ed550fe923c925
701,526
def hardwareVersionToString(hwversion):
    """
    Converts a raw integer value into a human readable string a.b.c.d.

    :param int hwversion: raw value as received from the generator
    :return str: a human readable string 'a.b.c.d'.
    """
    if ((hwversion >> 30) & 1) != 0:
        # new format 30-22 + 21-16
        # mask here with 0xFF instead of 0x3FF to ignore the first two bits
        a = (hwversion >> 22) & 0xFF  # this one should always be 17
        b = (hwversion >> 16) & 0x3F
        c = (hwversion >> 8) & 0xFF
        d = (hwversion & 0xFF)
    else:
        # old format
        a = 2000 + ((hwversion >> 26) & 0x3F)  # 2000 + first 6 bits (MSB)
        b = (hwversion >> 22) & 0x0F           # 4 next bits
        c = (hwversion >> 16) & 0x3F           # 6 next bits
        d = (hwversion & 0xFFFF)               # last 16 bits (LSB)
    return "%d.%d.%d.%d" % (a, b, c, d)
1f1fab23706e05fa593ef4cf56f3ec4e1a6f4c6f
701,527
import code


def main(_):
    """Run an interactive console."""
    code.interact()
    return 0
e85a21c0197a599378c3f25022ec99cb557d6017
701,532
import heapq


def heapmerge(*inputs):
    """Like heapq.merge(), merges multiple sorted inputs (any iterables)
    into a single sorted output, but provides a more convenient API:
    each input is a pair of (iterable, label) and each yielded result is
    a pair of (item, label of the input) - so that it's known what input
    a given item originates from. Labels can be any objects (e.g., the
    object that produced the input stream)."""
    def entries(iterable, label):
        for obj in iterable:
            yield (obj, label)
    iterables = [entries(*inp) for inp in inputs]
    return heapq.merge(*iterables)
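A quick usage sketch, assuming the function above is in scope. Note that when two items compare equal, tuple comparison falls back to the labels, so labels should be comparable (or the items distinct):

evens = [0, 2, 4]
odds = [1, 3, 5]
merged = list(heapmerge((evens, "even"), (odds, "odd")))
print(merged)
# [(0, 'even'), (1, 'odd'), (2, 'even'), (3, 'odd'), (4, 'even'), (5, 'odd')]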
580ec9f2f0793f8907390f5c7f8eebf4ac539b59
701,533
import mimetypes


def bundle_media_description(key, filename):
    """Bundle the media description necessary for uploading.

    :param key: form-data key name
    :param filename: Local file name or path.
    :return: tuple of ('key name', ('file name', 'file object',
        'MIME content-type'))
    :rtype: tuple
    """
    content_type, _ = mimetypes.guess_type(filename)
    media_description = (key, (filename, open(filename, 'rb'), content_type))
    return media_description
8c160a9c767d86a1c1867d22f018d6342239e68d
701,537
def format_write_request(address, value):
    """
    Format a write request based on an address and the value to write to
    the FPGA.

    :param address: address at which to write data.
    :param value: data to write to the address.
    :return: formatted request.
    """
    if address >= 2**(4 * 8):
        raise ValueError(f'Address {address} is too large (max 4 bytes).')
    if address < 0:
        raise ValueError(f'Address {address} cannot be negative.')
    if value >= 2**(4 * 8):
        raise ValueError(f'Value {value} is too large (max 4 bytes).')
    if value < 0:
        raise ValueError(f'Value {value} cannot be negative.')
    buff = bytearray(9)
    buff[0] = 1 << 7
    # Use explicit 4-byte slices so each field lands in its own region of
    # the 9-byte buffer.
    buff[1:5] = int.to_bytes(address, length=4, byteorder="little")
    buff[5:9] = int.to_bytes(value, length=4, byteorder="little")
    return buff
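A small check of the wire format, assuming the function above is in scope; the first byte is the write flag, followed by the little-endian address and value:

req = format_write_request(0x10, 0x0201)
print(req.hex())  # '801000000001020000'
assert req[0] == 0x80
assert int.from_bytes(req[1:5], "little") == 0x10
assert int.from_bytes(req[5:9], "little") == 0x0201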
8425b7ff3422162cb0127c64069dbf68a414becf
701,538
from datetime import datetime


def create_envelope(payload: dict) -> dict:
    """
    Creates a dictionary with event label, timestamp and message field

    :param payload: The payload dict
    :return: An event message dictionary
    """
    payload['timestamp'] = datetime.utcnow().timestamp()
    if 'event' not in payload:
        payload['event'] = 'cs.unknown'
    if 'message' not in payload:
        payload['message'] = None
    return payload
593a2b60a667dece41f10031d13ad98018d9f881
701,540
import re


def prepare_template_data(fill_pairs):
    """
    Prepares formatted data for filling template.

    It produces multiple variants of keys (key, Key, KEY) to control the
    format of the filled template.

    Args:
        fill_pairs (iterable) of tuples (key, value)

    Returns:
        (dict)
        ('host', 'maya') > {'host': 'maya', 'Host': 'Maya', 'HOST': 'MAYA'}
    """
    fill_data = {}
    regex = re.compile(r"[a-zA-Z0-9]")
    for key, value in dict(fill_pairs).items():
        # Handle cases when value is `None` (standalone publisher)
        if value is None:
            continue
        # Keep value as it is
        fill_data[key] = value
        # Both key and value are with upper case
        fill_data[key.upper()] = value.upper()

        # Capitalize only first char of value
        # - conditions are because of possible index errors
        # - regex is to skip symbols that are not chars or numbers
        #   - e.g. "{key}" which starts with curly bracket
        capitalized = ""
        for idx in range(len(value or "")):
            char = value[idx]
            if not regex.match(char):
                capitalized += char
            else:
                capitalized += char.upper()
                capitalized += value[idx + 1:]
                break
        fill_data[key.capitalize()] = capitalized

    return fill_data
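A short usage sketch, assuming the function above is in scope:

data = prepare_template_data([("host", "maya"), ("task", "modeling")])
print(data)
# {'host': 'maya', 'HOST': 'MAYA', 'Host': 'Maya',
#  'task': 'modeling', 'TASK': 'MODELING', 'Task': 'Modeling'}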
a6082093bcbe39ba429decd2735fb2797c6c11dd
701,542
from pathlib import Path
from typing import Any
from typing import Callable


def readFileLines(filepath: Path, f: Callable[[str], Any] = str) -> list:
    """Reads the lines in a file

    Args:
        filepath (Path): The path of the file to be read.
        f (Callable[[str], Any], optional): Transformation for the lines.
            Defaults to `str`.

    Returns:
        list: list with the lines, with the defined transformation applied.
    """
    lines = None
    with open(filepath, 'r') as file:
        lines = []
        # `readline` reads one line at a time (better for large files)
        while (line := file.readline()):
            lines.append(f(line))
    return lines
21df2445f6132085d7da36276f371170ff0f2a4e
701,543
def get_default(arr, idx, default_value):
    """Get arr[idx] or return default_value."""
    try:
        return arr[idx]
    except IndexError:
        return default_value
038b943da7fa1d36038444880264160da8e031f4
701,544
def second(xs):
    """Returns the second element of a list, or None if the list has fewer
    than two elements."""
    if not xs or len(xs) < 2:
        return None
    return xs[1]
e5a915116d61e01842f86623aafcf6e2e6c8b5a3
701,545
import logging


def get_logger(module: str, file: str) -> logging.Logger:
    """Configure a file logger for use in a script.

    Parameters
    ----------
    module : str
        The name of the module from which the logger is called
    file : str
        The name of the log file to which the logger will write

    Returns
    -------
    logging.Logger
        The configured logger instance.
    """
    handler = logging.FileHandler(file)
    formatter = logging.Formatter(
        "{asctime} :: {levelname} :: {name} :: {message}", style="{"
    )
    handler.setLevel(logging.INFO)
    handler.setFormatter(formatter)
    logging.basicConfig(level=logging.INFO, handlers=[handler])
    logger = logging.getLogger(module)
    return logger
c9a68d216ca9a04ccb208dd546621e051dd37e36
701,546
def filled_grasp_grid(empty_grasp_grid, grid_file):
    """Return a GraspGrid instance filled from the grid_file."""
    empty_grasp_grid.read(grid_file)
    grid_file.close()
    return empty_grasp_grid
97ed623c09b9d42642a208451b004e5bec4910ed
701,547
def get_1d_coords(p, i, j):
    """
    Finds index of site in 1d chain from 2d lattice based on snake
    decomposition, i.e. 1d coords on 2d lattice look like:

        0,  1,  2,  3,
        7,  6,  5,  4,
        8,  9, 10, 11

    Args:
        p - dictionary that contains the relevant system parameters
        i - row index on 2d lattice
        j - column index on 2d lattice
    Returns:
        reshaped_i - index of site on 1d chain
    """
    if i % 2 == 0:
        reshaped_i = i * p['W'] + j
    else:
        reshaped_i = i * p['W'] + (p['W'] - j - 1)
    return reshaped_i
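A quick check of the snake ordering on a 3x4 lattice, assuming the function above is in scope and `p['W']` is the lattice width:

p = {'W': 4}
order = [[get_1d_coords(p, i, j) for j in range(4)] for i in range(3)]
print(order)
# [[0, 1, 2, 3], [7, 6, 5, 4], [8, 9, 10, 11]]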
2852ec9d57c99f922380fc8b671bdd757e8decf4
701,550
import json


def abrir_freq(file):
    """
    Opens the json file with the dictionary of rule frequencies.

    :param file: json file
    :return: dictionary with the frequencies of the rules in the corpus.
    """
    with open(file) as f:
        freq_svo_dict = json.load(f)
    return freq_svo_dict
b1cbb7c98146cf72390b511428d7734881f0b944
701,551
def get_minsep_range(minseps, cap=None):
    """
    Create ranged minseps from an ensemble of minsep entries

    Args:
        minseps (list): A list of minsep dictionaries
        cap (tuple): Minimum-maximum caps

    Returns:
        minsep (dict): A minsep where values are minimum and maximum values
    """
    base = {key: [value, value] for key, value in minseps[0].items()}
    for minsep in minseps:
        for key, value in minsep.items():
            # If the key exists (it should)
            if key in base:
                existing = base[key]
                # Expand the minimum and maximum values
                if cap and (value < cap[0]):
                    existing[0] = cap[0]
                elif cap and (value > cap[1]):
                    existing[1] = cap[1]
                elif existing[0] > value:
                    existing[0] = value
                elif existing[1] < value:
                    existing[1] = value
            # add the pairs if needed
            else:
                base[key] = [value, value]
    return base
e60392fa70f7f2b0989f4894ce90c7bd78aac7d2
701,553
def datetime_to_iso(date, only_date=True):
    """Convert datetime format to ISO 8601 time format

    This function converts a date in datetime instance, e.g.
    ``datetime.datetime(2017,9,14,0,0)`` to ISO format, e.g. ``2017-09-14``

    :param date: datetime instance to convert
    :type date: datetime
    :param only_date: whether to return date only or also time information.
        Default is ``True``
    :type only_date: bool
    :return: date in ISO 8601 format
    :rtype: str
    """
    if only_date:
        return date.isoformat().split('T')[0]
    return date.isoformat()
676a7e65de2c9e4de60cfe8d832ab342aa46af4f
701,555
def descendants(node, lst=None):
    """Return a list of all the descendants beneath a node."""
    if lst is None:
        lst = []
    for child in node.children:
        lst.append(child)
        descendants(child, lst=lst)
    return lst
5fe6fb9d9fbfd63bbeb161fdbe1d0e54d20edf9e
701,558
def create_elem_dict(row):
    """
    Create new element dictionary from row with metadata common to all
    nodes/ways/relations.
    """
    elem = {
        'id': row.id,
        'version': row.version,
        'userId': row.user_id,
        'userName': row.user_name,
        'timestamp': row.timestamp,
        'tags': row.tags,
    }
    if row.add:
        elem['added'] = True
    if row.delete:
        elem['deleted'] = True
    return elem
d97b712cd4b0bc6e79f5aa09f212491d715f1c69
701,562
from typing import Any
from typing import Dict
from typing import Optional
from typing import Tuple


def source_ip_and_reverse_dns(
        message: Dict[str, Any]) -> Tuple[Optional[str], Optional[str]]:
    """
    Extract the source IP and reverse DNS information from a canary
    request.
    """
    reverse_dns, source_ip = (None, None)

    if 'SourceIP' in message:
        source_ip = message['SourceIP']
    # `ReverseDNS` can sometimes exist and still be empty.
    if 'ReverseDNS' in message and message['ReverseDNS']:
        reverse_dns = message['ReverseDNS']

    return (source_ip, reverse_dns)
df2a9b6c4a177073fc019e88c33234f4d6124ccb
701,563
def fCO2_to_CO2(fCO2, Ks):
    """Calculate CO2 from fCO2."""
    return fCO2 * Ks.K0
2b71b46147291e7fffb99d51d0acb59ea4cd0c69
701,564
def check_if_in_team(api, team_id, person):
    """
    Checks if a person is in a given team

    :param api: CiscoSparkAPI instance to query Spark with.
    :param team_id: The ID of the team to check for
    :param person: The person to check against the team
    """
    team_memberships = api.team_memberships.list(team_id)
    # Check every membership to see if this person is contained within
    for membership in team_memberships:
        if person.id == membership.personId:
            return True
    return False
b20a5ee41485b2dd397c7dd23407ef95d0f34e4a
701,566
def group_per_category(bkgs):
    """
    Groups a flat list of datasets into sublists with the same category
    E.g. [ttjet, ttjet, ttjet, qcd, qcd] --> [[ttjet, ttjet, ttjet], [qcd, qcd]]
    """
    cats = list(set(bkg.get_category() for bkg in bkgs))
    cats.sort()
    return [[b for b in bkgs if b.get_category() == cat] for cat in cats]
c331456b1b3066f667c94e6ed7d00c8421ec2160
701,567
def subtract_params(param_list_left: list, param_list_right: list):
    """Subtract two lists of parameters

    :param param_list_left: list of numpy arrays
    :param param_list_right: list of numpy arrays
    :return: list of numpy arrays
    """
    return [x - y for x, y in zip(param_list_left, param_list_right)]
f8563cae337af0e30621428103afa10bde614e93
701,568
import torch


def pretty_size(size):
    """Pretty prints a torch.Size object.

    By user machinethink:
    https://forums.fast.ai/t/gpu-memory-not-being-freed-after-training-is-over/10265/7
    """
    assert isinstance(size, torch.Size)
    return " × ".join(map(str, size))
006ae05ce22653bfe58a5791aa5912de7283d9ca
701,574
from typing import Tuple


def convert_time(time: str, ampm: str) -> Tuple[int, int]:
    """Convert time given "HH:MM" to 24h format.

    Args:
        time (str): a time like "12:00" without ampm
        ampm (str): either "am" or "pm"

    Returns:
        Tuple[int, int]: (hour, minute) in 24h time format
    """
    hour, minute = [int(n) for n in time.split(':')]
    hour %= 12
    if ampm == 'pm':
        hour += 12
    return hour, minute
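A few spot checks, assuming the function above is in scope; the `hour %= 12` step is what maps 12am to hour 0 while keeping 12pm at 12:

assert convert_time("12:00", "am") == (0, 0)    # midnight
assert convert_time("12:30", "pm") == (12, 30)  # half past noon
assert convert_time("07:15", "pm") == (19, 15)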
b1bd57ea92e82ba629e3ad2733f173dfcc805e9e
701,575
def column_equality(series, col1, col2, comparison='equal',
                    pos_return_val=1, neg_return_val=0):
    """
    Apply to a dataframe row to return a binary feature depending on
    equality or inequality

    E.g. df.apply(lambda s: column_equality(s, 'day_of_week',
    'day_of_sale'), axis=1) for matching the two. Result is a series of
    pos_return_vals and neg_return_vals. Defaults to 1 and 0.
    """
    if comparison == 'equal':
        if series[col1] == series[col2]:
            return pos_return_val
        else:
            return neg_return_val
    if comparison == 'unequal':
        if series[col1] != series[col2]:
            return pos_return_val
        else:
            return neg_return_val
9ec71f5fd3af4a8d89b4cd58a255065ec8352eb2
701,576
import math


def get_tile_from_lon_lat(lon: float, lat: float, zoom: int) -> tuple[int, int]:
    """
    Turns a lon/lat measurement into a Slippy map tile at a given zoom.
    """
    # Clamps lon, lat to proper mercator projection values
    lat = min(lat, 85.0511)
    lat = max(lat, -85.0511)
    lon = min(lon, 179.9999)
    lon = max(lon, -179.9999)

    lat_rad = math.radians(lat)
    n = 2.0 ** zoom
    xtile = int((lon + 180.0) / 360.0 * n)
    ytile = int((1.0 - math.asinh(math.tan(lat_rad)) / math.pi) / 2.0 * n)
    return xtile, ytile
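A quick sanity check, assuming the function above is in scope; at zoom 0 the whole world is one tile, and (0°, 0°) at zoom 1 lands on the south-east tile of the 2x2 grid since lat/lon 0 sits on the tile boundary:

assert get_tile_from_lon_lat(13.4, 52.5, 0) == (0, 0)  # zoom 0: single tile
assert get_tile_from_lon_lat(0.0, 0.0, 1) == (1, 1)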
cdd542c8a362d54dccb8760278b22a17f5df57f9
701,577
def get_cincinnati_channels(major, minor):
    """
    :param major: Major for release
    :param minor: Minor version for release.
    :return: Returns the Cincinnati graph channels associated with a
        release in promotion order (e.g. candidate -> stable)
    """
    major = int(major)
    minor = int(minor)

    if major != 4:
        raise IOError('Unable to derive previous for non v4 major')

    prefixes = ['candidate', 'fast', 'stable']
    if major == 4 and minor == 1:
        prefixes = ['prerelease', 'stable']

    return [f'{prefix}-{major}.{minor}' for prefix in prefixes]
e57ad8d26ea0a397e8c3f9edc99174f78b506564
701,580
def lagrange_four_point(x, y0, y1, y2, y3):
    """The third order polynomial p(x) with p(-1)=y0, p(0)=y1, p(1)=y2,
    p(2)=y3."""
    a2 = 3 * (y0 + y2 - y1 - y1)
    a3 = 3 * (y1 - y2) + y3 - y0
    a1 = -a3 + 3 * (y2 - y0)
    return y1 + x * (a1 + x * (a2 + x * a3)) * 0.166666666666666666666666
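A quick interpolation check, assuming the function above is in scope; the polynomial must reproduce the four sample values at x = -1, 0, 1, 2, and since third-order interpolation is exact for cubics, sampling y = x^3 recovers it everywhere:

ys = [(-1) ** 3, 0 ** 3, 1 ** 3, 2 ** 3]  # -1, 0, 1, 8
for x, y in zip((-1, 0, 1, 2), ys):
    assert abs(lagrange_four_point(x, *ys) - y) < 1e-12
assert abs(lagrange_four_point(0.5, *ys) - 0.5 ** 3) < 1e-12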
b60da1f8567c5b9babbc9e158b1444e30424bb1f
701,581
import uuid


def create_filename(prefix='', ext=''):
    """
    Create a unique filename.

    :param str prefix: Prefix to add to filename.
    :param str ext: Extension to append to filename, e.g. 'jpg'
    :return: Unique filename.
    :rtype: str
    """
    suffix = '.' + ext if ext else ''
    return prefix + str(uuid.uuid4()) + suffix
30cedc7bdcf3fdbf202b8a0d26e64bd6f865094d
701,586
def std_secao_filter(secao_list):
    """
    Takes a word list from a secao filter and standardizes the words to the
    same pattern as the one used to download the articles' URLs:
    extra -> e and suplemento -> a.
    """
    return [str(s).lower().replace('extra', 'e').replace('suplemento', 'a')
            for s in secao_list]
6c5b1a52bec02078cd8c0e1dc35c97420a424936
701,591
import re


def generate_layer_name(layer):
    """
    Generates unique name for layer.

    Parameters
    ----------
    layer : BaseLayer

    Returns
    -------
    str
    """
    cls = layer.__class__
    layer_id = cls.global_identifiers_map[cls]
    cls.global_identifiers_map[cls] += 1

    classname = cls.__name__
    if classname.isupper():
        layer_name = classname.lower()
    else:
        layer_name = re.sub(r'(?<!^)(?=[A-Z][a-z_])', '-', classname)

    return "{}-{}".format(layer_name.lower(), layer_id)
8cce0bf0c68601dcbed2c0852a563243cd818743
701,594
def recv_meas_outcome(socket):
    """Receive the measurement outcome (0 or 1) of the server's last
    measurement."""
    return int(socket.recv(maxsize=1))
f49717272722be1476cb9bcc08bcbe7b8525c2ba
701,597
import base64


def make_basic_auth_header(username, password):
    """
    create a basic authentication header

    :param username: user name [unicode on py2, str on py3]
    :param password: password [unicode on py2, str on py3]
    :return: basic auth header [str on py2, str on py3]
    """
    # note: the coding dance in the next lines is to make sure we get str
    # type on python 2 as well as on python 3 as str is the type we get in
    # the auth object when practically running with a real web server.
    user_pass = u'%s:%s' % (username, password)
    return 'Basic ' + str(
        base64.b64encode(user_pass.encode('utf-8')).decode('ascii'))
69900bbc73a4df8e0f2f932a30e6acdb08cb9c4d
701,598
from typing import Union


def alfa_key(alfa: Union[str, int]) -> Union[int, None]:
    """
    Return the numeric value of a possible alfanumeric key name.

    See "alfanumeric key names".

    Parameters
    ----------
    alfa : str | int
        The key name from the "alfa numeric names" list or the
        correspondent index

    Returns
    -------
    alfa_code : int
        The correspondent alfa numeric code

    Raises
    ------
    TypeError
        If `alfa` type isn't str or int

    Examples
    --------
    >>> alfa_key('A')
    29
    >>> alfa_key(10)
    29
    """
    alfa_key_list = [
        ['0', 7], ['1', 8], ['2', 9], ['3', 10], ['4', 11],
        ['5', 12], ['6', 13], ['7', 14], ['8', 15], ['9', 16],
        ['A', 29], ['B', 30], ['C', 31], ['D', 32], ['E', 33],
        ['F', 34], ['G', 35], ['H', 36], ['I', 37], ['J', 38],
        ['K', 39], ['L', 40], ['M', 41], ['N', 42], ['O', 43],
        ['P', 44], ['Q', 45], ['R', 46], ['S', 47], ['T', 48],
        ['U', 49], ['V', 50], ['W', 51], ['X', 52], ['Y', 53],
        ['Z', 54]]
    if type(alfa) == int:
        return alfa_key_list[int(alfa)][1]
    elif type(alfa) == str:
        for index in range(len(alfa_key_list)):
            if str(alfa_key_list[index][0]) == alfa:
                alfa_code = alfa_key_list[index][1]
                return alfa_code
    else:
        raise TypeError()
f800ec179075f00264d3d797c3df6bcf1e6b80da
701,599
def word_to_col(w):
    """Splits a hexadecimal string to a bytes column.

    Parameters
    ----------
    w : str
        Hexadecimal 32-bit word.

    Returns
    -------
    list
        4 bytes column containing integers representing the input string.
    """
    x = int(w, 16)
    return [x >> 24, (x >> 16) & 0xff, (x >> 8) & 0xff, x & 0xff]
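A quick check, assuming the function above is in scope; the word is split big-endian, most significant byte first:

assert word_to_col("01020304") == [1, 2, 3, 4]
assert word_to_col("ffaa0011") == [0xff, 0xaa, 0x00, 0x11]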
540dabd4e42eb68ce166f608bfa849b31f1bc2fe
701,600
def _create_documents_per_words(freq_matrix: dict) -> dict:
    """
    Returns a dictionary of words and the number of documents in which
    they appear.

    :param freq_matrix: The frequency matrix to be summarized.
    :return: A dictionary of words and the number of documents in which
        they appear.
    """
    doc_per_words = dict()
    for sentence, freq_table in freq_matrix.items():
        for word, frequency in freq_table.items():
            if word in doc_per_words:
                doc_per_words[word] += 1
            else:
                doc_per_words[word] = 1
    return doc_per_words
3b25081ce3452629de9fdd6afd122bd058ee9acf
701,601