Columns: content — string (39 to 14.9k chars); sha1 — string (40 chars); id — int64 (0 to 710k)
def get_data_meta_path(either_file_path: str) -> tuple: """get either a meta or binary file path and return both as a tuple Arguments: either_file_path {str} -- path of a meta/binary file Returns: tuple -- (binary_path, meta_path) """ file_stripped = '.'.join(either_file_path.split('.')[:-1]) return tuple([file_stripped + ext for ext in ['.bin', '.meta']])
0456186cd99d5899e2433ac9e44ba0424077bcc0
707,218
def get_local_info(hass): """Get HA's local location config.""" latitude = hass.config.latitude longitude = hass.config.longitude timezone = str(hass.config.time_zone) elevation = hass.config.elevation return latitude, longitude, timezone, elevation
1fdefbad46c7cdb58abdc36f7d8799aa1e4af87c
707,222
def get_word_combinations(word): """ 'one-two-three' => ['one', 'two', 'three', 'onetwo', 'twothree', 'onetwothree'] """ permutations = [] parts = [part for part in word.split(u'-') if part] for count in range(1, len(parts) + 1): for index in range(len(parts) - count + 1): permutations.append(u''.join(parts[index:index+count])) return permutations
5a4c042cc0f3dedb297e2513bf638eac4278e0a6
707,230
def dice_counts(dice): """Make a dictionary of how many of each value are in the dice """ return {x: dice.count(x) for x in range(1, 7)}
427703283b5c0cb621e25f16a1c1f2436642fa9f
707,233
from typing import List def dict_to_kvp(dictionary: dict) -> List[tuple]: """ Converts a dictionary to a list of tuples where each tuple has the key and value of each dictionary item :param dictionary: Dictionary to convert :return: List of Key-Value Pairs """ return [(k, v) for k, v in dictionary.items()]
2b856ebb218884a4975d316bebe27546070f2083
707,236
import re def _get_ip_from_response(response): """ Filter ipv4 addresses from string. Parameters ---------- response: str String with ipv4 addresses. Returns ------- list: list with ip4 addresses. """ ip = re.findall(r'\b(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\b', response) return ip
ac36a3b729b0ce4ba13a6db550a71276319cbd70
707,238
def read_csv_to_data(path: str, delimiter: str = ",", headers: list = []): """A zero-dependency helper method to read a csv file Given the path to a csv file, read data row-wise. This data may be later converted to a dict of lists if needed (column-wise). Args: path (str): Path to csv file delimiter (str, optional): Delimiter to split the rows by. Defaults to ',' headers (list, optional): Given header list for a csv file. Defaults to an empty list, which results in the first row being used as a header. Returns: A list of dictionary values (list of rows) representing the file being read """ data = [] with open(path, "r") as f: header = headers if len(headers) == 0: header = f.readline().split(delimiter) for line in f: entry = {} for i, value in enumerate(line.split(delimiter)): entry[header[i].strip()] = value.strip() data.append(entry) return data
f60e163e770680efd1f8944becd79a0dd7ceaa08
707,240
def getInputShape(model): """ Gets the shape when there is a single input. Return: Numeric dimensions, omits dimensions that have no value. eg batch size. """ s = [] for dim in model.input.shape: if dim.value: s.append(dim.value) return tuple(s)
628f61a995784b9be79816a5bbcde2f8204640be
707,243
def kewley_agn_oi(log_oi_ha): """Seyfert/LINER classification line for log([OI]/Ha).""" return 1.18 * log_oi_ha + 1.30
5e6b71742bec307ad609d855cced80ae08e5c35c
707,244
def multiply_str(char, times): """ Return multiplied character in string """ return char * times
cc69f0e16cba1b8c256301567905e861c05291ea
707,247
def calories_per_item(hundr, weight, number_cookies, output_type): """ >>> calories_per_item(430, 0.3, 20, 0) 'One item has 64.5 kcal.' >>> calories_per_item(430, 0.3, 20, 1) 'One item has 64.5 Calories.' >>> calories_per_item(1, 1000, 10, 1) 'One item has 1000.0 Calories.' >>> calories_per_item(1, 1000, 10, 0) 'One item has 1000.0 kcal.' >>> calories_per_item(0, 1000, 10, 0) 'One item has 0.0 kcal.' """ kcal_per_item = hundr * 10 # convert kcal per 100g to kcal per kg unit = 'kcal' if output_type == 1: # change output unit based on input unit = 'Calories' return 'One item has ' + str((kcal_per_item * weight) / number_cookies) + ' ' + unit + '.'
9ca16eee8aa8a81424aeaa30f696fb5bec5e3956
707,248
def solar_true_longitude(solar_geometric_mean_longitude, solar_equation_of_center): """Returns the Solar True Longitude with Solar Geometric Mean Longitude, solar_geometric_mean_longitude, and Solar Equation of Center, solar_equation_of_center.""" solar_true_longitude = solar_geometric_mean_longitude + solar_equation_of_center return solar_true_longitude
a335bb82002846eb2bc2106675c13e9f3ee28900
707,249
def findUsername(data): """Find a username in an Element Args: data (xml.etree.ElementTree.Element): XML from PMS as an Element Returns: username or None """ elem = data.find('User') if elem is not None: return elem.attrib.get('title') return None
f7b6bb816b9eeeca7e865582935a157cdf276928
707,252
def is_valid_table_name(cur, table_name): """ Checks whether a name is for a table in the database. Note: Copied from utils.database for use in testing, to avoid a circular dependency between tests and implementation. Args: cur: sqlite3 database cursor object table_name (str): name to check Returns: True if valid, False otherwise """ query = """ SELECT 1 FROM sqlite_master WHERE type == 'table' AND name == ? """ res = cur.execute(query, (table_name,)) return res.fetchone() is not None
f1efc66220baa215a73f374da19842ab38c619be
707,261
import re def sub_repeatedly(pattern, repl, term): """apply sub() repeatedly until no change""" while True: new_term = re.sub(pattern, repl, term) if new_term == term: return term term = new_term
e57c648fb057f81e35e0fc2d2dc57edd0b400baf
707,262
def create_lexicon(word_tags): """ Create a lexicon in the right format for nltk.CFG.fromString() from a list of tuples with words and their tag. """ # dictionary to filter the double tags word_dict = {} for word, tag in word_tags: if tag not in word_dict: word_dict[tag] = {word} else: word_dict[tag].add(word) # POS is the tag for 's, but the 's is not removed on nouns. word_dict['NN'] = [x.replace('\'s', '') for x in word_dict['NN']] word_dict['JJ'] = [x.replace('\'s', '') for x in word_dict['JJ']] del word_dict[','] word_dict['PRP'].update(word_dict['PRP$']) del word_dict['PRP$'] word_dict['POS'] = ["'s"] # convert the dictionary to the right NLTK format lexicon = '' for key, val in word_dict.items(): lexicon += key + ' -> ' # add ' ' around every word val = [f'\'{v}\'' for v in val] # the words are separated by a pipe lexicon += ' | '.join(val) + '\n' return lexicon
3a91671d559f5924ec9326520db6e11a1672fee4
707,263
import ipaddress def ip_only(value): """ Returns only the IP address string of the value provided. The value could be either an IP address, an IP network or an IP interface as defined by the ipaddress module. Parameters ---------- value : str The value to use Returns ------- str The IP address only value, if the value provided was valid None If the value provided is not an IP thing """ for test in [lambda x: str(ipaddress.ip_address(x)), lambda x: str(ipaddress.ip_interface(x).ip), lambda x: str(ipaddress.ip_network(x).network_address)]: try: return test(value) except ValueError: pass return None
149b202969c0ccb4e0c5e55417ce0231f1b5fc11
707,265
import random def is_prime(number, num_trials=200): """Determines whether a number is prime. Runs the Miller-Rabin probabilistic primality test many times on the given number. Args: number (int): Number to perform primality test on. num_trials (int): Number of times to perform the Miller-Rabin test. Returns: True if number is prime, False otherwise. """ if number < 2: return False if number != 2 and number % 2 == 0: return False # Find largest odd factor of n-1. exp = number - 1 while exp % 2 == 0: exp //= 2 for _ in range(num_trials): rand_val = int(random.SystemRandom().randrange(1, number)) new_exp = exp power = pow(rand_val, new_exp, number) while new_exp != number - 1 and power != 1 and power != number - 1: power = (power * power) % number new_exp *= 2 if power != number - 1 and new_exp % 2 == 0: return False return True
78478437c08bcbd5e4c690466e4fe51bb4fad5ce
707,269
def str2num(s): """Convert string to int or float number. Parameters ---------- s : string String representing a number. Returns ------- Number (int or float) Raises ------ TypeError If `s` is not a string. ValueError If the string does not represent a (float or int) number. """ try: x = float(s) if x.is_integer(): return int(x) else: return x except ValueError: raise ValueError("'s' does not represent a number (int or float)")
5dfaed567a66fc7d3ee46cbb70d9c408d38fcbfe
707,271
from collections import OrderedDict import inspect def _get_new_args_dict(func, args, kwargs): """Build one dict from args, kwargs and function default args The function signature is used to build one joint dict from args and kwargs and additionally from the default arguments found in the function signature. The order of the args in this dict is the order of the args in the function signature and hence the list of args can be used in cases where we can only supply *args, but we have to work with a mixture of args, kwargs and default args as in xarray.apply_ufunc in the xarray wrapper. """ new_args_dict = OrderedDict() for i, (arg, parameter) in enumerate(inspect.signature(func).parameters.items()): if i < len(args): new_args_dict[arg] = args[i] elif arg in kwargs.keys(): new_args_dict[arg] = kwargs[arg] else: new_args_dict[arg] = parameter.default return new_args_dict
ad7553e7b778b8f7b499217c7ee4ad7328958809
707,273
def num_list(to_parse): """ Creates list from its string representation Arguments: to_parse {string} -- String representation of list, can include 'None' or internal lists, represented by separation with '#' Returns: list[int] -- List represented in to_parse """ if len(to_parse) == 2: return [] inter = to_parse[1:-1] inter = [x.strip() for x in inter.split(',')] result = [] for n in inter: if n == "None": result.append(None) elif "#" in n: result.append([int(x) for x in n.split("#")]) else: result.append(int(n)) return result
b444554e37434b5ae42ebc913bcc0f9b99c65ce9
707,275
def read_ground_stations_extended(filename_ground_stations_extended): """ Reads ground stations from the input file. :param filename_ground_stations_extended: Filename of ground stations basic (typically /path/to/ground_stations.txt) :return: List of ground stations """ ground_stations_extended = [] gid = 0 with open(filename_ground_stations_extended, 'r') as f: for line in f: split = line.split(',') if len(split) != 8: raise ValueError("Extended ground station file has 8 columns: " + line) if int(split[0]) != gid: raise ValueError("Ground station id must increment each line") ground_station_basic = { "gid": gid, "name": split[1], "latitude_degrees_str": split[2], "longitude_degrees_str": split[3], "elevation_m_float": float(split[4]), "cartesian_x": float(split[5]), "cartesian_y": float(split[6]), "cartesian_z": float(split[7]), } ground_stations_extended.append(ground_station_basic) gid += 1 return ground_stations_extended
2492dc8d5c55f124696aafbec11d74e609c3f397
707,281
import ast def get_module_docstring(path): """get a .py file docstring, without actually executing the file""" with open(path) as f: return ast.get_docstring(ast.parse(f.read()))
e253372bfb6f65907a5461332d14c414c2370c66
707,283
def transform(f, a, b, c, d): """ Transform a given function linearly. If f(t) is the original function, and a, b, c, and d are the parameters in order, then the return value is the function F(t) = a*f(c*t + d) + b """ return lambda x: a * f(c * x + d) + b
a47b3f4f3dc1e3ed5ddb6155bcd67b8297c298ed
707,284
import json def load_config(path='config.json'): """ Loads configuration from config.json file. Returns station mac address, interval, and units for data request """ # Open config JSON with open(path) as f: # Load JSON file to dictionary config = json.load(f) # Return mac address, interval, and units return (config['station_max_address'], int(config['interval']), config['units'])
5522f023ed3293149613dcc2dc007e34d50f3fa8
707,288
import torch def log_px_z(pred_logits, outcome): """ Returns Bernoulli log probability. :param pred_logits: logits for outcome 1 :param outcome: datapoint :return: log Bernoulli probability of outcome given logits in pred_logits """ pred = pred_logits.view(pred_logits.size(0), -1) y = outcome.view(outcome.size(0), -1) return -torch.sum(torch.max(pred, torch.tensor(0., device=pred.device)) - pred * y + torch.log(1 + torch.exp(-torch.abs(pred))), 1)
6369d893cc9bfe5c3f642f819511798d01ae3ae9
707,289
import functools def partial_at(func, indices, *args): """Partial function application for arguments at given indices.""" @functools.wraps(func) def wrapper(*fargs, **fkwargs): nargs = len(args) + len(fargs) iargs = iter(args) ifargs = iter(fargs) posargs = (next((ifargs, iargs)[i in indices]) for i in range(nargs)) # posargs = list( posargs ) # print( 'posargs', posargs ) return func(*posargs, **fkwargs) return wrapper
1b45e0bd8baea869d80c6b5963c6063f6b8fbdd4
707,290
def augment_features(data, feature_augmentation): """ Augment features for a given data matrix. :param data: Data matrix. :param feature_augmentation: Function applied to augment the features. :return: Augmented data matrix. """ if data is not None and feature_augmentation is not None: if isinstance(feature_augmentation, list): for augmentation_function in feature_augmentation: data = augmentation_function(data) else: data = feature_augmentation(data) return data
687a7ff2a4b61131f5d95e1f7d6eb77d75bd6f06
707,291
def copy_keys_except(dic, *keys): """Return a copy of the dict without the specified items. """ ret = dic.copy() for key in keys: try: del ret[key] except KeyError: pass return ret
b1e57db9dbacbc2a7c502c36082f40598a0f4b90
707,292
import random import math def get_params(img, scale, ratio): """Get parameters for ``crop`` for a random sized crop. Args: img (PIL Image): Image to be cropped. scale (tuple): range of size of the origin size cropped ratio (tuple): range of aspect ratio of the origin aspect ratio cropped Returns: tuple: params (i, j, h, w) to be passed to ``crop`` for a random sized crop. """ area = img.size[0] * img.size[1] for attempt in range(10): target_area = random.uniform(*scale) * area log_ratio = (math.log(ratio[0]), math.log(ratio[1])) aspect_ratio = math.exp(random.uniform(*log_ratio)) w = int(round(math.sqrt(target_area * aspect_ratio))) h = int(round(math.sqrt(target_area / aspect_ratio))) if w <= img.size[0] and h <= img.size[1]: i = random.randint(0, img.size[1] - h) j = random.randint(0, img.size[0] - w) return i, j, h, w # Fallback to central crop in_ratio = img.size[0] / img.size[1] if in_ratio < min(ratio): w = img.size[0] h = int(round(w / min(ratio))) elif in_ratio > max(ratio): h = img.size[1] w = int(round(h * max(ratio))) else: # whole image w = img.size[0] h = img.size[1] i = (img.size[1] - h) // 2 j = (img.size[0] - w) // 2 return i, j, h, w
80838328fc9383731e1a853c8dc572228d1a4567
707,293
def getlineno(frame): """Get the line number from a frame object, allowing for optimization.""" # FrameType.f_lineno is now a descriptor that grovels co_lnotab return frame.f_lineno
b8c8d6fb3ebb8784d10250a42526b31e185e9b7a
707,295
def get_or_create(session, model, **kwargs): """ Creates and returns an instance of the model with given kwargs, if it does not yet exist. Otherwise, get instance and return. Parameters: session: Current database session model: The Class of the database model **kwargs: The attributes for the desired instance Returns: (object): An object instance of the model with given kwargs """ instance = session.query(model).filter_by(**kwargs).first() if instance: return instance else: instance = model(**kwargs) session.add(instance) return instance
4d3e4f0da5ca61789171db5d8d16a5fa06e975cc
707,301
def soup_extract_enzymelinks(tabletag): """Extract all URLs for enzyme families from first table.""" return {link.string: link['href'] for link in tabletag.find_all("a", href=True)}
7baabd98042ab59feb5d8527c18fe9fa4b6a50af
707,306
def custom_field_sum(issues, custom_field): """Sums custom field values together. Args: issues: List The issue list from the JQL query custom_field: String The custom field to sum. Returns: Integer of the sum of all the found values of the custom_field. """ custom_field_running_total = 0 for issue in issues: if getattr(issue.fields, custom_field) is None: custom_field_running_total = custom_field_running_total + 2 else: custom_field_running_total = custom_field_running_total + \ getattr(issue.fields, custom_field) return custom_field_running_total
32c1cce310c06f81036ee79d70a8d4bbe28c8417
707,307
import pathlib def list_files(directory): """Returns all files in a given directory """ return [f for f in pathlib.Path(directory).iterdir() if f.is_file() and not f.name.startswith('.')]
a8c5fea794198c17c2aff41a1a07009984a8e61f
707,309
from typing import Union def score_normalization(extracted_score: Union[str, None]): """ Sofa score normalization. If available, returns the integer value of the SOFA score. """ score_range = list(range(0, 30)) if (extracted_score is not None) and (int(extracted_score) in score_range): return int(extracted_score)
74501e9351296037ecc90ae647155e3c6b76ae01
707,310
def clap_convert(txt): """convert string of clap values on Medium to an actual number Args: txt (str): clap value Returns: number of claps (int) """ # Medium annotation if txt[-1] == "K": output = int(float(txt[:-1]) * 1000) return output else: return int(txt)
253e0e2be4f37f1994637bbfc80edfc5d72bc4e5
707,314
def effective_area(true_energy, reco_energy, simu_area): """ Compute the effective area from a list of simulated energy and reconstructed energy Parameters ---------- true_energy: 1d numpy array reco_energy: 1d numpy array simu_area: float - area on which events are simulated Returns ------- float = effective area """ return simu_area * len(reco_energy) / len(true_energy)
b17efa390a1ae14bb8ecb959740bad8c391b1d2e
707,320
def fmt(n): """format number with a space in front if it is single digit""" if n < 10: return " " + str(n) else: return str(n)
976acc22cafd6d6bdb4e251853f49a114b63ec21
707,324
def _always_run(*args, **kwargs) -> bool: """ This returns False to indicate that the step is not already completed. """ return False
db31e0ac20ac0eef410fb051928308ce7414f5b6
707,327
def has_anonymous_link(node, auth): """Check if the node is anonymous to the user :param Node node: Node which the user wants to visit :param auth: the auth object, which carries any view-only link in the current url :return bool anonymous: Whether the node is anonymous to the user or not """ if auth.private_link: return auth.private_link.anonymous return False
c5941bce3f0110dfcd5e9bbb19bae0682c5e731f
707,328
def _causes_name_clash(candidate, path_list, allowed_occurences=1): """Determine if candidate leads to a name clash. Args: candidate (tuple): Tuple with parts of a path. path_list (list): List of pathlib.Paths. allowed_occurences (int): How often a name can occur before we call it a clash. Returns: bool """ duplicate_counter = -allowed_occurences for path in path_list: parts = tuple(reversed(path.parts)) if len(parts) >= len(candidate) and parts[: len(candidate)] == candidate: duplicate_counter += 1 return duplicate_counter > 0
3b874e4ea6d8780483100e464e3325321c82689e
707,331
import fsspec def is_remote_filesystem(fs: fsspec.AbstractFileSystem) -> bool: """ Validates if filesystem has remote protocol. Args: fs (``fsspec.spec.AbstractFileSystem``): An abstract super-class for pythonic file-systems, e.g. :code:`fsspec.filesystem(\'file\')` or :class:`datasets.filesystems.S3FileSystem` """ if fs is not None and fs.protocol != "file": return True else: return False
c40f9bb4845bbd1fc1a4cf9fce2c1b366cd22354
707,336
def get_element_attribute_or_empty(element, attribute_name): """ Args: element (element): The xib's element. attribute_name (str): The desired attribute's name. Returns: The attribute's value, or an empty str if none exists. """ return element.attributes[attribute_name].value if element.hasAttribute(attribute_name) else ""
dbc7f5c24d321c40b46f1c78950d7cf254719b5c
707,337
def escape(line, chars): """Escapes characters 'chars' with '\\' in 'line'.""" def esc_one_char(ch): if ch in chars: return "\\" + ch else: return ch return u"".join([esc_one_char(ch) for ch in line])
f69409c92eacbbcab4232f7bb0ee244c77a4f219
707,339
def hex2int(s: str): """Convert a hex-octets (a sequence of octets) to an integer""" return int(s, 16)
ecdb3152f8c661c944edd2811d016fce225c3d51
707,345
def get_mention_token_dist(m1, m2): """ Returns distance in tokens between two mentions """ succ = m1.tokens[0].doc_index < m2.tokens[0].doc_index first = m1 if succ else m2 second = m2 if succ else m1 return max(0, second.tokens[0].doc_index - first.tokens[-1].doc_index)
84052f805193b1d653bf8cc22f5d37b6f8de66f4
707,352
def FindDescendantComponents(config, component_def): """Return a list of all nested components under the given component.""" path_plus_delim = component_def.path.lower() + '>' return [cd for cd in config.component_defs if cd.path.lower().startswith(path_plus_delim)]
f9734442bbe3a01460970b3521827dda4846f448
707,353
from bs4 import BeautifulSoup def create_bs4_obj(connection): """Creates a beautiful Soup object""" soup = BeautifulSoup(connection, 'html.parser') return soup
b3956b13756e29cd57a0e12457a2d665959fb03d
707,355
import toml def parse_config_file(path): """Parse TOML config file and return dictionary""" try: with open(path, 'r') as f: return toml.loads(f.read()) except: open(path,'a').close() return {}
599164f023c0db5bffa0b6c4de07654daae1b995
707,356
def base_round(x, base): """ This function takes in a value 'x' and rounds it to the nearest multiple of the value 'base'. Parameters ---------- x : int Value to be rounded base : int Base for x to be rounded to Returns ------- int The rounded value """ return base*round(x/base)
e5b1a1b81c7baf990b7921fe27a20075c0305935
707,358
def encode_integer_leb128(value: int) -> bytes: """Encode an integer with signed LEB128 encoding. :param int value: The value to encode. :return: ``value`` encoded as a variable-length integer in LEB128 format. :rtype: bytes """ if value == 0: return b"\0" # Calculate the number of bits in the integer and round up to the nearest multiple # of 7. We need to add 1 bit because bit_length() only returns the number of bits # required to encode the magnitude, but not the sign. n_bits = value.bit_length() + 1 if n_bits % 7: n_bits += 7 - (n_bits % 7) # Bit operations force a negative integer to its unsigned two's-complement # representation, e.g. -127 & 0xff = 0x80, -10 & 0xfff = 0xff6, etc. We use this to # sign-extend the number *and* make it unsigned. Once it's unsigned, we can use # ULEB128. mask = (1 << n_bits) - 1 value &= mask output = bytearray(n_bits // 7) for i in range(n_bits // 7): output[i] = 0x80 | (value & 0x7F) value >>= 7 # Last byte shouldn't have the high bit set. output[-1] &= 0x7F return bytes(output)
b74832115a58248f4a45a880f657de6dd38b0d8d
707,360
def expose(policy): """ Annotate a method to permit access to contexts matching an authorization policy. The annotation may be specified multiple times. Methods lacking any authorization policy are not accessible. :: @mitogen.service.expose(policy=mitogen.service.AllowParents()) def unsafe_operation(self): ... :param mitogen.service.Policy policy: The policy to require. """ def wrapper(func): func.mitogen_service__policies = [policy] + getattr( func, "mitogen_service__policies", [] ) return func return wrapper
74caed36885e5ea947a2ecdac9a2cddf2f5f51b0
707,366
import base64 def didGen(vk, method="dad"): """ didGen accepts an EdDSA (Ed25519) key in the form of a byte string and returns a DID. :param vk: 32 byte verifier/public key from EdDSA (Ed25519) key :param method: W3C did method string. Defaults to "dad". :return: W3C DID string """ if vk is None: return None # convert verkey to jsonable unicode string of base64 url-file safe vk64u = base64.urlsafe_b64encode(vk).decode("utf-8") return "did:{0}:{1}".format(method, vk64u)
9991491ab486d8960633190e3d3baa9058f0da50
707,367
from typing import Dict from pathlib import Path import json def load_json(filename: str) -> Dict: """Read JSON file from metadata folder Args: filename: Name of metadata file Returns: dict: Dictionary of data """ filepath = ( Path(__file__).resolve().parent.parent.joinpath("metadata").joinpath(filename) ) metadata: Dict = json.loads(filepath.read_text()) return metadata
37d9f08344cf2a544c12fef58992d781556a9efd
707,369
def get_short_size(size_bytes): """ Get a file size string in short format. This function returns: "B" size (e.g. 2) when size_bytes < 1KiB "KiB" size (e.g. 345.6K) when size_bytes >= 1KiB and size_bytes < 1MiB "MiB" size (e.g. 7.8M) when size_bytes >= 1MiB size_bytes: File size in bytes """ if size_bytes < 1024: return str(size_bytes) if size_bytes < 1048576: return f"{size_bytes / 1024:.1f}K" return f"{size_bytes / 1048576:.1f}M"
ebc9ba25c01dedf0d15b9e2a21b67989763bc8c8
707,370
import pathlib def path_check(path_to_check): """ Check that the path given as a parameter is a valid absolute path. :param path_to_check: string which has to be checked :type path_to_check: str :return: True if it is a valid absolute path, False otherwise :rtype: boolean """ path = pathlib.Path(path_to_check) if not path.is_absolute(): return False return True
41b3537b0be2c729ba993a49863df4a15119db8b
707,373
def gen_endpoint(endpoint_name, endpoint_config_name): """ Generate the endpoint resource """ endpoint = { "SagemakerEndpoint": { "Type": "AWS::SageMaker::Endpoint", "DependsOn": "SagemakerEndpointConfig", "Properties": { "EndpointConfigName": { "Fn::GetAtt": ["SagemakerEndpointConfig", "EndpointConfigName"] }, "EndpointName": endpoint_name, "RetainAllVariantProperties": False, }, }, } return endpoint
bc658e6aebc41cfddefe0e77b2d65748a84789c5
707,375
import random def summary_selector(summary_models=None): """ Will create a function that takes as input a dict of summaries : {'T5': [str] summary_generated_by_T5, ..., 'KW': [str] summary_generated_by_KW} and randomly return a summary that has been generated by one of the models in summary_models. If summary_models is None, no summary will be used. :param summary_models: list of str(SummarizerModel) :return: function [dict] -> [str] """ if summary_models is None or len(summary_models) == 0 or \ (len(summary_models) == 1 and summary_models[0] == ""): return lambda x: "" summary_model = random.choice(summary_models) return lambda summaries_dict: summaries_dict[summary_model]
b8a2336546324d39ff87ff5b59f4f1174e5dd54c
707,380
from typing import Match def _replace_fun_unescape(m: Match[str]) -> str: """ Decode single hex/unicode escapes found in regex matches. Supports single hex/unicode escapes of the form ``'\\xYY'``, ``'\\uYYYY'``, and ``'\\UYYYYYYYY'`` where Y is a hex digit. Only decodes if there is an odd number of backslashes. .. versionadded:: 0.2 Parameters ---------- m : regex match Returns ------- c : str The unescaped character. """ slsh = b'\\'.decode('ascii') s = m.group(0) count = s.count(slsh) if count % 2 == 0: return s else: c = chr(int(s[(count + 1):], base=16)) return slsh * (count - 1) + c
3fdb275e3c15697e5302a6576b4d7149016299c0
707,382
def check_answer(guess, a_follower, b_follower): """Check if the user guessed the correct option""" if a_follower > b_follower: return guess == "a" else: return guess == "b"
acd1e78026f89dd1482f4471916472d35edf68a7
707,384
from typing import Dict def canonical_for_code_system(jcs: Dict) -> str: """get the canonical URL for a code system entry from the art decor json. Prefer FHIR URIs over the generic OID URI. Args: jcs (Dict): the dictionary describing the code system Returns: str: the canonical URL """ if "canonicalUriR4" in jcs: return jcs["canonicalUriR4"] else: return jcs["canonicalUri"]
f111a4cb65fa75799e799f0b088180ef94b71cc8
707,386
def escape_html(text: str) -> str: """Replaces all angle brackets with HTML entities.""" return text.replace('<', '&lt;').replace('>', '&gt;')
f853bcb3a69b8c87eb3d4bcea5bbca66376c7db4
707,390
import torch def calc_driver_mask(n_nodes, driver_nodes: set, device='cpu', dtype=torch.float): """ Calculates a binary vector mask over graph nodes with unit value on the driver indices. :param n_nodes: number of nodes in the graph :param driver_nodes: driver node indices. :param device: the device of the `torch.Tensor` :param dtype: the data type of the `torch.Tensor` :return: the driver mask vector. """ driver_mask = torch.zeros(n_nodes, device=device, dtype=dtype) driver_mask[list(driver_nodes)] = 1 return driver_mask
2d2a08a86629ece190062f68dd25fc450d0fd84e
707,391
def open_file(name): """ Return an open file object. """ return open(name, 'r')
8921ee51e31ac6c64d9d9094cedf57502a2aa436
707,392
import math def _bit_length(n): """Return the number of bits necessary to store the number in binary.""" try: return n.bit_length() except AttributeError: # pragma: no cover (Python 2.6 only) return int(math.log(n, 2)) + 1
bea6cb359c7b5454bdbb1a6c29396689035592d7
707,393
def with_metaclass(meta, *bases): """A Python 2/3 compatible way of declaring a metaclass. Taken from `Jinja 2 <https://github.com/mitsuhiko/jinja2/blob/master/jinja2 /_compat.py>`_ via `python-future <http://python-future.org>`_. License: BSD. Use it like this:: class MyClass(with_metaclass(MyMetaClass, BaseClass)): pass """ class _Metaclass(meta): """Inner class""" __call__ = type.__call__ __init__ = type.__init__ def __new__(cls, name, this_bases, attrs): if this_bases is None: return type.__new__(cls, name, (), attrs) return meta(name, bases, attrs) return _Metaclass(str('temporary_class'), None, {})
0fe8e95fe29821e4cda8b66ff54ddd1b73e51243
707,396
from typing import List def join_with_and(words: List[str]) -> str: """Joins list of strings with "and" between the last two.""" if len(words) > 2: return ", ".join(words[:-1]) + ", and " + words[-1] elif len(words) == 2: return " and ".join(words) elif len(words) == 1: return words[0] else: return ""
ecb2c1fa060657f2ea4173c4382a81c9b42beeb9
707,397
def is_url_relative(url): """ True if a URL is relative, False otherwise. """ return url.startswith("/") and not url.startswith("//")
91e1cb756a4554973e53fd1f607515577bc63294
707,398
import math def to_half_life(days): """ Return the constant [1/s] from the half life length [day] """ s= days * 3600*24 return -math.log(1/2)/s
af7724dfb9442bf1f5e931df5dd39b31d0e78091
707,407
import inspect def get_default_args(func): """ Return dict for parameter name and default value. Parameters ---------- func : Callable A function to get parameter name and default value. Returns ------- Dict Parameter name and default value. Examples -------- >>> def test_func(a: int, b: str = "c") -> int: ... return a+1 >>> get_default_args(test_func) {'b': 'c'} >>> def test_func2(a: int = 1, b="c") -> int: ... return a+1 >>> get_default_args(test_func2) {'a': 1, 'b': 'c'} """ signature = inspect.signature(func) return { k: v.default for k, v in signature.parameters.items() if v.default is not inspect.Parameter.empty }
dcc75dceae1385868866d668aa021584547190df
707,409
def sec_to_time(seconds): """Transform seconds into a formatted time string. Parameters ----------- seconds : int Seconds to be transformed. Returns ----------- time : string A well formatted time string. """ m, s = divmod(seconds, 60) h, m = divmod(m, 60) return "%02d:%02d:%02d" % (h, m, s)
59fcfe2f53d11ea7daac736b59b5eaeb72172dba
707,410
import base64 def create_api_headers(token): """ Create the API header. This is going to be sent along with the request for verification. """ auth_type = 'Basic ' + base64.b64encode((token + ":").encode('ascii')).decode('ascii') return { 'Authorization': auth_type, 'Accept': 'application/json', 'Content-Type': 'application/json' }
41ba1e22898dab2d42dde52e4458abc40640e957
707,420
import re def normalize_archives_url(url): """ Normalize url. will try to infer, find or guess the most useful archives URL, given a URL. Return normalized URL, or the original URL if no improvement is found. """ # change new IETF mailarchive URLs to older, still available text .mail archives new_ietf_exp = re.compile( "https://mailarchive\\.ietf\\.org/arch/search/" "\\?email_list=(?P<list_name>[\\w-]+)" ) ietf_text_archives = ( r"https://www.ietf.org/mail-archive/text/\g<list_name>/" ) new_ietf_browse_exp = re.compile( r"https://mailarchive.ietf.org/arch/browse/(?P<list_name>[\w-]+)/?" ) match = new_ietf_exp.match(url) if match: return re.sub(new_ietf_exp, ietf_text_archives, url) match = new_ietf_browse_exp.match(url) if match: return re.sub(new_ietf_browse_exp, ietf_text_archives, url) return url
e8a5351af28338c77c3e94fdf2b81e22c7a6edfd
707,422
def getIsolatesFromIndices(indices): """ Extracts the isolates from the indices of a df_X. :param pandas.index indices: cn.KEY_ISOLATE_DVH, cn.KEY_ISOLATE_MMP :return dict: keyed by cn.KEY_ISOLATE_DVH, cn.KEY_ISOLATE_MMP values correspond to rows element in the index """ keys = [n for n in indices.names] result = {} for idx, key in enumerate(keys): result[key] = [v[idx] for v in indices.values] return result
4e9200c722ce0c478d13eddcc799f4a8f7cab6db
707,423
import ast from typing import Optional def get_qualname(node: ast.AST) -> Optional[str]: """ If node represents a chain of attribute accesses, return its qualified name. """ parts = [] while True: if isinstance(node, ast.Name): parts.append(node.id) break elif isinstance(node, ast.Attribute): parts.append(node.attr) node = node.value else: return None return '.'.join(reversed(parts))
0d08b25a50b7d159f5df3b0b17282725eb748f38
707,425
def get_reset_state_name(t_fsm): """ Returns the name of the reset state. If an .r keyword is specified, that is the name of the reset state. If the .r keyword is not present, the first state defined in the transition table is the reset state. :param t_fsm: blifparser.BlifParser().blif.fsm object :return str reset_state: name of the reset state """ reset_state = None if t_fsm.r is None: if len(t_fsm.transtable) > 0: reset_state = t_fsm.transtable[0][1] else: reset_state = t_fsm.r.name return reset_state
c65ea80f94f91b31a179faebc60a97f7260675c4
707,427
def border_msg(msg: str): """ This function creates borders at the top and bottom of the text """ row = len(msg) h = ''.join(['+'] + ['-' * row] + ['+']) return h + "\n" + msg + "\n" + h
cdd9d17ba76014f4c80b9c429aebbc4ca6f959c3
707,428
def _qual_arg(user_value, python_arg_name, gblock_arg_name, allowable): """ Construct and sanity check a qualitative argument to send to gblocks. user_value: value to try to send to gblocks python_arg_name: name of python argument (for error string) gblock_arg_name: name of argument in gblocks allowable: dictionary of allowable values mapping python to whatever should be jammed into gblocks """ if user_value in allowable.keys(): return "-{}={}".format(gblock_arg_name,allowable[user_value]) else: err = "\n\n{} '{}' not recognized\n".format(python_arg_name, user_value) err += "must be one of:\n" allowed = list(allowable) allowed.sort() for a in allowed: err += " {}\n".format(a) raise ValueError(err)
7bf6717ee3dbeb533902773c86316d2bbdcd59a9
707,430
def overlapping_community(G, community): """Return True if community partitions G into overlapping sets. """ community_size = sum(len(c) for c in community) # community size must be larger to be overlapping if not len(G) < community_size: return False # check that the set of nodes in the communities is the same as G if not set(G) == set.union(*community): return False return True
da9e3465c6351df0efd19863e579c49bbc6b9d67
707,432
from typing import Any def linear_search(lst: list, x: Any) -> int: """Return the index of the first element of `lst` equal to `x`, or -1 if no elements of `lst` are equal to `x`. Design idea: Scan the list from start to finish. Complexity: O(n) time, O(1) space. For an improvement on linear search for sorted lists, see the binary search function in the decrease_and_conquer module. """ for i, y in enumerate(lst): if x == y: return i return -1
47e73d53ff68954aadc6d0e9e293643717a807d8
707,434
def checksum(number): """Calculate the checksum. A valid number should have a checksum of 1.""" check = 0 for n in number: check = (2 * check + int(10 if n == 'X' else n)) % 11 return check
8ada40ca46bc62bbe8f96d69528f2cd88021ad6a
707,437
def instanceof(value, type_): """Check if `value` is an instance of `type_`. :param value: an object :param type_: a type """ return isinstance(value, type_)
3de366c64cd2b4fe065f15de10b1e6ac9132468e
707,438
def is_fouling_team_in_penalty(event): """Returns True if fouling team over the limit, else False""" fouls_to_give_prior_to_foul = event.previous_event.fouls_to_give[event.team_id] return fouls_to_give_prior_to_foul == 0
ac1578af1092586a30b8fc9cdb3e5814da1f1544
707,440
import warnings def lmc(wave, tau_v=1, **kwargs): """ Pei 1992 LMC extinction curve. :param wave: The wavelengths at which optical depth estimates are desired. :param tau_v: (default: 1) The optical depth at 5500\AA, used to normalize the attenuation curve. :returns tau: The optical depth at each wavelength. """ if (wave < 1e3).any(): warnings.warn('LMC: extinction extrapolation below 1000AA is poor') mic = wave * 1e-4 aa = [175., 19., 0.023, 0.005, 0.006, 0.020] ll = [0.046, 0.08, 0.22, 9.7, 18., 25.] bb = [90., 5.50, -1.95, -1.95, -1.80, 0.00] nn = [2.0, 4.5, 2.0, 2.0, 2.0, 2.0] abs_ab = mic * 0. norm_v = 0 # hack to go from tau_b to tau_v mic_5500 = 5500 * 1e-4 for i, a in enumerate(aa): norm_v += aa[i] / ((mic_5500 / ll[i])**nn[i] + (ll[i] / mic_5500)**nn[i] + bb[i]) abs_ab += aa[i] / ((mic / ll[i])**nn[i] + (ll[i] / mic)**nn[i] + bb[i]) return tau_v * (abs_ab / norm_v)
04c89605e8ad4188c62b631e173a9c8fe714958a
707,441
import math def sigmoid(num): """ Find the sigmoid of a number. :type num: number :param num: The number to find the sigmoid of :return: The result of the sigmoid :rtype: number >>> sigmoid(1) 0.7310585786300049 """ # Return the calculated value return 1 / (1 + math.exp(-num))
73730a39627317011d5625ab85c146b6bd7793d8
707,444
def get_name_and_version(requirements_line: str) -> tuple[str, ...]: """Get the name a version of a package from a line in the requirement file.""" full_name, version = requirements_line.split(" ", 1)[0].split("==") name_without_extras = full_name.split("[", 1)[0] return name_without_extras, version
424b3c3138ba223610fdfa1cfa6d415b8e31aff3
707,445
import re def is_valid_dump_key(dump_key): """ True if the `dump_key` is in the valid format of "database_name/timestamp.dump" """ regexmatch = re.match( r'^[\w-]+/\d{4}_\d{2}_\d{2}_\d{2}_\d{2}_\d{2}_\d+\.\w+\.dump$', dump_key, ) return regexmatch
66fd7d465f641a96bd8b22e95918a6dcbefef658
707,448
def reverse_dict2(d): """Reverses direction of dependence dict >>> d = {'a': (1, 2), 'b': (2, 3), 'c':()} >>> reverse_dict2(d) # doctest: +SKIP {1: ('a',), 2: ('a', 'b'), 3: ('b',)} :note: dict order is not deterministic. As we iterate over the input dict, the output of this function depends on the dict order. So the output order of this function should be considered non-deterministic. """ result = {} for key in d: for val in d[key]: result[val] = result.get(val, tuple()) + (key, ) return result
2419538a13699015f8fefa156e89cf9b1960e358
707,450
import random def Flip(p, y='Y', n='N'): """Returns y with probability p; otherwise n.""" return y if random.random() <= p else n
072e170e3f37508a04f8bdbed22470b178f05ab9
707,451
def addneq_parse_residualline(line: str) -> dict: """ Parse a line of daily-solution residuals from an ADDNEQ file. Extract the station name, the direction (N/E/U), the standard deviation and then an arbitrary number of daily residuals. A series of lines can look like this: GESR N 0.07 0.02 -0.06 GESR E 0.10 -0.00 -0.10 GESR U 0.23 -0.10 0.20 """ params = line.split() return { "STATION NAME": params[0], "DIRECTION": params[1], "STDDEV": float(params[2]), "RES": [float(x) for x in params[3:]], }
6d1556cbd01f3fe4cd66dcad231e41fa6b1b9470
707,453
def fahrenheit_to_celsius(fahrenheit): """Convert a Fahrenheit temperature to Celsius.""" return (fahrenheit - 32.0) / 1.8
4aee3dd0b54450fabf7a3a01d340b45a89caeaa3
707,454
def username_in_path(username, path_): """Checks if a username is contained in URL""" if username in path_: return True return False
131a8fa102fd0a0f036da81030b005f92ea9aab0
707,463
def str_parse_as_utf8(content) -> str: """Returns the provided content decoded as utf-8.""" return content.decode('utf-8')
75b8d5f1f8867c50b08146cc3edc1d0ab630280a
707,464
def remove_start(s: str) -> str: """ Clear string from start '-' symbol :param s: :return: """ return s[1:] if s.startswith('-') else s
03504a3094798f6582bcae40233f7215e8d4d780
707,466
def get_genome_dir(infra_id, genver=None, annver=None, key=None): """Return the genome directory name from infra_id and optional arguments.""" dirname = f"{infra_id}" if genver is not None: dirname += f".gnm{genver}" if annver is not None: dirname += f".ann{annver}" if key is not None: dirname += f".{key}" return dirname
ab033772575ae30ae346f96aed840c48fb01c556
707,475
def uniq(string): """Removes duplicate words from a string (only the second duplicates). The sequence of the words will not be changed. """ words = string.split() return ' '.join(sorted(set(words), key=words.index))
2e5b6c51bc90f3a2bd7a4c3e845f7ae330390a76
707,478
def generateListPermutations(elements, level=0): """Generate all possible permutations of the list 'elements'.""" #print(" " * level, "gP(", elements, ")") if len(elements) == 0: return [[]] permutations = [] for e in elements: reduced = elements[:] reduced.remove(e) reducedPermutations = generateListPermutations(reduced, level + 1) #print(" "*level, "reduced", reducedPermutations) for p in reducedPermutations: p.insert(0, e) permutations.append(p) return permutations
1894b6726bedaaf634e8c7ac56fc1abd9e204eef
707,481
def clean_up_tokenization_spaces(out_string): """Converts an output string (de-BPE-ed) using de-tokenization algorithm from OpenAI GPT.""" out_string = out_string.replace('<unk>', '') out_string = out_string.replace(' .', '.').replace(' ?', '?').replace(' !', '!').replace(' ,', ',' ).replace(" ' ", "'").replace(" n't", "n't").replace(" 'm", "'m").replace(" do not", " don't" ).replace(" 's", "'s").replace(" 've", "'ve").replace(" 're", "'re") return out_string
0bd51ca7dbaa36569c0d2f18d510f1c6a92e1822
707,484
def pad_sents(sents, pad_token): """ Pad list of sentences(SMILES) according to the longest sentence in the batch. @param sents (list[list[str]]): list of SMILES, where each sentence is represented as a list of tokens @param pad_token (str): padding token @returns sents_padded (list[list[str]]): list of SMILES where SMILES shorter than the max length SMILES are padded out with the pad_token, such that each SMILES in the batch now has equal length. """ sents_padded = [] max_length = max([len(sentence) for sentence in sents]) sents_padded = [sentence+(max_length-len(sentence))*[pad_token] for sentence in sents] return sents_padded
8f0eabfaaa18eafa84366a2f20ed2ddd633dacc6
707,488