content: string, lengths 35 to 416k
sha1: string, length 40
id: int64, 0 to 710k
def columns_not_to_edit():
    """
    Defines column names that shouldn't be edited.
    """
    ## Occasionally unchanging things like NIGHT or TILEID have been missing in the headers, so we won't restrict
    ## that even though it typically shouldn't be edited if the data is there
    return ['EXPID', 'CAMWORD', 'OBSTYPE']
430430c121d784727808b8e7c98d96bd846dc65f
704,443
def substitute_variables(model_params_variables, model_data_raw):
    """
    :param model_params_variables: mapping of parameter names to their substituted values
    :param model_data_raw: raw comma-separated argument strings
    :return: list of argument strings with variables substituted
    """
    model_data_list = []
    for argument_raw in model_data_raw:
        argument_split_raw = argument_raw.split(",")
        argument_split = []
        for parameter_raw in filter(lambda x: x != "", argument_split_raw):
            parameter = parameter_raw.strip()
            if parameter in model_params_variables:
                argument_split.append(model_params_variables[parameter])
            else:
                if "inst" in parameter:
                    last_time_str = model_data_list[-1][1]
                    last_time = float(last_time_str)
                    new_time = last_time + 1
                    new_time_str = str(new_time)
                    argument_split.append(new_time_str)
                elif parameter.startswith("-e") and "_" in parameter:
                    flag = parameter.split("_")[0]
                    argument_split.append(flag)
                else:
                    argument_split.append(parameter)
        model_data_list.append(argument_split)

    # generate clean model data
    model_data = []
    for argument in model_data_list:
        model_data.append(",".join(argument))
    return model_data
bb34bc44f9f4c633d5396fde31bf8ece5cd163c6
704,444
import tempfile
import subprocess

def get_html_docker(url: str) -> str:
    """Returns the rendered HTML at *url* as a string."""
    cmd = [
        'docker', 'container', 'run', '--rm',
        'zenika/alpine-chrome',
        '--no-sandbox', '--dump-dom',
        str(url),
    ]
    with tempfile.NamedTemporaryFile(suffix='.html') as fp:
        p = subprocess.run(cmd, stdout=fp, stderr=subprocess.STDOUT)
        if p.returncode != 0:
            raise OSError(f"Command failed [{p.returncode}]:\n{' '.join(cmd)}")
        with open(fp.name, 'rb') as fout:
            html_doc = fout.read().decode('utf8')
    # Clean up the cmd's previous print statements
    # html_doc = html_doc[html_doc.find('<html>'):].strip()
    if not html_doc:
        raise OSError(f"No HTML could be obtained for {url}")
    return html_doc
2980e35337f572daca7a16f2694620ca5c02aa90
704,445
def domain_domain_pair_association(domain_type_dict, opposite_type_dict={'T': 'AT', 'AT': 'T'}):
    """
    Compute domain domain association.
    domain_type_dict is a {domain_name: {T: [gene_ids], AT: [gene_ids]}, ...}
    """
    domain_domain_dict = {}
    for domain, type2genes in domain_type_dict.items():
        domain_dict = domain_domain_dict.setdefault(domain, {})
        for domain_next, type2genes_next in domain_type_dict.items():
            if domain_next in domain_dict:
                continue
            domain_dict_next = domain_domain_dict.setdefault(domain_next, {})
            pairs = []
            for type, opposite_type in opposite_type_dict.items():
                genes = type2genes.setdefault(type, [])
                genes_next = type2genes_next.setdefault(opposite_type, [])
                pairs += list(set(genes_next) & set(genes))
            if len(pairs) > 0:
                domain_dict[domain_next] = pairs
                domain_dict_next[domain] = pairs
    return domain_domain_dict
1dea69154132af8e39b4119a307e38bde8269160
704,447
import asyncio

def mock_coro(return_value=None, exception=None):
    """Return a coro that returns a value or raises an exception."""
    fut = asyncio.Future()
    if exception is not None:
        fut.set_exception(exception)
    else:
        fut.set_result(return_value)
    return fut
d06d037bab143e288534e3e7e98da259f7c1cefc
704,448
import requests
import json

def request_records(request_params):
    """
    Download utility rate records from USURDB given a set of request parameters.

    :param request_params: dictionary with request parameter names as keys
        and the parameter values
    :return:
    """
    records = requests.get(
        "https://api.openei.org/utility_rates?", params=request_params
    )
    request_content = records.content
    # strict=False prevents an error (control characters are allowed inside
    # strings)
    json_records = json.loads(request_content, strict=False)
    return json_records
7323657186cc87a291e47c3a71cd2e81b4ec8a73
704,449
def _handle_sort_key(model_name, sort_key=None):
    """Generate sort keys according to the sort key passed in from the user.

    :param model_name: Database model name to be queried (alarm, meter, etc.)
    :param sort_key: sort key passed from user.
    return: sort keys list
    """
    sort_keys_extra = {
        'alarm': ['name', 'user_id', 'project_id'],
        'meter': ['user_id', 'project_id'],
        'resource': ['user_id', 'project_id', 'timestamp'],
    }
    sort_keys = sort_keys_extra[model_name]
    if not sort_key:
        return sort_keys
    # NOTE(Fengqian): We need to put the sort key from the user
    # in the first place of the sort keys list.
    try:
        sort_keys.remove(sort_key)
    except ValueError:
        pass
    finally:
        sort_keys.insert(0, sort_key)
    return sort_keys
aef2d996d9d18593ec129c4a37bf8150b3e9c0fe
704,450
def calc_glass_constants(nd, nF, nC, *partials):
    """Given central, blue and red refractive indices, calculate Vd and PFd.

    Args:
        nd, nF, nC: refractive indices at central, short and long wavelengths
        partials (tuple): if present, 2 ref indxs, n4 and n5, wl4 < wl5

    Returns:
        V-number and relative partial dispersion from F to d.

        If `partials` is present, the return values include the central
        wavelength index and the relative partial dispersion between the 2
        refractive indices provided from `partials`.
    """
    dFC = nF - nC
    vd = (nd - 1.0) / dFC
    PFd = (nF - nd) / dFC
    if len(partials) == 2:
        n4, n5 = partials
        P45 = (n4 - n5) / dFC
        return nd, vd, PFd, P45
    return vd, PFd
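# Illustrative check appended by the editor (not in the original source); the
# indices below are assumed, approximately Schott N-BK7 catalog values.
if __name__ == "__main__":
    vd, PFd = calc_glass_constants(1.5168, 1.52238, 1.51432)
    print(round(vd, 2))  # Abbe number, approximately 64.12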
f347b6caf167c19451bb2f03e88b5846c6873250
704,451
import subprocess
import click

def git_status_check(cwd):
    """Check whether there are uncommitted changes in the current dir.

    Parameters
    ----------
    cwd : str
        current working directory to check git status

    Returns
    -------
    bool
        indicating whether there are uncommitted changes
    """
    pipe = subprocess.Popen(["git status --porcelain"],
                            stdout=subprocess.PIPE, shell=True, cwd=cwd)
    stdout, stderr = pipe.communicate()
    stdout = stdout.decode()
    if stdout != "":
        click.echo("Uncommitted changes exist on branch")
        return True
    else:
        return False
11960967a2e0461ee21861a8aaa856233b0275d9
704,452
import os
from pathlib import Path
from datetime import datetime

def update_ssh_config(sshurl, user, dryrun=False):
    """
    Add a new entry to the SSH config file (``~/.ssh/config``).

    It sets the default user login to the SSH special remote.

    Parameters
    -----------
    sshurl : str
        SSH URL of the git-annex special remote in the form
        `ssh://server.example.org`

    user : str
        User login for authentication to the git-annex special remote

    dryrun : bool
        If `True`, only generates the commands and does not execute them
        (Default: `False`)
    """
    # Return cmd as None if no operation is performed
    cmd = None

    # Remove "ssh://" prefix in SSH URL
    sshurl = sshurl.replace('ssh://', '')

    # Path to ssh config file
    ssh_config_path = os.path.join(str(Path.home()), '.ssh', 'config')
    print(f'\t* Add new entry in {ssh_config_path}')

    # Save the current content of an existing ssh config file
    content = None
    if os.path.exists(ssh_config_path):
        with open(ssh_config_path, 'r+') as ssh_config:
            content = ssh_config.read()

    # Add the entry if it does not exist in the existing ssh config file
    with open(ssh_config_path, 'w+') as ssh_config:
        if (content and (f'Host {sshurl}' not in content)) or content is None:
            hdr = [
                '## Added by NeuroDataPub ',
                f'({datetime.strftime(datetime.now(), "%d. %B %Y %I:%M%p")}) ##\n',
            ]
            lines = [
                f'Host {sshurl} \n',
                f'\tHostName {sshurl} \n',
                f'\tUser {user} \n\n'
            ]
            try:
                if not dryrun:
                    ssh_config.writelines(hdr + lines)
                print(f'\t - Entry:\n\n{"".join(lines)}')
                cmd = f"""cat << EOF >> {ssh_config_path}
{hdr}
Host {sshurl}
    HostName {sshurl}
    User {user}
EOF
"""
            except Exception as e:
                print(f'\t - ERROR:\n\n{e}')
        else:
            print(f'\t - INFO: Entry for `Host {sshurl}` already existing!\n\n')

    # Append the previous content of the existing ssh config file
    if content and not dryrun:
        with open(ssh_config_path, 'a') as ssh_config:
            ssh_config.write(content)

    return cmd
50feb2753eb5095090be7b440bb60a7a0478204b
704,454
def in_range(x, a1, a2):
    """Check if (modulo 360) x is in the range a1...a2. a1 must be < a2."""
    a1 %= 360.
    a2 %= 360.
    if a1 <= a2:
        # "normal" range (not including 0)
        return a1 <= x <= a2
    # "jumping" range (around 0)
    return a1 <= x or x <= a2
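# Quick illustration of the wrap-around branch (example added by the editor):
if __name__ == "__main__":
    assert in_range(5, 350, 20)        # the range 350..20 crosses 0 degrees
    assert not in_range(180, 350, 20)  # 180 lies outside that range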
8855ea29e44c546d55122c7c6e4878b44a3bc272
704,455
def trip_direction(trip_original_stops, direction_stops):
    """
    Guess the trip direction_id based on trip_original_stops and
    direction_stops, which should be a dictionary with 2 keys: "0" and "1" -
    corresponding values should be sets of stops encountered in the given dir.
    """
    # Stops for each direction have to be unique
    dir_stops_0 = direction_stops["0"].difference(direction_stops["1"])
    dir_stops_1 = direction_stops["1"].difference(direction_stops["0"])

    # Trip stops in direction 0 and direction 1
    trip_stops_0 = trip_original_stops.intersection(dir_stops_0)
    trip_stops_1 = trip_original_stops.intersection(dir_stops_1)

    # Amount of stops of trip in each direction
    trip_stops_0_len = len(trip_stops_0)
    trip_stops_1_len = len(trip_stops_1)

    # More or equal stops belonging to dir_0 than dir_1 => "0"
    if trip_stops_0_len >= trip_stops_1_len:
        return "0"
    # More stops belonging to dir_1
    elif trip_stops_0_len < trip_stops_1_len:
        return "1"
    # How did we get here
    else:
        raise RuntimeError(f"{trip_stops_0_len} is not bigger, equal or less than {trip_stops_1_len}")
a418c90775039b1d52b09cb2057d71f97361e0d9
704,456
def add_common_arguments(parser):
    """Populate the given argparse.ArgumentParser with arguments.

    This function can be used to make the definition of these argparse
    arguments reusable in other modules and avoid the duplication of these
    definitions among the executable scripts.

    The following arguments are added to the parser:

    - **...** (...): ...

    Parameters
    ----------
    parser : argparse.ArgumentParser
        The parser to populate.

    Returns
    -------
    argparse.ArgumentParser
        Return the populated ArgumentParser object.
    """
    return parser
c8e3eba16c33f0fcf12caf3a31b281dcee858648
704,457
import os

def is_valid_path_and_ext(fname, wanted_ext=None):
    """
    Validates the path exists and the extension is one wanted.

    Parameters
    ----------
    fname : str
        Input file name.
    wanted_ext : list of str, optional
        Extensions to check

    Return
    ------
    bool
    """
    if not os.path.exists(fname):
        print(f"Error: No file '{fname}' exists.")
        return False

    # Simply validates the path existence
    if wanted_ext is None:
        return True

    # Make sure the extension is one desired.
    base, ext = os.path.splitext(fname)
    if ext not in wanted_ext:
        return False

    return True
fea067c87a2f867703c234c2fdab418c7e0ab862
704,458
def attr_names(obj):
    """
    Determine the names of user-defined attributes of the given
    SimpleNamespace object.

    Source: https://stackoverflow.com/a/27532110

    :return: A list of strings.
    """
    return sorted(obj.__dict__)
ecbc0321d0796925341731df303c48ea911fcf57
704,459
import random

def weighted_choice(choices):
    """
    Pick a value off the list, weighted by each choice's weight.

    :param list choices: Each item is a tuple of choice and weight
    :return: the selected choice
    """
    total = sum(weight for choice, weight in choices)
    selection = random.uniform(0, total)
    counter = 0
    for choice, weight in choices:
        if counter + weight > selection:
            return choice
        counter += weight
    assert False, "Shouldn't get here"
c32ff27b9892bb88db2928ec22c4ede644f6792c
704,461
import argparse

def parse_args(args):
    """Parse the arguments."""
    parser = argparse.ArgumentParser(description='Simple training script for training a RetinaNet network.')
    parser.add_argument('--data-path', help='Data for prediction', type=str, required=True)
    parser.add_argument('--target-path', help='Target path', type=str, default='result.json')
    parser.add_argument('--split', help='Dataset split', type=str, default='val')
    parser.add_argument('--max-detections', help='Max detections', default=10)
    parser.add_argument('--ninedash-category-id', help='Ninedash category ID', default=1)
    parser.add_argument('--model-path', help='Model path of the network', type=str, required=True)
    parser.add_argument('--score-threshold', help='Minimum score threshold', type=float, default=0.3)
    parser.add_argument('--phi', help='Hyper parameter phi', default=0, type=int, choices=(0, 1, 2, 3, 4, 5, 6))
    parser.add_argument('--weighted-bifpn', help='Use weighted BiFPN', action='store_true')
    parser.add_argument('--batch-size', help='Size of the batches.', default=1, type=int)
    parser.add_argument('--num-classes', help='Number of classes', default=1, type=int)
    parser.add_argument('--gpu', help='Id of the GPU to use (as reported by nvidia-smi).')

    # parse once instead of twice (the original called parse_args() for the
    # print and again for the return)
    parsed = parser.parse_args(args)
    print(vars(parsed))
    return parsed
1071d2fdeb2eec7a7b149b295d504e4796dd3aa7
704,462
from typing import List
from typing import Dict

def aggregate_collate_fn(insts: List) -> Dict[str, List[str]]:
    """Aggregate the instances to the max seq length in batch.

    Args:
        insts: list of samples

    Returns:
    """
    snts, golds = [], []
    for inst in insts:
        snts.append(inst['snt'])
        golds.append(inst['gold'])
    assert len(snts) == len(golds)
    return {'snts': snts, 'golds': golds}
8d986d508fd2e5a5c91947563aec2b862ab13361
704,463
def get_layout(data, width_limit):
    """A row of a chart can be dissected as the three components below:

    1. Label region ('label1'): fixed length (set to max label length + 1)
    2. Intermediate region (' | '): 3 characters
    3. Bar region ('▇ or '): variable length

    This function first calculates the width of the label region (1), then
    computes the longest of the lengths of the bar (3) regions. It returns
    the layout of the chart, described by the widths of each region. The
    total width of the chart will not exceed width_limit - 15 characters,
    just for an aesthetic reason.
    """
    labels = [d[0] for d in data]
    label_width = len(max(labels, key=lambda label: len(label))) + 1
    intermediate_width = 3
    bar_width = (width_limit - 15) - (label_width + intermediate_width)
    return label_width, bar_width
dbb8bfa2c537f3b05713bf3abdc106ec74bc7ac9
704,466
def get_number_of_classes(model_config):
    """Returns the number of classes for a detection model.

    Args:
        model_config: A model_pb2.DetectionModel.

    Returns:
        Number of classes.

    Raises:
        ValueError: If the model type is not recognized.
    """
    meta_architecture = model_config.WhichOneof("model")
    meta_architecture_config = getattr(model_config, meta_architecture)
    if hasattr(meta_architecture_config, "num_classes"):
        return meta_architecture_config.num_classes
    else:
        raise ValueError("{} does not have num_classes.".format(meta_architecture))
d87605b6025e1bc78c7436affe740f7591a99f68
704,467
def shorten_build_target(build_target: str) -> str:
    """Returns a shortened version of the build target."""
    if build_target == '//chrome/android:chrome_java':
        return 'chrome_java'
    return build_target.replace('//chrome/browser/', '//c/b/')
03af53f1fcacae9a4e0309053075806d65275ce9
704,468
def apply_shift(x, shift, out):
    """
    Translates elements of `x` along axis=0 by `shift`, using linear
    interpolation for non-integer shifts.

    Parameters
    ----------
    x : ndarray
        Array with ndim >= 1, holding data.
    shift : float
        Shift magnitude.
    out : ndarray
        Array with the same shape as x.

    Returns
    -------
    out : ndarray
    """
    T = len(out)
    if shift > 0:
        d = int(shift // 1)
        r = shift % 1
        for t in range(T):
            j = t - d
            if j <= 0:
                out[t] = x[0]
            else:
                out[t] = x[j] * (1 - r) + x[j - 1] * r
    elif shift < 0:
        d = int((-shift) // 1)
        r = (-shift) % 1
        for t in range(T):
            j = t - d
            if j <= 0:
                out[-t-1] = x[-1]
            else:
                out[-t-1] = x[-j-1] * (1 - r) + x[-j] * r
    else:
        out[:] = x
    return out
86e58c536cbc2fb43bb049aab6d0d4d733308bbd
704,469
from typing import Tuple
import random

def draw_two(max_n: int) -> Tuple[int, int]:
    """Draw two different ints given max (mod max)."""
    i = random.randint(0, max_n)
    j = (i + random.randint(1, max_n - 1)) % max_n
    return i, j
9ebb09158c296998c39a2c4e8fc7a18456428fc6
704,471
def versionPropertiesDictionary(sql_row_list):
    """
    versionPropertiesDictionary(sql_row_list)
    transforms a row gotten via SQL request (list) to a dictionary
    """
    properties_dictionary = {
        "id": sql_row_list[0],
        "model_id": sql_row_list[1],
        "version": sql_row_list[2],
        "metadata": sql_row_list[3],
        "commit_comment": sql_row_list[4],
        "created_timestamp": sql_row_list[5],
    }
    return properties_dictionary
ab8cdd166bf8a187945c44fd416c3a4cf4634d02
704,472
import os

def moduleName(file):
    """Extract a module name from the python source file name, with an appended ':'."""
    return os.path.splitext(os.path.split(file)[1])[0] + ":"
4f5035e80ddd3df7a8a93585bebf25e2e3300b49
704,474
def ms2str(v):
    """
    Convert a time in milliseconds to a time string.

    Arguments:
        v: a time in milliseconds.

    Returns:
        A string in the format HH:MM:SS,mmm.
    """
    v, ms = divmod(v, 1000)
    v, s = divmod(v, 60)
    h, m = divmod(v, 60)
    return f"{h:02d}:{m:02d}:{s:02d},{ms:03d}"
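# Worked example added for illustration: 1 h, 2 min, 3 s and 4 ms.
if __name__ == "__main__":
    assert ms2str(3723004) == "01:02:03,004"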
5d50aa072584e5ad17d8bd3d08b0b0813aced819
704,475
import operator

def regroup(X, N):
    """
    Regroup the rows and columns in X.

    Rows/Columns that are N apart in X are adjacent in Y.

    Parameters:
        X (np.ndarray): Image to be regrouped
        N (list): Size of 1D DCT performed (could give int)

    Returns:
        Y (np.ndarray): Regrouped image
    """
    # if N is a 2-element list, N[0] is used for columns and N[1] for rows.
    # if a single value is given, a square matrix is assumed
    try:
        N_m = N_n = operator.index(N)
    except TypeError:
        N_m, N_n = N

    m, n = X.shape
    if m % N_m != 0 or n % N_n != 0:
        raise ValueError('regroup error: X dimensions not multiples of N')

    X = X.reshape(m // N_m, N_m, n // N_n, N_n)  # subdivide the axes
    X = X.transpose((1, 0, 3, 2))  # permute them
    return X.reshape(m, n)  # and recombine
d4492e71a42a69d86d0e2a1c21bf05d13dfe13d7
704,476
def uint_to_two_compl(value: int, length: int) -> int:
    """Convert an int to a two's complement integer with binary operations."""
    if value >> (length - 1) & 1 == 0:  # check sign bit
        return value & (2 ** length - 1)
    else:
        return value | (~0 << length)
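# Added sanity checks: 0xFF re-read as a signed 8-bit value is -1,
# while 0x7F (sign bit clear) stays 127.
if __name__ == "__main__":
    assert uint_to_two_compl(0xFF, 8) == -1
    assert uint_to_two_compl(0x7F, 8) == 127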
a0b7bd5192a3f12119ea7ec1a58ca785c37369bf
704,477
def is_index(file_name: str) -> bool:
    """Determines if a filename is a proper index name."""
    return file_name == "index"
7beb5779b61e25b4467eb7964478c78d44f28931
704,480
import hashlib

def hash_short(message, length=16):
    """Given Hash Function"""
    # length is in bits and each hex digit encodes 4 bits; integer division is
    # required here because a float slice index raises TypeError on Python 3
    return hashlib.sha1(message).hexdigest()[:length // 4]
bd071674ce5caf382dc73d27835f43409a6a49d2
704,481
def get_substrings(source: str):
    """Get all substrings of a given string

    Args:
        source (str): the string to generate the substrings from

    Returns:
        list: list of substrings
    """
    # the number of substrings per length is the same as the length of the substring
    # if the characters are not equal. For each duplicate char decrease number of substrings by 1
    substrings = []
    # restore the outer loop that was left commented out in the original, so
    # that substrings starting at every position (not only prefixes) are built
    for i in range(len(source)):
        s = ""
        for j in range(i, len(source)):
            s += source[j]
            substrings.append(s)
    return substrings
79f1db4184c51235a9d6beb8437f1647bc993958
704,482
def search_tag(resource_info, tag_key):
    """Search tag in tag list by given tag key."""
    return next(
        (tag["Value"] for tag in resource_info.get("Tags", []) if tag["Key"] == tag_key),
        None,
    )
5945631a3de7032c62c493369e82dd330ef2bc47
704,483
def get_spending_features(txn, windows_size=[1, 7, 30]):
    """
    This function computes:
        - the cumulative number of transactions for a customer for 1, 7 and 30 days
        - the cumulative average transaction amount for a customer for 1, 7 and 30 days

    Args:
        txn: grouped transactions of customer

    Returns:
        nb_trans and cust_avg_amt for each window size
    """
    # Setting trans_date as index for rolling function
    txn.index = txn.trans_date
    for size in windows_size:
        # compute the total transaction amount and the number of transactions during the window
        rolling_tx_amt = txn["amt"].rolling(f"{size}D").sum()
        roll_tx_cnt = txn["amt"].rolling(f"{size}D").count()
        # compute the average transaction amount
        avg_trans_amt = rolling_tx_amt / roll_tx_cnt
        # create as new columns
        txn[f"nb_txns_{size}_days"] = list(roll_tx_cnt)
        txn[f"avg_txns_amt_{size}_days"] = list(avg_trans_amt)
    # Reindex according to transaction IDs
    txn.index = txn.trans_num
    # And return the dataframe with the new features
    return txn
b648df3d1217074edec455a416e0eb698d8069ee
704,484
import six

def _expand_expected_codes(codes):
    """Expand the expected code string into a set of codes.

    200-204 -> 200, 201, 202, 203, 204
    200, 203 -> 200, 203
    """
    retval = set()
    for code in codes.replace(',', ' ').split(' '):
        code = code.strip()
        if not code:
            continue
        elif '-' in code:
            low, hi = code.split('-')[:2]
            retval.update(
                str(i) for i in six.moves.xrange(int(low), int(hi) + 1))
        else:
            retval.add(code)
    return retval
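# Added example mirroring the docstring:
if __name__ == "__main__":
    assert _expand_expected_codes('200-202, 204') == {'200', '201', '202', '204'}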
52056db88bf14352d4cda2411f25855457defbd7
704,485
def _get_minimum_columns(nrows, col_limits, families, family_counts, random_state):
    """If ``col_limits`` has a tuple lower limit then sample columns of the
    corresponding element of ``families`` as needed to satisfy this bound."""
    columns, metadata = [], []
    for family, min_limit in zip(families, col_limits[0]):
        for _ in range(min_limit):
            meta = family.make_instance(random_state)
            columns.append(meta.sample(nrows, random_state))
            metadata.append(meta)
            family_counts[family.name] += 1
    return columns, metadata, family_counts
56a0abc58a6e6c3356be08511e7a30da3c1c52e3
704,487
def _paths_from_ls(recs):
    """The xenstore-ls command returns a listing that isn't terribly useful.
    This method cleans that up into a list containing the full path of each
    entry.
    """
    last_nm = ""
    level = 0
    path = []
    ret = []
    for ln in recs.splitlines():
        nm, val = ln.rstrip().split(" = ")
        barename = nm.lstrip()
        this_level = len(nm) - len(barename)
        if this_level == 0:
            ret.append(barename)
            level = 0
            path = []
        elif this_level == level:
            # child of same parent
            ret.append("%s/%s" % ("/".join(path), barename))
        elif this_level > level:
            path.append(last_nm)
            ret.append("%s/%s" % ("/".join(path), barename))
            level = this_level
        elif this_level < level:
            path = path[:this_level]
            ret.append("%s/%s" % ("/".join(path), barename))
            level = this_level
        last_nm = barename
    return ret
afa0fbe3e5c1773074569363a538587664a00a2f
704,488
import torch

def make_complex_matrix(x, y):
    """A function that takes two tensors (a REAL (x) and IMAGINARY part (y))
    and returns the combined complex tensor.

    :param x: The real part of your matrix.
    :type x: torch.doubleTensor
    :param y: The imaginary part of your matrix.
    :type y: torch.doubleTensor

    :raises ValueError: This function will not execute if x and y do not
        have the same dimension.

    :returns: The full vector with the real and imaginary parts separated
        as previously mentioned.
    :rtype: torch.doubleTensor
    """
    if x.size()[0] != y.size()[0] or x.size()[1] != y.size()[1]:
        raise ValueError(
            'Real and imaginary parts do not have the same dimension.')
    z = torch.zeros(2, x.size()[0], x.size()[1], dtype=torch.double)
    z[0] = x
    z[1] = y
    return z
faae031b3aa6f4972c8f558f6b66e33d416dec71
704,489
def scale3(v, s):
    """Scale the 3-vector v by the scalar s."""
    return (v[0] * s, v[1] * s, v[2] * s)
4993c072fb66a33116177023dde7b1ed2c8705fd
704,490
from typing import SupportsAbs
import math

def is_unit(v: SupportsAbs[float]) -> bool:  # <2>
    """'True' if the magnitude of 'v' is close to 1."""
    return math.isclose(abs(v), 1.0)
0b31da2e5a3bb6ce49705d5b2a36d3270cc5d802
704,491
def atom_eq(at1, at2):
    """Returns True if the literals are syntactically equal."""
    return at1 == at2
43aab77292c81134490eb8a1c79a68b38d50628d
704,492
import math

def k2(Ti, exp=math.exp):
    """[cm^3 / s]"""
    return 2.78e-13 * exp(2.07/(Ti/300) - 0.61/(Ti/300)**2)
6c1f471b31767f2d95f3900a8811f47dc8c45086
704,493
import math
import torch

def magnitude_prune(masking, mask, weight, name):
    """Prunes the weights with smallest magnitude.

    The pruning functions in this sparse learning library work by
    constructing a binary mask variable "mask" which prevents gradient flow
    to weights and also sets the weights to zero where the binary mask is 0.
    Thus 1s in the "mask" variable indicate where the sparse network has
    active weights. In this function name and masking can be used to access
    global statistics about the specific layer (name) and the sparse network
    as a whole.

    Args:
        masking     Masking class with state about current layers and the
                    entire sparse network.
        mask        The binary mask. 1s indicated active weights.
        weight      The weight of the respective sparse layer. This is a
                    torch parameter.
        name        The name of the layer. This can be used to access
                    layer-specific statistics in the masking class.

    Returns:
        mask        Pruned Binary mask where 1s indicated active weights.
                    Can be modified in-place or newly constructed.

    Accessible global statistics:
        Layer statistics:
            Non-zero count of layer: masking.name2nonzeros[name]
            Zero count of layer: masking.name2zeros[name]
            Redistribution proportion: masking.name2variance[name]
            Number of items removed through pruning: masking.name2removed[name]

        Network statistics:
            Total number of nonzero parameters in the network: masking.total_nonzero = 0
            Total number of zero-valued parameters in the network: masking.total_zero = 0
            Total number of parameters removed in pruning: masking.total_removed = 0
    """
    num_remove = math.ceil(masking.prune_rate * masking.name2nonzeros[name])
    num_zeros = masking.name2zeros[name]
    k = math.ceil(num_zeros + num_remove)
    if num_remove == 0.0:
        return weight.data != 0.0

    x, idx = torch.sort(torch.abs(weight.data.view(-1)))
    mask.data.view(-1)[idx[:k]] = 0.0
    return mask
4bac89da952338e133ac0d85735e80631862c7da
704,494
async def async_unload_entry(hass, config_entry):
    """Handle removal of an entry."""
    return True
28005ececbf0c43c562cbaf7a2b8aceb12ce3e41
704,496
import json

def is_valid_json(text: str) -> bool:
    """Is this text valid JSON?"""
    try:
        json.loads(text)
        return True
    except json.JSONDecodeError:
        return False
3013210bafd5c26cacb13e9d3f4b1b708185848b
704,497
from pathlib import Path

def get_config_path(root: str, idiom: str) -> Path:
    """Get path to idiom config

    Arguments:
        root {str} -- root directory of idiom config
        idiom {str} -- basename of idiom config

    Returns:
        Path -- pathlib.Path to the config file
    """
    root_path = Path(root)
    file_name = '{}.json'.format(idiom)
    return root_path.joinpath(file_name)
86d65f11fbd1dfb8aca13a98e129b085158d2aff
704,498
def minor_min_width(G):
    """Computes a lower bound for the treewidth of graph G.

    Parameters
    ----------
    G : NetworkX graph
        The graph on which to compute a lower bound on the treewidth.

    Returns
    -------
    lb : int
        A lower bound on the treewidth.

    Examples
    --------
    This example computes a lower bound for the treewidth of the
    :math:`K_7` complete graph.

    >>> K_7 = nx.complete_graph(7)
    >>> dnx.minor_min_width(K_7)
    6

    References
    ----------
    Based on the algorithm presented in [GD]_
    """
    # we need only deal with the adjacency structure of G. We will also
    # be manipulating it directly so let's go ahead and make a new one
    adj = {v: set(u for u in G[v] if u != v) for v in G}

    lb = 0  # lower bound on treewidth
    while len(adj) > 1:
        # get the node with the smallest degree
        v = min(adj, key=lambda v: len(adj[v]))

        # find the vertex u such that the degree of u is minimal in the
        # neighborhood of v
        neighbors = adj[v]
        if not neighbors:
            # if v is a singleton, then we can just delete it
            del adj[v]
            continue

        def neighborhood_degree(u):
            Gu = adj[u]
            return sum(w in Gu for w in neighbors)

        u = min(neighbors, key=neighborhood_degree)

        # update the lower bound
        new_lb = len(adj[v])
        if new_lb > lb:
            lb = new_lb

        # contract the edge between u, v
        adj[v] = adj[v].union(n for n in adj[u] if n != v)
        for n in adj[v]:
            adj[n].add(v)
        for n in adj[u]:
            adj[n].discard(u)
        del adj[u]

    return lb
649ea7fe0a55ec5289b04b761ea1633c2a258000
704,499
def normalize_email(email):
    """Normalizes the given email address.

    In the current implementation it is converted to lower case. If the
    given email is None, an empty string is returned.
    """
    email = email or ''
    return email.lower()
6ee68f9125eef522498c7299a6e793ba11602ced
704,500
def parse_read_options(form, prefix=''):
    """Extract read options from form data.

    Arguments:
        form (obj): Form object

    Keyword Arguments:
        prefix (str): prefix for the form fields (default: {''})

    Returns:
        (dict): Read options key - value dictionary.
    """
    read_options = {
        'encoding': getattr(form, prefix + 'encoding').data,
        'delimiter': getattr(form, prefix + 'delimiter').data,
    }
    geom = getattr(form, prefix + 'geom')
    lat = getattr(form, prefix + 'lat')
    lon = getattr(form, prefix + 'lon')
    if geom.data != '':
        read_options['geom'] = geom.data
    elif lat.data != '' and lon.data != '':
        read_options['lat'] = lat.data
        read_options['lon'] = lon.data
    return read_options
660e836172015999fe74610dffc331d2b37991c3
704,501
import argparse

def create_arguement_parser():
    """Return an argument parser used in the shell."""
    parser = argparse.ArgumentParser(
        prog="python run.py",
        description="A program named PictureToAscii that can make 'Picture To Ascii'",
        epilog="Written by jskyzero 2016/12/03",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        prefix_chars='-')
    parser.add_argument("pictures_path", metavar="picture_path", nargs='+',
                        help="the picture(s) file path")
    parser.add_argument("-o", metavar='output dir', dest="output_dir",
                        help="the ascii file output dir", default="./")
    parser.add_argument("-e", metavar='extension name', dest="ext_name", nargs="?",
                        help="empty value means no extension name", default="txt")
    parser.add_argument("-s", metavar='size', dest="size", nargs=2, type=int,
                        help="width and height of all ascii file(s)")
    return parser
ce53083d1eb823063b36341667bee0666e832082
704,502
import torch

def normalize_gradient(netC, x):
    """
                      f
    f_hat = --------------------
            || grad_f || + | f |

    x: real_data_v
    f: C_real before mean
    """
    x.requires_grad_(True)
    f = netC(x)
    grad = torch.autograd.grad(
        f, [x], torch.ones_like(f), create_graph=True, retain_graph=True)[0]
    grad_norm = torch.norm(torch.flatten(grad, start_dim=1), p=2, dim=1)
    grad_norm = grad_norm.view(-1, *[1 for _ in range(len(f.shape) - 1)])
    f_hat = f / (grad_norm + torch.abs(f))
    return f_hat
ff1b8b239cb86e62c801496b51d95afe6f6046d4
704,503
import os

def get_path_with_arch(platform, path):
    """Distribute packages into folders according to the platform."""
    # Change the platform name into correct formats
    platform = platform.replace('_', '-')
    platform = platform.replace('x86-64', 'x86_64')
    platform = platform.replace('manylinux1', 'linux')
    path = os.path.join(path, platform)
    return path
c627d01837b7e2c70394e1ec322e03179e859251
704,504
def join_2_steps(boundaries, arguments):
    """Joins the tags for argument boundaries and classification accordingly."""
    answer = []
    for pred_boundaries, pred_arguments in zip(boundaries, arguments):
        cur_arg = ''
        pred_answer = []
        for boundary_tag in pred_boundaries:
            if boundary_tag == 'O':
                pred_answer.append('O')
            elif boundary_tag in 'BS':
                cur_arg = pred_arguments.pop(0)
                tag = '%s-%s' % (boundary_tag, cur_arg)
                pred_answer.append(tag)
            else:
                tag = '%s-%s' % (boundary_tag, cur_arg)
                pred_answer.append(tag)
        answer.append(pred_answer)
    return answer
9801ca876723d092f89a68bd45a138dba406468d
704,505
import operator
import math

def unit_vector(vec1, vec2):
    """Return a unit vector pointing from vec1 to vec2."""
    # materialize the map() result: in Python 3 it is a one-shot iterator,
    # and it is traversed twice below
    diff_vector = list(map(operator.sub, vec2, vec1))
    scale_factor = math.sqrt(sum(map(lambda x: x**2, diff_vector)))
    if scale_factor == 0:
        scale_factor = 1  # We don't have an actual vector, it has zero length
    return map(lambda x: x / scale_factor, diff_vector)
79e2cff8970c97d6e5db5259801c58f82075b1a2
704,506
def my_map(f, lst):
    """this does something to every object in a list"""
    if lst == []:
        return []
    return [f(lst[0])] + my_map(f, lst[1:])
20016cd580763289a45a2df704552ee5b5b4f25e
704,507
import struct
import ipaddress

def read_ipv6(d):
    """Read an IPv6 address from the given 16-byte buffer."""
    u, l = struct.unpack('>QQ', d)
    return ipaddress.IPv6Address((u << 64) + l)
c2006e6dde0de54b80b7710980a6b0cb175d3e19
704,508
def generate_level08():
    """Generate the bricks."""
    bricks = bytearray(8 * 5 * 3)
    colors = [2, 0, 1, 3, 4]
    index = 0
    col_x = 0
    for x in range(6, 111, 26):
        for y in range(27, 77, 7):
            bricks[index] = x
            bricks[index + 1] = y
            bricks[index + 2] = colors[col_x]
            index += 3
        col_x += 1
    return bricks
c1535d8efb285748693f0a457eb6fe7c91ce55d4
704,509
import os

def user_prompt(
    question_str, response_set=None, ok_response_str="y", cancel_response_str="f"
):
    """``input()`` function that accesses the stdin and stdout file
    descriptors directly.

    For prompting for user input under ``pytest`` ``--capture=sys`` and
    ``--capture=no``. Does not work with ``--capture=fd``.
    """
    valid_response_set = (
        (response_set or set()) | set(ok_response_str) | set(cancel_response_str)
    )

    def fd_input():
        while True:
            with os.fdopen(os.dup(1), "w") as stdout:
                stdout.write("\n{}: ".format(question_str))
            with os.fdopen(os.dup(2), "r") as stdin:
                response_str = stdin.readline().lower().strip()
            if response_str in valid_response_set:
                return response_str
            if response_str == "":
                return ok_response_str

    try:
        return fd_input()
    except KeyboardInterrupt:
        return cancel_response_str
086a56fd16b89cb33eff8f8e91bb5b284ae6d8c4
704,510
def int2bin(n, count=16):
    """
    this method converts integer numbers to binary numbers

    @param n: the number to be converted
    @param count: the number of binary digits
    """
    return "".join([str((n >> y) & 1) for y in range(count-1, -1, -1)])
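# Added example: 5 rendered as an 8-digit binary string.
if __name__ == "__main__":
    assert int2bin(5, count=8) == "00000101"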
70ce01844c8e32eb24750c4420812feda73a89dd
704,511
def intstr(num, numplaces=4):
    """A simple function to map an input number into a string padded with
    zeros (default 4).

    Syntax is: out = intstr(6, numplaces=4)  -->  0006

    2008-05-27 17:12 IJC: Created
    """
    formatstr = "%(#)0" + str(numplaces) + "d"
    return formatstr % {"#": int(num)}
8637a1f6146d1ff8b399ae920cfbfaab83572f86
704,512
def rename_duplicate_name(dfs, name):
    """Remove duplicates of *name* from the columns in each of *dfs*.

    Args:
        dfs (list of pandas DataFrames)

    Returns:
        list of pandas DataFrames. Columns renamed such that there are no
        duplicates of *name*.
    """
    locations = []
    for i, df in enumerate(dfs):
        for j, col in enumerate(df.columns):
            if col == name:
                locations.append((i, j))
    if len(locations) > 1:
        for k, (i, j) in enumerate(locations):
            cols = list(dfs[i].columns)
            cols[j] = name + f":{k + 1:.0f}"
            dfs[i].columns = cols
    return dfs
c816804a0ea9f42d473f99ddca470f4e527336f9
704,513
def checkH(board, intX, intY, newX, newY):
    """Check if the horse (knight) move is legal; returns True if legal."""
    tmp = False
    if abs(intX - newX) + abs(intY - newY) == 3:
        if intX != newX and intY != newY:
            tmp = True
    return tmp
f1ce66457a54dea4c587bebf9bd2dd0b56577dc4
704,514
import re

def get_info(prefix, string):
    """
    :param prefix: the regex to match the info you are trying to obtain
    :param string: the string where the info is contained (can have new line characters)
    :return: the first match within the string, or None if there is no match
    """
    info = None
    # find and return the first match based on the prefix, if there is a match (not empty)
    matches = re.findall(prefix, string)
    if len(matches) > 0:
        info = matches[0]
    return info
ed41100910df8ec3e0060ecd1196fb8cc1060329
704,516
from datetime import datetime

def convert_moz_time(moz_time_entry):
    """
    Convert Mozilla timestamp-alike data entries (PRTime, microseconds since
    the epoch) to a datetime object; a commented-out variant formats the
    result as an ISO 8601-ish string instead.

    [ https://developer.mozilla.org/en-US/docs/Mozilla/Projects/NSPR/Reference/PRTime ]
    """
    ## result = datetime.fromtimestamp(moz_time_entry/1000000).strftime('%Y-%m-%d %H:%M:%S')
    result = datetime.fromtimestamp(moz_time_entry / 1000000)
    return result
ba15a7ed86d9b608799384e9663d36c1cff36fae
704,519
def is_nonnegative_length(G, l):
    """
    Checks whether a length function, defined on the arcs, satisfies the
    non-negativity condition.

    Args:
        G: An instance of Graph class.
        l: A dictionary that defines a length function on the edge set.

    Returns:
        A boolean, True if the length function satisfies the non-negativity
        condition, False otherwise.
    """
    assert G.directed
    # Non-negativity condition
    for e in G.aristas:
        if l[e] < 0:
            return False
    return True
c99aaf07b65f9a192b6421b4b3ccf73c98917500
704,521
import os

def get_verified_absolute_path(path):
    """Verify and return absolute path of argument.

    Args:
        path : Relative/absolute path

    Returns:
        Absolute path
    """
    installed_path = os.path.abspath(path)
    if not os.path.exists(installed_path):
        raise RuntimeError("No valid path for requested component exists")
    return installed_path
2d7c6dcb6066c81b3506837534a72aa814e1faa6
704,522
def unflatten_tensor(input, feat_size, anchors):
    """
    Un-flattens and un-permutes a tensor from size
    [B x (W x H) x C] --> [B x C x W x H]
    """
    bsize = input.shape[0]
    if len(input.shape) >= 3:
        csize = input.shape[2]
    else:
        csize = 1
    input = input.view(bsize, feat_size[0] * anchors.shape[0], feat_size[1], csize)
    input = input.permute(0, 3, 1, 2).contiguous()
    return input
9e7b603071312ea35fa214b3e5a6f586d652c760
704,523
import curses
import math

def get_colour_depth():
    """
    Returns the maximum number of possible values per colour channel that
    can be used with the available number of colours and colour pairs in
    the terminal.
    """
    nr_colours = curses.COLORS
    return int(math.pow(nr_colours, 1. / 3.))
3bd47ee65a7db72d87ac7cc965a43e37724e148a
704,524
import six
import traceback

def failure_format_traceback(fail):
    """
    :param fail: must be an IFailedFuture
    returns a string
    """
    try:
        f = six.StringIO()
        traceback.print_exception(
            fail._type, fail.value, fail._traceback,
            file=f,
        )
        return f.getvalue()
    except Exception:
        return u"Failed to format failure traceback for '{0}'".format(fail)
fdcbdf9f7617f401d511c9ce9b58420367419250
704,525
def bytes_to_int(b: bytes) -> int:
    """
    Convert bytes to a big-endian unsigned int.

    :param b: The bytes to be converted.
    :return: The int.
    """
    return int.from_bytes(bytes=b, byteorder='big', signed=False)
eb08ae0b2663047557b8f102c6c6ed565aae8044
704,526
import numpy

def cos_distance_numpy_vector(v1, v2):
    """get cos angle (similarity) between two vectors"""
    d1 = numpy.sum(v1 * v1)
    d1 = numpy.sqrt(d1)  # magnitude of v1
    d2 = numpy.sum(v2 * v2)
    d2 = numpy.sqrt(d2)  # magnitude of v2
    n1 = v1 / d1
    n2 = v2 / d2
    return numpy.sum(n1 * n2)
fdbc02c5cba377c561843dd57e9ca13a2e9c6960
704,528
def cell_to_se2_batch(cell_idx, mapmin, mapres):
    """
    Conversion for batch input: cell_idx = [batch_size, 2]
    OUTPUT: [batch_size, 2]
    """
    return ((cell_idx[:, 0] + 0.5) * mapres[0] + mapmin[0],
            (cell_idx[:, 1] + 0.5) * mapres[1] + mapmin[1])
72a91b5b3a90014322ad8754848e5f85751d3b3a
704,529
def pyav_decode_stream(
    container, start_pts, end_pts, stream, stream_name, buffer_size=0
):
    """
    Decode the video with PyAV decoder.

    Args:
        container (container): PyAV container.
        start_pts (int): the starting Presentation TimeStamp to fetch the
            video frames.
        end_pts (int): the ending Presentation TimeStamp of the decoded frames.
        stream (stream): PyAV stream.
        stream_name (dict): a dictionary of streams. For example, {"video": 0}
            means video stream at stream index 0.
        buffer_size (int): number of additional frames to decode beyond end_pts.

    Returns:
        result (list): list of frames decoded.
        max_pts (int): max Presentation TimeStamp of the video sequence.
    """
    # Seeking in the stream is imprecise. Thus, seek to an earlier PTS by a
    # margin pts.
    margin = 1024
    seek_offset = max(start_pts - margin, 0)

    container.seek(seek_offset, any_frame=False, backward=True, stream=stream)
    frames = {}
    buffer_count = 0
    max_pts = 0
    for frame in container.decode(**stream_name):
        max_pts = max(max_pts, frame.pts)
        if frame.pts < start_pts:
            continue
        if frame.pts <= end_pts:
            frames[frame.pts] = frame
        else:
            buffer_count += 1
            frames[frame.pts] = frame
            if buffer_count >= buffer_size:
                break
    result = [frames[pts] for pts in sorted(frames)]
    return result, max_pts
5b012899c047dcd3ee90d793c68ebdd1d2f413c1
704,530
import sqlite3

def encode_data_for_sqlite(value):
    """Fix encoding bytes."""
    try:
        return value.decode()
    except (UnicodeDecodeError, AttributeError):
        return sqlite3.Binary(value)
fe59a2b0dde5ff7c41acc02c4de6724cc75553fb
704,531
def ext_binary_gcd_env(a, b):
    """Extended binary GCD.

    Given input a, b the function returns d, s, t such that
    gcd(a, b) = d = a*s + b*t.
    """
    u, v, s, t, r = 1, 0, 0, 1, 0
    while (a & 1 == 0) and (b & 1 == 0):
        a, b, r = a >> 1, b >> 1, r + 1
    alpha, beta = a, b
    #
    # from here on we maintain a = u * alpha + v * beta
    # and b = s * alpha + t * beta
    #
    while (a & 1 == 0):
        a = a >> 1
        if (u & 1 == 0) and (v & 1 == 0):
            u, v = u >> 1, v >> 1
        else:
            u, v = (u + beta) >> 1, (v - alpha) >> 1
    while a != b:
        if (b & 1 == 0):
            b = b >> 1
            #
            # Commentary: note that here, since b is even,
            # (i) if s, t are both odd then so are alpha, beta
            # (ii) if s is odd and t even then alpha must be even, so beta is odd
            # (iii) if t is odd and s even then beta must be even, so alpha is odd
            # so for each of (i), (ii) and (iii) s + beta and t - alpha are even
            #
            if (s & 1 == 0) and (t & 1 == 0):
                s, t = s >> 1, t >> 1
            else:
                s, t = (s + beta) >> 1, (t - alpha) >> 1
        elif b < a:
            a, b, u, v, s, t = b, a, s, t, u, v
        else:
            b, s, t = b - a, s - u, t - v
    # Only the gcd picks up the common factor 2**r: from b = s*alpha + t*beta
    # it follows that 2**r * b = s*(2**r * alpha) + t*(2**r * beta), i.e. the
    # Bezout coefficients apply to the original inputs unshifted. (The
    # original code returned s << r, t << r, which breaks the identity.)
    return a << r, s, t
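# Added property check of the Bezout identity, relying on the corrected
# return value above; gcd(240, 46) == 2.
if __name__ == "__main__":
    d, s, t = ext_binary_gcd_env(240, 46)
    assert d == 2 and 240 * s + 46 * t == d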
c189fbdd27dcff14bec9093924067f247ea38f88
704,532
import inspect
import sys

def get_all():
    """Returns all activation functions."""
    fns = inspect.getmembers(sys.modules[__name__])
    fns = [f[1] for f in fns
           if len(f) > 1 and f[0] != "get_all"
           and isinstance(f[1], type(get_all))]
    return fns
52772f2aac04a9d68f9c6470a19d99afb79f7f7f
704,533
def calculateMid(paddle):
    """Calculates midpoint for each paddle, much easier to move the paddle this way"""
    midpoint = int(paddle[0][1] + paddle[1][1]) / 2
    return midpoint
83fbba67945d158807bd9c3aebcab63342ce7599
704,534
def bits_list(number):
    """return list of bits in number

    Keyword arguments:
    number -- an integer >= 0
    """
    # https://wiki.python.org/moin/BitManipulation
    if number == 0:
        return [0]
    else:
        # binary_literal string e.g. '0b101'
        binary_literal = bin(number)
        bits_string = binary_literal.lstrip('0b')
        # list comprehension
        bits = [int(bit_character) for bit_character in bits_string]
        return bits
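# Added examples:
if __name__ == "__main__":
    assert bits_list(0) == [0]
    assert bits_list(5) == [1, 0, 1]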
6f27715dbccefe56d77c800a44c4fa5e82d35b50
704,535
def join_and_keep_order(left, right, remove_duplicates, keep='first', **kwargs):
    """
    :type left: DataFrame
    :type right: DataFrame
    :rtype: DataFrame
    """
    left = left.copy()
    right = right.copy()
    left['_left_id'] = range(left.shape[0])
    right['_right_id'] = range(right.shape[0])
    result = left.merge(right=right, **kwargs)
    result.sort_values(axis='index', by=['_left_id', '_right_id'], inplace=True)
    if remove_duplicates:
        result = result[(~result['_left_id'].duplicated(keep=keep)) &
                        (~result['_right_id'].duplicated(keep=keep))]
    return result.drop(columns=['_left_id', '_right_id'])
a3044f7de9c1f8ffb50cf1e57997307ee0e3d840
704,536
def crop_images(x, y, w, h, *args):
    """Crops all the images passed as parameter using the box coordinates passed."""
    assert len(args) > 0, "At least 1 image needed."
    cropped = []
    for img in args:
        cropped.append(img[x: x + h, y: y + w])
    return cropped
e8f78246c0bfeb3d370b8fe01e264b2f7e0e1c49
704,537
import time
import json

def WriteResultToJSONFile(test_suites, results, json_path):
    """Aggregate a list of unittest result objects and write to a file as JSON.

    This takes a list of result objects from one or more runs (for retry
    purpose) of Python unittest tests; aggregates the list by appending each
    test result from each run and writes to a file in the correct format for
    the --isolated-script-test-output argument passed to test isolates.

    Args:
        test_suites: a list of unittest.TestSuite that were run to get
                     the list of result objects; each test_suite contains
                     the tests run and is iterated to get all test cases ran.
        results: a list of unittest.TextTestResult objects returned
                 from running unittest tests.
        json_path: desired path to the JSON file of results.
    """
    output = {
        'interrupted': False,
        'num_failures_by_type': {},
        'path_delimiter': '.',
        'seconds_since_epoch': time.time(),
        'tests': {},
        'version': 3,
    }

    def initialize(test_suite):
        for test in test_suite:
            if test.id() not in output['tests']:
                output['tests'][test.id()] = {
                    'expected': 'PASS',
                    'actual': []
                }

    for test_suite in test_suites:
        initialize(test_suite)

    def get_pass_fail(test_suite, result):
        success = []
        fail = []
        for failure in result.failures + result.errors:
            fail.append(failure[0].id())
        for test in test_suite:
            if test.id() not in fail:
                success.append(test.id())
        return {
            'success': success,
            'fail': fail,
        }

    for test_suite, result in zip(test_suites, results):
        pass_fail = get_pass_fail(test_suite, result)
        for s in pass_fail['success']:
            output['tests'][s]['actual'].append('PASS')
        for f in pass_fail['fail']:
            output['tests'][f]['actual'].append('FAIL')

    num_fails = 0
    # .values() instead of the original .itervalues(), which is Python 2 only
    for test_result in output['tests'].values():
        if test_result['actual'][-1] == 'FAIL':
            num_fails += 1
            test_result['is_unexpected'] = True
        test_result['actual'] = ' '.join(test_result['actual'])

    output['num_failures_by_type']['FAIL'] = num_fails
    output['num_failures_by_type']['PASS'] = len(output['tests']) - num_fails

    with open(json_path, 'w') as script_out_file:
        json.dump(output, script_out_file)
        script_out_file.write('\n')
cb53b65bf5c8ceb1d0695e38c4ebeedd4916fe14
704,538
def split_person_name(name):
    """
    A helper function. Split a person name into a first name and a last name.

    Example.
    >>> split_person_name("Filip Oliver Klimoszek")
    ("Filip Oliver", "Klimoszek")
    >>> split_person_name("Klimoszek")
    ("", "Klimoszek")
    """
    parts = name.split(" ")
    return " ".join(parts[:-1]), parts[-1]
86b7c7cec1e7772437f41f11437834cfa34051c7
704,539
def readFile(file):
    """Reads file and returns lines from file.

    Args:
        string: file name

    Returns:
        list: lines from file
    """
    fin = open(file)
    lines = fin.readlines()
    fin.close()
    return lines
52c62e6c97caad053cd6619935d8d3674cc3b8cb
704,540
def format_price(raw_price):
    """Formats the price to account for bestbuy's raw price format.

    Args:
        raw_price (string): Bestbuy's price format (ex: $5999 is $59.99)

    Returns:
        string: The formatted price
    """
    formatted_price = raw_price[:len(raw_price) - 2] + "." + raw_price[len(raw_price) - 2:]
    return formatted_price
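# Added example from the docstring:
if __name__ == "__main__":
    assert format_price("$5999") == "$59.99"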
a3b0adc94421334c3f1c4fe947329d329e68990e
704,541
import os

def get_directory(directory=None):
    """Get directory to work with."""
    # Set fdir to the current directory if the user didn't specify another dir
    if not directory:
        fdir = os.getcwd()
    # Set fdir to the directory chosen by the user, if a dir is specified
    else:
        fdir = os.path.realpath(os.path.expanduser(directory))
    # Make sure that the directory exists; otherwise, raise an error
    if not os.path.isdir(fdir):
        raise ValueError("Directory doesn't exist. Check --directory.")
    return fdir
9b83f5502cea6ff908b7528c2ae480d9072ccd79
704,542
def replicated_data(index):
    """Whether data[index] is a replicated data item"""
    return index % 2 == 0
26223e305d94be6e092980c0eb578e138cfa2840
704,543
def get_user_roles_common(user):
    """Return the user's role as saved in the db."""
    return user.role
cf25f029325e545f5d7685e6ac19e0e09105d65a
704,544
def partial_es(Y_idx, X_idx, pred, data_in, epsilon=0.0001):
    """The analysis of the single-variable dependency in the neural network.

    The exact partial-derivative calculation may be highly time consuming,
    so this estimated (finite-difference) calculation can be used instead.

    Args:
        Y_idx: index of Y to access the target variable of interest
        X_idx: index of X to access the independent variable of a neural network
        data_in: the specified data of the input layer
        pred: the specified predictive model

    Returns:
        The first-order derivative of Y on X for the specified X index and
        Y index
    """
    eps = epsilon
    y1 = pred(data_in)
    data_in[X_idx] += eps
    y2 = pred(data_in)
    return (y2[Y_idx] - y1[Y_idx]) / eps
12186469b27bebea4735372e2b45f463bbfbaff1
704,545
import pandas
import numpy

def prepare_and_store_dataframe(test_df: pandas.DataFrame, current_datetime: str,
                                prediction: numpy.ndarray, eval_identity: str,
                                df_output_dir: str):
    """Prepares a dataframe that includes the testing data (timestamp, value),
    the detected anomalies and the labeled anomalies from the dataset, and
    stores this as a .pkl file on the disk.

    :param test_df: Dataframe that includes the used testing data
    :param current_datetime: Current datetime as string to be included in filename
    :param prediction: The predicted anomalies as numpy array
    :param eval_identity: The evaluation identity, consists of the dataset and
        the used algorithm, is used in filename
    :param df_output_dir: The output directory for the resulting pickled dataframe
    """
    df = test_df.copy(deep=True)
    df["prediction"] = prediction
    df["timestamp"] = df.index

    """Pickle Dataframe and store to file"""
    df.to_pickle(df_output_dir + eval_identity + "-" + current_datetime + ".pkl")
    return df
a2aa5d9ffb9ec495abb96ddebc820dd351392b1a
704,546
def isfloat(s):
    """
    Checks whether the string ``s`` represents a float.

    :param s: the candidate string to test
    :type s: ``str``
    :return: True if s is the string representation of a number
    :rtype: ``bool``
    """
    try:
        float(s)
        return True
    except (TypeError, ValueError):
        return False
2233d0a06b9ff0be74f76ef2fce31c816f68584c
704,547
def percentage_to_float(x):
    """Convert a string representation of a percentage to float.

    >>> percentage_to_float('55%')
    0.55

    Args:
        x: String representation of a percentage

    Returns:
        float: Percentage in decimal form
    """
    return float(x.strip('%')) / 100
6c1aeac99278963d3dd207d515e72b6e1e79f09f
704,548
def _escape_special_chars(content):
    """No longer used."""
    content = content.replace("\N{RIGHT-TO-LEFT OVERRIDE}", "")
    if len(content) > 300:
        # https://github.com/discordapp/discord-api-docs/issues/1241
        content = content[:300] + content[300:].replace('@', '@ ')
    return content
816fc3ba15150c3e254a17d1a021d1ddee11e49f
704,550
from typing import OrderedDict
import inspect

def build_paramDict(cur_func):
    """
    This function iterates through all inputs of a function, and saves the
    default argument names and values into a dictionary.

    If any of the default arguments are functions themselves, then
    recursively (depth-first) adds an extra field to the dictionary, named
    <funcName + "_params">, that contains its inputs and arguments.

    The output of this function can then be passed as a "kwargs" object to
    the highest level function, which will then pass the parameter values to
    the lower dictionary levels appropriately.
    """
    paramDict = OrderedDict()
    allArgs = inspect.getfullargspec(cur_func)

    # Check if there are any default parameters; if no, just return empty dict
    if allArgs.defaults is None:
        return paramDict

    for argname, argval in zip(allArgs.args[-len(allArgs.defaults):], allArgs.defaults):
        # Save the default argument
        paramDict[argname] = argval
        # If the default argument is a function, inspect it further
        if callable(argval):
            # print(argname)
            paramDict[argname + "_params"] = build_paramDict(argval)
    return paramDict
b62daf5ffe7b9211d898d26dc754875459dbe1ba
704,551
def get_channel_members_names(channel):
    """Returns a list of all members of a channel.

    If the member has a nickname, the nickname is used instead of their
    name, otherwise their name is used.
    """
    names = []
    for member in channel.members:
        if member.nick is None:
            names.append(member.name)
        else:
            names.append(member.nick)
    return names
955ea4013841fe8aac52f0474a65e221795db571
704,553
import hashlib

def get_checksum(file_name: str) -> str:
    """Returns checksum of the file"""
    sha_hash = hashlib.sha224()
    with open(file_name, "rb") as a_file:
        content = a_file.read()
        sha_hash.update(content)
    digest = sha_hash.hexdigest()
    return digest
6bb506accc6aa7826976a2d8033116dcff2f4a55
704,554
def compress_vertex_list(individual_vertex: list) -> list:
    """
    Given a list of vertices that should not be fillet'd, search for a range
    and make them one compressed list. If the vertex is a point and not a
    line segment, the returned tuple's start and end are the same index.

    Args:
        individual_vertex (list): List of UNIQUE ints. Each int refers to an
            index of a LineString.

    Returns:
        list: A compressed list of tuples. So, it combines adjacent vertices
        into a longer one.
    """
    reduced_idx = list()
    sorted_vertex = sorted(individual_vertex)
    len_vertex = len(sorted_vertex)
    if len_vertex > 0:
        # initialize to unrealistic number.
        start = -1
        end = -1
        size_of_range = 0
        for index, item in enumerate(sorted_vertex):
            if index == 0:
                start = item
                end = item
            else:
                if item == end + 1:
                    end = item
                    size_of_range += 1
                else:
                    if size_of_range == 0:
                        # Only one vertex in range.
                        reduced_idx.append((start, end))
                        start = item
                        end = item
                    else:
                        # Two or more vertexes in range.
                        reduced_idx.append((start, end))
                        size_of_range = 0
                        start = item
                        end = item
            if index == len_vertex - 1:
                if size_of_range == 0:
                    reduced_idx.append((start, end))
                else:
                    reduced_idx.append((start, end))
        return reduced_idx
    else:
        return reduced_idx
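# Added example: runs of consecutive indices collapse to (start, end) tuples,
# while isolated vertices become (v, v).
if __name__ == "__main__":
    assert compress_vertex_list([1, 2, 3, 7]) == [(1, 3), (7, 7)]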
a98f8b101219215f719b598ed8c47074a42ecb13
704,555
import argparse

def add_rnaseq_args():
    """
    Arguments for RNAseq pipeline
    """
    parser = argparse.ArgumentParser(description='RNA-seq pipeline')
    parser.add_argument('-b', '--build-design', dest='build_design',
        action='store_true',
        help='Create design for fastq files')
    parser.add_argument('-d', '--design', default=None,
        help='design for RNAseq, json format, ignore fq1, fq2')
    parser.add_argument('--wt', nargs='+', default=None, dest='wildtype',
        help='read1 fq files for control sample')
    parser.add_argument('--wt-fq2', nargs='+', dest='wildtype_fq2', default=None,
        help='read2 fq files for control sample')
    parser.add_argument('--mut', nargs='+', default=None, dest='mutant',
        help='read1 fq files for treatment sample')
    parser.add_argument('--mut-fq2', nargs='+', dest='mutant_fq2', default=None,
        help='read2 fq files for treatment sample')
    parser.add_argument('-1', '--fq1', nargs='+', default=None,
        help='read1 files, (or read1 of PE reads)')
    parser.add_argument('-2', '--fq2', nargs='+', default=None,
        help='read2 of PE reads')
    parser.add_argument('-c', '--wt-dir', nargs='+', dest='wildtype_dir', default=None,
        help='path to the dirs of control samples')
    parser.add_argument('-t', '--mut-dir', nargs='+', dest='mutant_dir', default=None,
        help='path to the dirs of experiment samples')
    parser.add_argument('-o', '--outdir', default=None,
        help='The directory to save results, default: current working directory.')
    parser.add_argument('-g', '--genome', default=None,
        choices=['dm3', 'dm6', 'hg19', 'hg38', 'mm9', 'mm10'],
        help='Reference genome: dm3, dm6, hg19, hg38, mm10, default: hg38')
    parser.add_argument('--gtf', default=None,
        help='The gtf file for quantification, default: genome.gtf (None)')
    parser.add_argument('--gene-bed', dest='gene_bed', default=None,
        help='The BED or GTF of genes')

    # optional arguments - 0
    parser.add_argument('--trimmed', action='store_true',
        help='specify if input files are trimmed')
    parser.add_argument('--cut-to-length', dest='cut_to_length', default=0, type=int,
        help='cut the read to specific length, from right, default: [0], not cut')
    parser.add_argument('--recursive', action='store_true',
        help='trim adapter recursively')

    # optional arguments - 1
    parser.add_argument('--overwrite', action='store_true',
        help='if specified, overwrite exists file')

    ## extra: index
    parser.add_argument('-k', '--spikein', default=None,
        choices=[None, 'dm3', 'hg19', 'hg38', 'mm10'],
        help='Spike-in genome: dm3, hg19, hg38, mm10, default: None')
    parser.add_argument('-x', '--extra-index', dest="extra_index",
        help='Provide alignment index(es) for alignment, support multiple '
             'indexes. if specified, ignore -g, -k')
    parser.add_argument('--align-to-rRNA', action='store_true',
        help='Align to rRNA')
    parser.add_argument('--aligner', default='STAR',
        choices=['STAR', 'bowtie', 'bowtie2', 'bwa', 'hisat2', 'kallisto', 'salmon'],
        help='Aligner option: [STAR, bowtie, bowtie2, bwa], default: [STAR]')
    parser.add_argument('-p', '--threads', default=1, type=int,
        help='Number of threads for each job, default [1]')
    parser.add_argument('-j', '--parallel-jobs', default=1, type=int,
        dest='parallel_jobs',
        help='Number of jobs run in parallel, default: [1]')

    ## extra: para
    parser.add_argument('--extra-para', dest='extra_para', default=None,
        help='Extra parameters for aligner, eg: -X 2000 for bowtie2. default: [None]')
    parser.add_argument('--norm-project', dest='norm_project', default=None,
        help='The RNAseq_Rx project, for parsing norm scale. eg: '
             'RNAseq_gene/wt.vs.mut for RNAseq_te, default: [None]')
    return parser
327e79e26b44933b82f3b31a607112db0e650ce8
704,556
def week_of_year(datetime_col):
    """Returns the week from a datetime column."""
    return datetime_col.dt.week
c1bf4e0cd5d4aeddf2cff9a1142fcb45b17d1425
704,557
def normalize(df, df_ref=None):
    """
    Normalize all numerical values in dataframe.

    :param df: dataframe
    :param df_ref: reference dataframe
    """
    if df_ref is None:
        df_ref = df
    df_norm = (df - df_ref.mean()) / df_ref.std()
    return df_norm
56c96f43c98593a5cf21425f23cfd92a7f6d6fe3
704,558
def findCongressPerson(name, nicknames_json):
    """
    Checks the nicknames endpoint of the NYT Congress API to determine
    if the inputted name is that of a member of Congress.
    """
    congress_json = [x['nickname'] for x in nicknames_json if x['nickname'] == name]
    if len(congress_json) > 0:
        return True
    return False
d03dc1f55c970379b283f78cfd23e393e494bd48
704,559
def _validate_positive_int(value):
    """Validate value is a natural number."""
    try:
        value = int(value)
    except ValueError as err:
        raise ValueError("Could not convert to int") from err
    if value > 0:
        return value
    else:
        raise ValueError("Only positive values are valid")
ddc2087d69c96fa72594da62192df58555b25029
704,560