Columns: content (string, 35–416k chars), sha1 (string, 40 chars), id (int64, 0–710k)
from typing import Dict

import torch


def get_token_ids_from_text_field_tensors(
    text_field_tensors: Dict[str, Dict[str, torch.Tensor]],
) -> torch.Tensor:
    """
    Our `TextFieldTensors` are complex output structures, because they try to handle a lot of
    potential variation. Sometimes, you just want to grab the token ids from this data structure,
    and that's not trivial without hard-coding assumptions about your data processing, which
    defeats the entire purpose of that generality. This method tries to let you get the token
    ids out of the data structure in your model without hard-coding any assumptions.
    """
    for indexer_name, indexer_tensors in text_field_tensors.items():
        for argument_name, tensor in indexer_tensors.items():
            if argument_name in ["tokens", "token_ids", "input_ids"]:
                return tensor
    raise NotImplementedError(
        "Our heuristic for guessing the right token ids failed. Please open an issue on "
        "github with more detail on how you got this error, so we can implement more robust "
        "logic in this method."
    )
4898da3be21bac6011fe3d559d772da31c31f730
703,316
import re


def parse_life_105(file):
    """Parse a Life 1.05 file, returning a tuple:
    positions: list of (x, y) co-ordinates
    comments: all comments in the file, joined into a single newline-separated string.
    """
    lines = file.split("\n")
    comments = []
    positions = []
    ox, oy = 0, 0
    x, y = ox, oy
    pattern_105 = r"\s*(\.|\*|o|O)+\s*\Z"
    for line in lines:
        line = line.strip().rstrip()
        if line.startswith("#"):
            # comment
            if line[1] in "CcDd":
                comments.append(line[2:])
            # new block definition
            if line[1] in "Pp":
                coords = line[2:]
                try:
                    ox, oy = [int(p) for p in coords.split()]
                    x, y = ox, oy
                except ValueError:
                    pass
        else:
            # skip blanks
            if len(line) > 0 and re.match(pattern_105, line):
                # only fill in points which are active
                for char in line:
                    if char == "*" or char == "o" or char == "O":
                        positions.append((x, y))
                    x += 1
                y = y + 1
                x = ox
    comments = "\n".join(comments)
    return positions, comments
3faf204bb52f5c1dd5b350401afdaa2a021f80d4
703,317
def eliminate_nonvalid_coords(coords, mapshape):
    """
    Eliminate invalid indices.

    Args:
        coords (set of tuples): input set of positions
        mapshape (tuple): (height, width) of the map

    Returns:
        list of valid coordinates
    """
    h, w = mapshape
    valid = []
    for j, i in coords:
        if j < 0 or j >= h:
            continue
        if i < 0 or i >= w:
            continue
        valid.append((j, i))
    return valid
a872f551c072f0e36e10d7aac02d68a370cdfdf1
703,318
def getPosUntilRoot(object):
    """
    Go through the hierarchy of the object until reaching the top level,
    increment the position to get the transformation due to parents.

    @type object: hostObject
    @param object: the object

    @rtype: list
    @return: the cumulative translation along the parenting hierarchy
    """
    stop = False
    # get the first parent
    pos = [0, 0, 0]
    while not stop:
        # get the parent position, and add it to pos
        # get the parent of the previous parent
        parent = None
        if parent is None:
            stop = True
    return pos
00ed1bbb3cce4c8f1309d2ee4b24025e767f7df9
703,319
def _trash_ratio(text):
    """
    Return ratio of non-common symbols.
    """
    trash_count = 0
    for char in text:
        if char in list(u'.\'"+-!?()[]{}*+@#$%^&_=|/\\'):
            trash_count += 1
    return trash_count / float(len(text))
135bcabaa36a565b2e998932c70aaf4caf943af9
703,320
def index_of_spaces(text):
    """
    Given text, return all space indices

    @param text is the string to analyze
    @returns a list of integers representing the indices
    """
    res = []
    for i in range(0, len(text)):
        if text[i] == ' ':
            res.append(i)
    return res
97b3618ffa54ee6d1b50c5bca3e196d3a6ae7f2a
703,321
def get_default_hparams(): """ Return default hyper-parameters """ params_dict = { # Experiment Params: 'is_training': True, # train mode (relevant only for accelerated LSTM mode) 'data_set': 'cat', # datasets to train on 'epochs': 50, # how many times to go over the full train set (on average, since batches are drawn randomly) 'save_every': None, # Batches between checkpoints creation and validation set evaluation. Once an epoch if None. 'batch_size': 100, # Minibatch size. Recommend leaving at 100. 'accelerate_LSTM': False, # Flag for using CuDNNLSTM layer, gpu + tf backend only # Loss Params: 'optimizer': 'adam', # adam or sgd 'learning_rate': 0.001, 'decay_rate': 0.9999, # Learning rate decay per minibatch. 'min_learning_rate': .00001, # Minimum learning rate. 'kl_tolerance': 0.2, # Level of KL loss at which to stop optimizing for KL. 'kl_weight': 0.5, # KL weight of loss equation. Recommend 0.5 or 1.0. 'kl_weight_start': 0.01, # KL start weight when annealing. 'kl_decay_rate': 0.99995, # KL annealing decay rate per minibatch. 'grad_clip': 1.0, # Gradient clipping. Recommend leaving at 1.0. # Architecture Params: 'z_size': 128, # Size of latent vector z. Recommended 32, 64 or 128. 'enc_rnn_size': 256, # Units in encoder RNN. 'dec_rnn_size': 512, # Units in decoder RNN. 'use_recurrent_dropout': True, # Dropout with memory loss. Recommended 'recurrent_dropout_prob': 0.9, # Probability of recurrent dropout keep. 'num_mixture': 20, # Number of mixtures in Gaussian mixture model. # Data pre-processing Params: 'random_scale_factor': 0.15, # Random scaling data augmentation proportion. 'augment_stroke_prob': 0.10 # Point dropping augmentation proportion. } return params_dict
803cc08fc9de77b7c2608d10f6eca457ac175ab8
703,323
def bbcMicro_partPhonemeCount(pronunc): """Returns the number of 'part phonemes' (at least that's what I'm calling them) for the BBC Micro phonemes in pronunc. The *SPEAK command cannot take more than 117 part-phonemes at a time before saying "Line too long", and in some cases it takes less than that (I'm not sure why); 115 is a safer limit.""" partCount = 0 pronunc0 = pronunc while pronunc: found = 0 for ( p ) in " ,AA,AE,AH,AI,AO,AW,AY,B,CH,CT,DH,DUX,D,EE,EH,ER,F,G,/H,IH,IX,IY,J,K,L,M,NX,N,OW,OL,OY,O,P,R,SH,S,TH,T,UH,/UL,/U,UW,UX,V,W,Y,ZH,Z".split( "," ): # phonemes and space count, but pitch numbers do not count if pronunc.startswith(p): partCount += { # *SPEAK can take 117 of most single-letter phonemes, or 116 (limited by the 232+6-character input limit) of most 2-letter phonemes "AW": 2, "IY": 2, "OW": 2, "OL": 2, "UW": 2, "/UL": 2, # *SPEAK can take 58 of these "DUX": 3, "AY": 3, "CH": 3, "J": 3, "OY": 3, # *SPEAK can take 39 of these "CT": 4, # *SPEAK can take 29 of these }.get(p, 1) pronunc = pronunc[len(p) :] found = 1 break if not found: assert pronunc[0] in "12345678", ( "Unrecognised BBC Micro phoneme at " + pronunc + " in " + pronunc0 ) pronunc = pronunc[1:] return partCount
bd1337214f8c0d39c79a7cd94e5720717335aad8
703,324
def insert_at_midpoint(item, iterable):
    """
    Inserts an item at the index of the midpoint of a list
    Returns that list with the item inserted
    """
    midpoint = int(len(iterable) / 2)
    iterable.insert(midpoint, item)
    return iterable
9801e2f4cd1011914f15634898ed4d502edfee34
703,325
def get_filenames_request(products, download_directory):
    """Get local files url corresponding to a Copernicus request (must be already downloaded).

    :param products: (dict) Copernicus Hub query
    :param download_directory: (str) Url of folder for downloaded products
    :return: (list) List of strings with local urls for each product in the request
    """
    # list of id's per requested products
    ids_request = list(products.keys())
    # list of downloaded filenames urls
    filenames = [
        download_directory / f"{products[file_id]['title']}.nc"
        for file_id in ids_request
    ]
    return filenames
5591948ce2445399d06da6c84f3fe1f6b8b4b128
703,326
def create_stream(data_type, transaction_id):
    """
    Construct a 'createStream' message to issue a new stream on which data can travel through.

    :param data_type: int the RTMP datatype.
    :param transaction_id: int the transaction id in which the message will be sent on.
    """
    msg = {'msg': data_type,
           'command': [u'createStream', transaction_id, None]}
    return msg
6ceb6f259c590bdb21589c57ba053fad5c7e1851
703,327
def _version_string_to_tuple(version_string):
    """Convert a version_string to a tuple (major, minor, patch) of integers."""
    # wrap in tuple() so callers really get a tuple, as documented, not a generator
    return tuple(int(n) for n in version_string.split('.'))
d2fe2a3d9f6f23d1d80c2808436386c18893828f
703,328
import re


def get_major_version(version):
    """
    Enable checking that 2 versions are within the same major version
    """
    components = re.findall(r"\d+", version)
    major = components[0]
    return major
b3d017b3dbe49b0a30d9919272906e8936c14f83
703,329
from typing import Any


def is_action(value: Any) -> bool:
    """Returns ``True`` if the value is an action."""
    return isinstance(value, dict) and "action" in value
46c691c7afd221c0f77869428535f6b943332905
703,330
def get_parse_args_definitions(wanted=None): """ Parse the args the script neeeds :param: wanted: list of args the application will use :returns: A list with the options for the wanted args """ definitions = { 'kolibri_dev': [ '-kd', '--kolibri-dev', { 'required': False, 'help': 'path to the Kolibri development installation' }, ], 'kolibri_venv': [ '-kv', '--kolibri-venv', { 'required': False, 'help': 'path to the Kolibri virtualenv' } ], 'kolibri_exec': [ '-ke', '--kolibri-exec', { 'required': False, 'help': 'command to execute Kolibri cli' } ], 'database': [ '-d', '--database', { 'required': False, 'choices': ['sqlite', 'postgresql'], 'help': 'Database type: sqlite or posgresql' } ], 'channel': [ '-c', '--channel', { 'required': False, 'choices': ['large', 'multiple', 'video', 'exercise'], 'help': 'Channels to use in Kolibri: large (1 large channel ~ 1Gb),\n' 'multiple (10 x ~30 Mb channels), video (channel with multiple videos),\n' 'exercise (channel with multiple exercises)' } ], 'learners': [ '-l', '--learners', { 'required': False, 'type': int, 'help': 'Number of learners per classroom that will use the tests' } ], 'classrooms': [ '-s', '--classrooms', { 'required': False, 'type': int, 'help': 'Number of classrooms to be created.' } ], 'test': [ '-t', '--test', { 'required': False, 'help': 'Name of the test to be run (or "all" to run them all)' } ], 'iterations': [ '-i', '--iterations', { 'required': False, 'type': int, 'help': 'Number of times each test will be run' } ] } if wanted: return dict((k, definitions[k]) for k in wanted if k in definitions) return definitions
9215b13917652fe23c053f24ec3ce42b1a9fd924
703,331
import os


def expand_path(path):
    """Get the canonical form of the absolute path from a possibly relative path
    (which may have symlinks, etc.)"""
    return os.path.expandvars(os.path.expanduser(path))
d1e58069f5547e8f5452a5c0a5888c08475033c5
703,332
import os


def get_path_rel_to_proj(full_path):
    """Return the path of full_path relative to the project root directory."""
    #| - get_path_rel_to_proj
    subdir = full_path
    PROJ_dir = os.environ["PROJ_irox_oer"]

    ind_tmp = subdir.find(PROJ_dir.split("/")[-1])

    path_rel_to_proj = subdir[ind_tmp:]
    path_rel_to_proj = "/".join(path_rel_to_proj.split("/")[1:])

    return(path_rel_to_proj)
    #__|
f575a0725ab090ea55091ce74610df2f1ab62010
703,333
def name_to_components(name):
    """Converts a name to a list of components.

    Arguments:
        name - Name in the format /name1=value1/name2=value2/..

    Returns:
        list of (name, value) tuples
    """
    ret = []
    components = [x for x in name.split('/') if x]
    components = [x.split('=') for x in components]
    components = [x for x in components if len(x) == 2]
    for key, value in components:
        ret.append((key, value))
    return ret
5683e3c4fdce53b53431484a46cd23c8959d20a2
703,334
from typing import Optional import readline def clear_tab_complete_vocabulary() -> None: """ Resets vocabulary used for tab completion. It's important to either use the cleanup argument in tab_complete, or call this function after setting a vocabulary. This will prevent irrelevant options displaying when the user presses tab. :return: """ # Matching signature expected in set_completer according to mypy def completer(arg1: str, arg2: int) -> Optional[str]: return None readline.set_completer(completer)
2014e996474742d956566cc95887ba4c868398a6
703,335
import re def fixupAnchor(anchor): """Miscellaneous fixes to the anchors before I start processing""" # This one issue was annoying if anchor.get("title", None) == "'@import'": anchor["title"] = "@import" # css3-tables has this a bunch, for some strange reason if anchor.get("uri", "").startswith("??"): anchor["uri"] = anchor["uri"][2:] # If any smart quotes crept in, replace them with ASCII. linkingTexts = anchor.get("linking_text", [anchor.get("title")]) for i, t in enumerate(linkingTexts): if t is None: continue if "’" in t or "‘" in t: t = re.sub(r"‘|’", "'", t) linkingTexts[i] = t if "“" in t or "”" in t: t = re.sub(r"“|”", '"', t) linkingTexts[i] = t anchor["linking_text"] = linkingTexts # Normalize whitespace to a single space for k, v in list(anchor.items()): if isinstance(v, str): anchor[k] = re.sub(r"\s+", " ", v.strip()) elif isinstance(v, list): for k1, v1 in enumerate(v): if isinstance(v1, str): anchor[k][k1] = re.sub(r"\s+", " ", v1.strip()) return anchor
ba352cc5b18f82000be3943cf03db8ebcb41ddff
703,336
def deep_merge(base, updates):
    """ apply updates to base dictionary """
    # use items() rather than the Python 2-only iteritems()
    for key, value in updates.items():
        if key in base and isinstance(value, dict):
            base[key] = deep_merge(base[key] or {}, value)
        else:
            base[key] = value
    return base
6add1c048368f1547aa6ce5ebea1fe17b373fb87
703,337
def has_trailing_character_return(str_multiline: str) -> bool:
    """
    >>> has_trailing_character_return('jhgjh\\n')
    True
    >>> has_trailing_character_return('jhgjh\\ntestt')
    False
    """
    if len(str_multiline) and str_multiline[-1] == '\n':
        preserve_trailing_linefeed = True
    else:
        preserve_trailing_linefeed = False
    return preserve_trailing_linefeed
21e6a7c9bb76e37ee05ed61b2012ec3a92413540
703,338
def get_genotype(read, ref_position, snp_position, cigar_tuple): """ Input read position, read sequence, SNP position, cigar Return the base in read that is aligned to the SNP position """ cigar = { # alignement code, ref shift, read shift 0: ["M", 1, 1], # match, progress both 1: ["I", 0, 1], # insertion, progress read not reference 2: ["D", 1, 0], # deletion, progress reference not read 3: ["N", 1, 0], # skipped, progress reference not read 4: ["S", 0, 1], # soft clipped, progress read not reference 5: ["H", 0, 0], # hard clipped, progress neither 6: ["P", 0, 0], # padded, do nothing (not used) 7: ["=", 1, 1], # match, progress both 8: ["X", 1, 1] # mismatch, progress both } read_position = 0 for i in cigar_tuple: ref_bases = cigar[i[0]][1] * i[1] read_bases = cigar[i[0]][2] * i[1] if ref_bases == 0 and read_bases == 0: # this shouldn't ever happen pass elif ref_bases == 0 and read_bases > 0: # clipped bases or insertion relative to reference read_position += read_bases if ref_position == snp_position: # only happens when first aligned base is the SNP return read[read_position - 1] elif read_bases == 0 and ref_bases > 0: ref_position += ref_bases if ref_position > snp_position: # we've gone past the SNP return None else: if ref_position + ref_bases > snp_position: # we pass snp shift = snp_position - ref_position return read[read_position + shift - 1] elif ref_position + ref_bases < snp_position: ref_position += ref_bases read_position += read_bases else: return read[read_position + read_bases - 1] return None
bf6b212b318ab8425124bbdd40d5ab54d05e8858
703,339
def get_blowout_properties():
    """
    Return the properties for the base blowout case

    Return the fluid properties and initial conditions for the base case of
    a blowout from the Model Inter-comparison Study for the case of
    20,000 bbl/d, 2000 m depth, GOR of 2000, and 30 cm orifice.
    """
    # Get the properties for the base case
    d0 = 0.30
    m_gas = 7.4
    m_oil = 34.5
    rho_gas = 131.8
    mu_gas = 0.00002
    sigma_gas = 0.06
    rho_oil = 599.3
    mu_oil = 0.0002
    sigma_oil = 0.015
    rho = 1037.1
    mu = 0.002

    return (d0, m_gas, m_oil, rho_gas, mu_gas, sigma_gas, rho_oil, mu_oil,
            sigma_oil, rho, mu)
1521c62fac787e10ca6e7beaa356c82914043cc8
703,340
import torch


def box_refinement(box, gt_box):
    """Compute refinement needed to transform box to gt_box.
    box and gt_box are [N, (y1, x1, y2, x2)]
    """
    height = box[:, 2] - box[:, 0]
    width = box[:, 3] - box[:, 1]
    center_y = box[:, 0] + 0.5 * height
    center_x = box[:, 1] + 0.5 * width

    gt_height = gt_box[:, 2] - gt_box[:, 0]
    gt_width = gt_box[:, 3] - gt_box[:, 1]
    gt_center_y = gt_box[:, 0] + 0.5 * gt_height
    gt_center_x = gt_box[:, 1] + 0.5 * gt_width

    dy = (gt_center_y - center_y) / height
    dx = (gt_center_x - center_x) / width
    dh = torch.log(gt_height / height)
    dw = torch.log(gt_width / width)

    result = torch.stack([dy, dx, dh, dw], dim=1)
    return result
a10380c4a80de52bf7145fe134e12e49b5cf66a6
703,342
def thrush(x, f):
    """
    Applies function M{f} to value M{x}

    @type x: Any
    @type f: function
    @rtype: Any
    """
    return f(x)
7819bfa4e0b394ac94acbd057c6d69e449fb18dc
703,343
import argparse


def _init_args():
    """
    :return:
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--net', type=str, default='xception')
    parser.add_argument('--dataset', type=str, default='ilsvrc_2012')

    return parser.parse_args()
68fbb00a704968ce082fd8495d0d9a9fe5ee875a
703,344
def top(values, number=5):
    """
    Return the keys of the top number (defaults to 5) values.

    If we ask for the top 2 and the data looks like {"A": 5, "B": 5, "C": 5}
    we will sort on the keys and take A and B.

    Args:
        values: the dict with data
        number: how many we include (default=5)
    Returns:
        top_x (list): keys of the number highest values
    """
    return sorted(values, key=lambda k: (-values[k], k))[:number]
aaa8516c3074f0cc2bc3c37f24ae27acc2844029
703,345
def force_line_char_limit(line, indent): """ If line is longer than limit then create new line at a space in the text :param line: :return: """ clim = 120 if len(line) <= clim: return line rem = line oline = '' for i in range(clim): j = clim - i if rem[j] == ' ': oline += rem[:j] + '\n' + indent rem = rem[j + 1:] if len(rem) < clim: oline += rem break return oline
21858c137cb3c75771bbe911e1c13aa7782acfa6
703,346
import os


def _eval_optenv(name, default=''):
    """
    Eval_optenv

    Returns the value of the environment variable or default
    @name: name of the environment variable
    @return: environment variable value or default
    """
    if name in os.environ:
        return os.environ[name]
    return default
06a126274ed9091e7e545ec5bc6d9fa99c093445
703,347
import random
import string


def get_random_file_name(length: int) -> str:
    """Returns a random file name.

    File name consists of lowercase letters, uppercase letters, and digits.

    :param length: File name length.
    """
    return ''.join(random.choice(string.ascii_lowercase + string.digits + string.ascii_uppercase)
                   for _ in range(length))
c6a7b2f58bc6d2eb457cee2c01222757c50f7eb9
703,348
def is_valid_followup_permutation(perm, prev_perm):
    """
    Checks if a given permutation is a valid following permutation
    to all previously known permutations. (only one more)
    """
    for p in prev_perm:
        if len(perm - p) == 1:
            return True
    return False
7195f48ec57273af6f5bf3942e501360558678ab
703,349
def sentihood_strict_acc(y_true, y_pred):
    """
    Calculate "strict Acc" of aspect detection task of Sentihood.
    """
    total_cases = int(len(y_true) / 4)
    true_cases = 0
    for i in range(total_cases):
        if y_true[i * 4] != y_pred[i * 4]:
            continue
        if y_true[i * 4 + 1] != y_pred[i * 4 + 1]:
            continue
        if y_true[i * 4 + 2] != y_pred[i * 4 + 2]:
            continue
        if y_true[i * 4 + 3] != y_pred[i * 4 + 3]:
            continue
        true_cases += 1
    aspect_strict_Acc = true_cases / total_cases
    return aspect_strict_Acc
82b8f35fd449fb112f12c263c6fa40c4efdf381e
703,350
import os import shutil def check_if_dir_exists_create_it_if_not_remove_content(preprocessed_data_dir): """ A helper function used mainly by: - prepare_and_dispatch_lion_detection_data - prepare_and_dispatch_lion_counting_data """ # Check if CONST_PREPROCESSED_DATA_DIR exists. pdd = os.path.isdir(preprocessed_data_dir) if (pdd == True): shutil.rmtree(preprocessed_data_dir) os.makedirs(preprocessed_data_dir) else: os.makedirs(preprocessed_data_dir) return preprocessed_data_dir
54a35a1c6ebbce571e2380c842839133409a4288
703,351
def reverse_string(a_string: str):
    """Take the input a_string and return it reversed (e.g. "hello" becomes "olleh")."""
    reversed_string = ""
    for i in range(len(a_string)):
        reversed_string += a_string[~i]
    return reversed_string
888127122856a3537eea99d4e2bad0aa0f1921d1
703,352
def _iterations_implicit_bwd(res, gr): """Runs Sinkhorn in backward mode, using implicit differentiation. Args: res: residual data sent from fwd pass, used for computations below. In this case consists in the output itself, as well as inputs against which we wish to differentiate. gr: gradients w.r.t outputs of fwd pass, here w.r.t size f, g, errors. Note that differentiability w.r.t. errors is not handled, and only f, g is considered. Returns: a tuple of gradients: PyTree for geom, one jnp.ndarray for each of a and b. """ f, g, ot_prob, solver = res gr = gr[:2] return ( *solver.implicit_diff.gradient(ot_prob, f, g, solver.lse_mode, gr), None, None)
8617b6bd8cab2535409e863dae31a928f4de81db
703,353
import torch


def huber_loss_temporal(dvf):
    """
    Calculate approximated temporal Huber loss

    Args:
        dvf: (Tensor of shape (N, 2, H, W)) displacement vector field estimated

    Returns:
        loss: (Scalar) huber loss temporal
    """
    eps = 1e-8  # numerical stability

    # magnitude of the dvf
    dvf_norm = torch.norm(dvf, dim=1)  # (N, H, W)

    # temporal derivatives, 1st order
    dvf_norm_dt = dvf_norm[1:, :, :] - dvf_norm[:-1, :, :]
    loss = (dvf_norm_dt.pow(2) + eps).sum().sqrt()
    return loss
12329846e15c18ff9d59aee2f27377ce38eb8208
703,354
def handler(msg):
    """
    Writes input argument back-out to the standard output returning input as output.

    Generated by: `SQL Server Big Data Cluster`

    :param msg: The message to echo.
    :type msg: str
    :return: The input message `msg` as output `out`.
    """
    print(msg)
    out = msg
    return out
b90133e486092c6277f63c2a4f5aa0c2317fa44e
703,355
import sys def _get_remote_or_bail(repo, name): """Get remote by name. name may be None. _get_remote_or_bail(Repo, str) -> Remote """ remote_name = name if not remote_name: # Default to origin since it's the convention. remote_name = 'origin' try: return repo.remote(remote_name) except ValueError as e: if not name and len(repo.remotes) == 1: # Should be safe to use the only remote if it was renamed and user # didn't ask for a specific name. return repo.remotes[0] else: print('ERROR:', e) sys.exit(1)
e709c57fa46e0fe232a30ca21b97d40030e36ee9
703,356
from pathlib import Path


def get_file_path():
    """
    Return the list of paths to the proper-noun dataframe files.
    """
    p = Path(__file__).parent.resolve() / ".." / "toots_log"
    file_paths = sorted([f for f in p.iterdir() if f.is_file()])
    return(file_paths)
e51c2683f3ebd40f6d4b05b72e82273c153aa014
703,357
import math def RerangeEulerAngle(angle,deadzone,max1): """ Rerange the angles to [-1 - +1] If one angle is in the deadzone it is set to 0 Angles are cubed for better control Deadzone and max is configurable max1 ---/ /: / : / : deadzone-/ : : : 0 1 """ sign=math.copysign(1, angle) value = abs(angle) if (value < deadzone): angle = 0 return angle else: # current range of value: [deadzone - inifinite] value = float(min(value, max1)) #current range of value: [deadzone - max1] value -= deadzone #current range of value: [0 - (max1 - deadzone)] value /= (max1 - deadzone); #current value [0-1] # m= max1 - deadzone # (y2-y1)/(x2-x1) # value=m*value+deadzone # Y=m(X-x1)+y1 angle = float(sign*value) return angle
a8443cfd9f1c234e16e15efa97db6d7cead5ab46
703,358
def processMultiline(string, removeEmpty=True, removeComments=False, doStrip=True):
    """split a string into lines, with some default post-processing.
    Caution if using removeEmpty=False and removeComments==True, it will fail if empty lines are present"""
    lines = string.split('\n')
    if doStrip:
        lines = (f.strip() for f in lines)
    if removeEmpty:
        lines = [f for f in lines if len(f) > 0]
    if removeComments:
        lines = [f for f in lines if f[0] != '#']
        #lines = [f for f in lines if (len(f)>0) and f[0]!='#']
    return lines
e1367f5094a17363872c297ee898168c0378c5fe
703,359
import argparse from pathlib import Path def parse_arguments() -> argparse.Namespace: """ Parse arguments from the command line using argparse. :return: command line arguments :rtype: argparse.Namespace """ parser = argparse.ArgumentParser(__file__) parser.add_argument('base_conf', type=Path, help='Path to the base config') parser.add_argument('icon_dir', type=Path, help='Path to the icons directory') parser.add_argument('template_dir', type=Path, help='Path to the templates directory') parser.add_argument( '-o', '--output_dir', type=Path, default=Path('output'), help='Path to resource output directory' ) return parser.parse_args()
f3fbdcbda119669d25287b410c63b59d5e513b07
703,360
import re


def remove_words(text, pattern):
    """
    This function removes words based on those found in the pattern.

    text: String of text
    pattern: List of words to remove

    returns: new string with specified words removed
    """
    new_string = re.sub(r"\b(%s)\b" % "|".join(pattern), "", text, flags=re.I)
    return new_string
90499bb65dff72065cc118eeb186b6c0cd30b0c5
703,361
def _get_valid_filename(string):
    """Generate a valid filename from a string.

    Strips all characters which are not alphanumeric or a period (.), dash (-),
    underscore (_) or space.

    Based on https://stackoverflow.com/a/295146/4798943

    Args:
        string (str): String file name to process.

    Returns:
        str: Generated file name.
    """
    return "".join(c for c in string if (c.isalnum() or c in "._- "))
93777a5458c00a0a751f77953d718080cf51088e
703,362
def parse_output(filename): """ This function parses the output of a test run. For each run of the test, the program should print the following: ## ID: result: [OK|FAIL] ## ID: cycles: N_CYCLES ## ID: instructions: N_INSTR ## ID: Key: Value Multiple runs are allowed. Make sure, that you pipe the execution into a file, whose name is then passed into this function. Example: os.system("make clean all run > result.out") parsed = parse_output("result.out") This function returns a dictionary in the form: { "1": {"result": "OK", "cycles": "215", "instructions": "201"}, ... } """ parsed = {} with open(filename, "r") as _f: for line in _f.readlines(): if not line.startswith("## "): continue line = line.lstrip("# ") parts = [p.strip() for p in line.split(":")] assert len(parts) == 3, line if parts[0] not in parsed: parsed[parts[0]] = {} if parts[1] == "result": parsed[parts[0]]["result"] = parts[2] == "OK" else: parsed[parts[0]][parts[1].lower()] = parts[2] # add IPC to the output for case in parsed.values(): if "cycles" in case and "instructions" in case: case["ipc"] = "{:.3f}".format(int(case["instructions"]) / int(case["cycles"])) return parsed
e24e961e5222d952e79369d22365723919cc3bfa
703,363
def sample_from_scipy_distribution(dist, size, **kwargs):
    """Use a given distribution and extract `size` samples from it."""
    if kwargs:
        return dist.rvs(**kwargs, size=size)
    return dist.rvs(size=size)
deefc047d0d8c44d38b055a86fabe7c8fdc73064
703,364
def get_atoms_list(mmtf_dict): """Creates a list of atom dictionaries from a .mmtf dictionary by zipping together some of its fields. :param dict mmtf_dict: the .mmtf dictionary to read. :rtype: ``list``""" return [{ "x": x, "y": y, "z": z, "alt_loc": a or None, "bvalue": b, "occupancy": o, "id": i } for x, y, z, a, b, i, o in zip( mmtf_dict["xCoordList"], mmtf_dict["yCoordList"], mmtf_dict["zCoordList"], mmtf_dict["altLocList"], mmtf_dict["bFactorList"], mmtf_dict["atomIdList"], mmtf_dict["occupancyList"] )]
3b5f29362c077585ebc659b8d8a9ff5d60908ead
703,366
def trimesh_swap_edge(mesh, u, v, allow_boundary=True): """Replace an edge of the mesh by an edge connecting the opposite vertices of the adjacent faces. Parameters ---------- mesh : :class:`compas.datastructures.Mesh` Instance of mesh. u : int The key of one of the vertices of the edge. v : int The key of the other vertex of the edge. Returns ------- None """ # check legality of the swap # swapping on the boundary is not allowed fkey_uv = mesh.halfedge[u][v] fkey_vu = mesh.halfedge[v][u] u_on = mesh.is_vertex_on_boundary(u) v_on = mesh.is_vertex_on_boundary(v) if u_on and v_on: return False if not allow_boundary: if mesh.is_vertex_on_boundary(u) or mesh.is_vertex_on_boundary(v): return False # swapping to a half-edge that already exists is not allowed uv = mesh.face[fkey_uv] vu = mesh.face[fkey_vu] o_uv = uv[uv.index(u) - 1] o_vu = vu[vu.index(v) - 1] if o_uv in mesh.halfedge[o_vu] and o_vu in mesh.halfedge[o_uv]: return False # swap # delete the current half-edge del mesh.halfedge[u][v] del mesh.halfedge[v][u] # delete the adjacent faces del mesh.face[fkey_uv] del mesh.face[fkey_vu] # add the faces created by the swap a = mesh.add_face([o_uv, o_vu, v]) b = mesh.add_face([o_vu, o_uv, u]) return a, b
43f150bb52b1f4a9180f6c40a9d420fbe5e67e30
703,367
import os import contextlib import shutil def set_up_directory(day): """Make a new directory for working on an advent of code problem Args: day: int day of the month to work on Returns: new_dir: str path to the directory for that day """ this_dir = os.path.dirname(__file__) new_dir = os.path.join(this_dir, 'day' + str(day)) with contextlib.suppress(FileExistsError): os.mkdir(new_dir) new_file_name = os.path.join(new_dir, 'day' + str(day) + '.py') template_file_name = os.path.join(this_dir, 'template.py') if not(os.path.exists(new_file_name)): shutil.copy(template_file_name, new_file_name) return new_dir
ba76d4abe7ab40517adcfd54af66b8946df31886
703,368
def prompt_vial_range():
    """Prompt user for first and last vial of run"""
    first_ic_vial = int(input('First IC vial? '))
    last_ic_vial = int(input('Last IC vial? '))
    return first_ic_vial, last_ic_vial
bf5cf536c0aae3808bf16dd6d87a9d83bfbdab04
703,369
import argparse import sys def argparse_setup(): """Initialize the argument parser. Parameters: None Return: None """ parser = argparse.ArgumentParser() subparsers = parser.add_subparsers() # Subparser for implementation listing info_parser = subparsers.add_parser("info", help="List implementation details") info_parser.add_argument("--problems", "-p", type=str, help="Problems to list") info_parser.add_argument("--verbose", "-v", action="store_true", help="Show details for all implementations") info_parser.set_defaults(which="info") # Subparser for problem execution run_parser = subparsers.add_parser("run", help="Execute given implementations") run_parser.add_argument("--problem", "-p", type=int, required=True, help="Which Euler problem to run") run_parser.add_argument("--versions", "-v", type=str, help="Which implementation(s) to run " + "(e.g. '1', '1..3', '1..3,5..7'") run_parser.set_defaults(which="run") # Subparser for testing test_parser = subparsers.add_parser("perf", help="Check problem implementation " + "performance") test_parser.add_argument("--count", "-c", type=int, default=3, help="Number of times to run the solution") test_parser.add_argument("--problem", "-p", type=int, required=True, help="Which Euler problem to run") test_parser.add_argument("--versions", "-v", type=str, help="Which impementation(s) to run " + "(e.g. '1', '1..3', '1..3,5..7'") test_parser.set_defaults(which="perf") args, unknown = parser.parse_known_args() if (len(vars(args))) == 0: # No arguments passed. Print help string and exit. parser.print_help() sys.exit(1) return (args, unknown)
10dbea13f782e9fd19371537984001d300a289ee
703,370
import numpy as np import io import torch def read_txt_embeddings(logger, path): """ Reload pretrained embeddings from a text file. """ word2id = {} vectors = [] # load pretrained embeddings # _emb_dim_file = params.emb_dim _emb_dim_file = 0 with io.open(path, 'r', encoding='utf-8', newline='\n', errors='ignore') as f: for i, line in enumerate(f): if i == 0: split = line.split() assert len(split) == 2 _emb_dim_file = int(split[1]) continue word, vect = line.rstrip().split(' ', 1) vect = np.fromstring(vect, sep=' ') if word in word2id: logger.warning("Word \"%s\" found twice!" % word) continue if not vect.shape == (_emb_dim_file,): logger.warning("Invalid dimension (%i) for word \"%s\" in line %i." % (vect.shape[0], word, i)) continue assert vect.shape == (_emb_dim_file,) word2id[word] = len(word2id) vectors.append(vect[None]) assert len(word2id) == len(vectors) logger.info("Loaded %i pretrained word embeddings from %s" % (len(vectors), path)) # compute new vocabulary / embeddings embeddings = np.concatenate(vectors, 0) embeddings = torch.from_numpy(embeddings).float() # assert embeddings.size() == (len(word2id), params.emb_dim) return word2id, embeddings
1d7cc0845488bb3bd37eef4ca0cdeff67787f6e8
703,371
from typing import Any
from typing import List


def ensure_list(data: Any) -> List:
    """Ensure input is a list.

    Parameters
    ----------
    data: object

    Returns
    -------
    list
    """
    if data is None:
        return []
    if not isinstance(data, (list, tuple)):
        return [data]
    return list(data)
58016feaf49d63255944fa615e7e2e1dac6dcc76
703,372
def var_recode(TEDS_A_Imputed, user_target, TEDS_Af): """Recodes categorical variables present in the data. Returns A recoded data frame.""" # Identify the variable types col_list = list(TEDS_A_Imputed.columns) col_list_cat = [s for s in col_list if s != 'CASEID' and s != 'ADMYR' and s != 'AGE' and s != 'EDUC' and s != 'ARRESTS'] col_list_ord = [s for s in col_list if s == 'ADMYR' or s == 'AGE' or s == 'EDUC' or s == 'ARRESTS' and s != user_target] col_list_cat_f = list(TEDS_Af.columns) TEDS_A_Imputed[col_list_cat] = TEDS_A_Imputed[col_list_cat].astype('category') TEDS_A_Imputed[col_list_ord] = TEDS_A_Imputed[col_list_ord].astype('category') TEDS_A_Imputed['CASEID'] = TEDS_A_Imputed['CASEID'].astype('int64') TEDS_A_Imputed[user_target] = TEDS_A_Imputed[user_target].astype('category') TEDS_Af[col_list_cat_f] = TEDS_Af[col_list_cat_f].astype('category') if 'STFIPS' in col_list: # Recoding the States variable TEDS_A_Imputed['STFIPS'] = TEDS_A_Imputed['STFIPS'].replace([1,2,4,5,6,7,8,10,11,12, 13,15,16,17,18,19,20,21, 22,23,24,25,26,27,28,29, 30,31,32,33,34,35,36,37, 38,39,40,42,44,45,46,47, 48,49,50,51,53,54,55,56, 72], ['Alabama','Alaska','Arizona', 'Arkansas','California','Colorado', 'Connecticut','Delaware','District of Columbia', 'Florida','Georgia','Hawaii','Idaho', 'Illinois','Indiana','Iowa','Kansas', 'Kentucky','Louisiana','Maine','Maryland', 'Massachusetts','Michigan','Minnesota', 'Mississippi','Missouri','Montana','Nebraska', 'Nevada','New Hampshire','New Jersey', 'New Mexico','New York','North Carolina', 'North Dakota','Ohio','Oklahoma','Pennsylvania', 'Rhode Island','South Carolina','South Dakota', 'Tennessee','Texas','Utah','Vermont','Virginia', 'Washington','West Virginia','Wisconsin', 'Wyoming','Puerto Rico']) #Recast as a categorical variable TEDS_A_Imputed["STFIPS"] = TEDS_A_Imputed["STFIPS"].astype("category") if 'EDUC' in col_list: # Recoding the Education variable TEDS_A_Imputed['EDUC'] = TEDS_A_Imputed['EDUC'].replace([1,2,3,4,5], ['8 years or less', '9–11 years','12 years (or GED)', '13–15 years','16 years or more']) #Recast as a categorical variable TEDS_A_Imputed['EDUC'] = TEDS_A_Imputed['EDUC'].astype("category") if 'AGE' in col_list: # Recoding the Age variable TEDS_A_Imputed['AGE'] = TEDS_A_Imputed['AGE'].replace([1,2,3,4,5,6,7,8,9,10,11,12], ['12–14 years','15–17 years','18–20 years', '21–24 years','25–29 years','30–34 years', '35–39 years','40–44 years','45–49 years', '50–54 years','55–64 years','65 years and older']) #Recast as a categorical variable TEDS_A_Imputed['AGE'] = TEDS_A_Imputed['AGE'].astype("category") if 'SERVICES' in col_list: # Recoding the Services variable TEDS_A_Imputed['SERVICES'] = TEDS_A_Imputed['SERVICES'].replace([1,2,3,4,5,6,7,8], ['Detox, 24-hour, hospital inpatient', 'Detox, 24-hour, free-standing residential', 'Rehab/residential, hospital (non-detox)', 'Rehab/residential, (30 days or fewer)', 'Rehab/residential, (more than 30 days)', 'Ambulatory, intensive outpatient', 'Ambulatory, non-intensive outpatient', 'Ambulatory, detoxification']) #Recast as a categorical variable TEDS_A_Imputed['SERVICES'] = TEDS_A_Imputed['SERVICES'].astype("category") if 'RACE' in col_list: # Recoding the Race variable TEDS_A_Imputed['RACE'] = TEDS_A_Imputed['RACE'].replace([1,2,3,4,5,6,7,8,9], ['Alaska Native (Aleut, Eskimo, Indian)', 'American Indian (other than Alaska Native)', 'Asian or Pacific Islander', 'Black or African American', 'White','Asian','Other single race','Two or more races', 'Native Hawaiian or Other Pacific Islander']) #Recast as a 
categorical variable TEDS_A_Imputed['RACE'] = TEDS_A_Imputed['RACE'].astype("category") if 'ETHNIC' in col_list: # Recoding the Ethnicity variable TEDS_A_Imputed['ETHNIC'] = TEDS_A_Imputed['ETHNIC'].replace([1,2,3,4,5], ['Puerto Rican','Mexican', 'Cuban or other specific Hispanic', 'Not of Hispanic or Latino origin', 'Hispanic or Latino, specific origin not specified']) #Recast as a categorical variable TEDS_A_Imputed['ETHNIC'] = TEDS_A_Imputed['ETHNIC'].astype("category") if 'HLTHINS' in col_list: # Recoding the Health Insurance variable TEDS_A_Imputed['HLTHINS'] = TEDS_A_Imputed['HLTHINS'].replace([1,2,3,4], ['Private insurance', 'Medicaid','Medicare', 'None']) #Recast as a categorical variable TEDS_A_Imputed['HLTHINS'] = TEDS_A_Imputed['HLTHINS'].astype("category") if 'REGION' in col_list: # Recoding the Region variable TEDS_A_Imputed['REGION'] = TEDS_A_Imputed['REGION'].replace([1,2,3,4], ['U.S. territories', 'Northeast','Midwest', 'South','West']) #Recast as a categorical variable TEDS_A_Imputed['REGION'] = TEDS_A_Imputed['REGION'].astype("category") if 'DSMCRIT' in col_list: # Recoding the DSM Diagnosis variable TEDS_A_Imputed['DSMCRIT'] = TEDS_A_Imputed['DSMCRIT'].replace([1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19], ['Alcohol-induced disorder','Substance-induced disorder', 'Alcohol intoxication','Alcohol dependence', 'Opioid dependence','Cocaine dependence', 'Cannabis dependence','Other substance dependence', 'Alcohol abuse','Cannabis abuse', 'Other substance abuse','Opioid abuse','Cocaine abuse', 'Anxiety disorders','Depressive disorders', 'Schizophrenia/other psychotic disorders', 'Bipolar disorders', 'Attention deficit/disruptive behavior disorders', 'Other mental health condition']) #Recast as a categorical variable TEDS_A_Imputed['DSMCRIT'] = TEDS_A_Imputed['DSMCRIT'].astype("category") return(TEDS_A_Imputed, col_list_cat, col_list_ord, col_list_cat_f)
cc656e9a6840af4d95a6a114073459d815c533dc
703,373
def init_split(df, featname, init_bins=100):
    """
    Split the feature `featname` of `df` and return the list of intermediate cut points.
    To make sure every value falls into some interval, the midpoint between two adjacent
    values is used as a cut point.
    Note that this is not an equal-frequency (or similar) binning method: it simply finds
    the candidate cut points and then merges them down to the requested number of bins.
    Whether the resulting intervals are treated as open or closed is up to the caller;
    this function only controls the number of bins.
    Also, any leftover values go into the last bin; e.g. splitting 101 values into 100 bins
    puts two samples in the last bin.

    Parameters:
    ----------
    df: dataframe, the input df
    featname: str, feature name
    init_bins: int, number of bins to split into

    Returns:
    -------
    The list of cut points, in ascending order, e.g. [1, 5, 9, 18]
    """
    # Collect the unique values, sorted ascending
    list_unique_vals_order = sorted(list(set(df[featname])))
    # Take the midpoints; note we iterate up to len - 1
    list_median_vals = []
    for i in range(len(list_unique_vals_order) - 1):
        list_median_vals.append((list_unique_vals_order[i] + list_unique_vals_order[i + 1]) / 2)
    # Initial number of candidate cut points
    cnt_unique_vals = len(list_median_vals)
    # If the initial number of cut points does not exceed init_bins, return them directly.
    # Otherwise take init_bins evenly strided values from the start, so leftover values
    # end up in the last group.
    if cnt_unique_vals <= init_bins:
        return list_median_vals
    else:
        # Number of values per bin; note the integer division
        cnt_perbin = cnt_unique_vals // init_bins
        # Take the init_bins intermediate values
        list_median_vals = [list_median_vals[i * cnt_perbin] for i in range(init_bins - 1)]
        return list_median_vals
172f48bb019fd4cf4941b2ff0fefcb24709fe7a4
703,375
import numpy


def gower_distance_numpy(point1, point2, max_range):
    """!
    @brief Calculate Gower distance between two vectors using numpy.

    @param[in] point1 (array_like): The first vector.
    @param[in] point2 (array_like): The second vector.
    @param[in] max_range (array_like): Max range in each data dimension.

    @return (float) Gower distance between two objects.
    """
    with numpy.errstate(divide='ignore', invalid='ignore'):
        result = numpy.divide(numpy.abs(point1 - point2), max_range)

    if len(result.shape) > 1:
        return numpy.sum(numpy.nan_to_num(result), axis=1).T / len(result[0])
    else:
        return numpy.sum(numpy.nan_to_num(result)) / len(point1)
b97c40cd62654172b0d502e35bbef8ce9e9175c5
703,376
def can_register(extra, password_meta):
    """
    :param extra: {str: any?}, additional fields that the user will fill out when registering
        such as gender and birth.
    :param password_meta: {'count': int, 'count_number': int, 'count_uppercase': int,
        'count_lowercase': int, 'count_special': int}, Meta information for passwords
    :return: bool, Determines if user can register.
    """
    if password_meta['count'] <= 6:
        return False
    return True
a6ba8cc6fc4a4180bbbceb9a8ae14b07d7dad809
703,377
from typing import OrderedDict


def filter_excluded_fields(fields, Meta, exclude_dump_only):
    """Filter fields that should be ignored in the OpenAPI spec

    :param dict fields: A dictionary of field name / field object pairs
    :param Meta: the schema's Meta class
    :param bool exclude_dump_only: whether to filter fields in Meta.dump_only
    """
    exclude = getattr(Meta, "exclude", [])
    if exclude_dump_only:
        exclude += getattr(Meta, "dump_only", [])

    filtered_fields = OrderedDict(
        (key, value) for key, value in fields.items() if key not in exclude
    )
    return filtered_fields
a39a7665052cde0ba9f694696a57f2b1f6ca0603
703,378
def findNearestNeighbourPixel(img, seg, i, j, segSize, fourConnected): """ For the (i, j) pixel, choose which of the neighbouring pixels is the most similar, spectrally. Returns tuple (ii, jj) of the row and column of the most spectrally similar neighbour, which is also in a clump of size > 1. If none is found, return (-1, -1) """ (nBands, nRows, nCols) = img.shape minDsqr = -1 ii = jj = -1 # Cope with image edges (iiiStrt, iiiEnd) = (max(i-1, 0), min(i+1, nRows-1)) (jjjStrt, jjjEnd) = (max(j-1, 0), min(j+1, nCols-1)) for iii in range(iiiStrt, iiiEnd+1): for jjj in range(jjjStrt, jjjEnd+1): connected = ((not fourConnected) or ((iii == i) or (jjj == j))) if connected: segNbr = seg[iii, jjj] if segSize[segNbr] > 1: # Euclidean distance in spectral space. Note that because # we are only interested in the order, we don't actually # need to do the sqrt (which is expensive) dSqr = ((img[:, i, j] - img[:, iii, jjj]) ** 2).sum() if minDsqr < 0 or dSqr < minDsqr: minDsqr = dSqr ii = iii jj = jjj return (ii, jj)
e205652d7b39c922a203162f3cdc58672347b938
703,379
import json


def _load_json_as_list(path: str) -> list:
    """Load json file and convert it into a list.

    :param path: path to json file
    :type path: str
    :return: list loaded from the file, or an empty list if the file does not contain a list
    :rtype: list
    """
    with open(path, "r") as json_data:
        data = json.load(json_data)
        if isinstance(data, list):
            return data
        return []
d7aa301a830a826937522dc9c737bf84bf6f4f9a
703,380
from typing import List


def readFile(filename: str) -> List[str]:
    """
    Reads a file and returns a list of the data
    """
    try:
        with open(filename, "r") as fp:
            return fp.readlines()
    except:
        raise Exception(f"Failed to open {filename}")
a5817ab563b83d5cf4999290d10a14a0b68954b6
703,381
def calculate_residuals(df, fit, names): """Calculates residuals values by comparing values in df and in fit. Arguments: df (pandas.DataFrame): Holds the data to be fitted. Either concentrations vs time or charge passed vs time depending on the situation. fit (numpy.ndarray): Holds the fit evaluation. names: Names of the columns in df that hold the data to be compared to the fit values. Necessary because in some cases not all of the data stored in df is fitted. """ res = list() # if the fit data only has one dimension convert it to a two dimensional # array, this is necessary for parsing this array along its second # dimension in the following for loop if len(fit.shape) == 1: fit = fit.reshape(-1,1) for i, name in enumerate(names): # in order to obtain a similar quality of fit, independently from the # amplitude of the fitted data a normalization is required; here we # normalize the residuals by the sum of the fitted data and of the fit # result norm = df[name] + fit[:,i] partial_res = (df[name] - fit[:,i])/norm # to remove nan values due to division by zero (induces ValueError in # the lmfit.minimize function) the residuals are set to 0 when both the # fitted data and the fit result are 0 idx_div0 = norm == 0 partial_res[idx_div0] = 0 res.extend(partial_res) return res
5b597dd8ff83241993bee8cae27e6f85c43b1959
703,382
def get_trunc_hour_time(obstime):
    """Truncate obstime to nearest hour"""
    # floor division so the result is actually truncated to the hour
    return (int(obstime) // 3600) * 3600
8379aafc76c1987f537ea294c7def73ad32f1b23
703,384
import re


def normalize_package_name(python_package_name):
    """
    Normalize Python package name to be used as Debian package name.

    :param python_package_name: The name of a Python package
        as found on PyPI (a string).
    :returns: The normalized name (a string).

    >>> from py2deb import normalize_package_name
    >>> normalize_package_name('MySQL-python')
    'mysql-python'
    >>> normalize_package_name('simple_json')
    'simple-json'
    """
    return re.sub('[^a-z0-9]+', '-', python_package_name.lower()).strip('-')
47a850c601e64b570470a339769742d8f3732e28
703,385
def delete_prtg_device(prtg_single_device_obj):
    """ deletes the host at PRTG WITHOUT confirmation """
    result = prtg_single_device_obj.delete(confirm=False)
    return result
4cb9b2a35294f0c56a62a7dbd220ab2ada81a15d
703,386
def fuse_bg_features(feats):
    """
    :param feats:
    :return:
    """
    fused_feats = []
    for feat in feats:
        feat = feat.flatten(1)
        feat = feat.mean(dim=0)
        fused_feats.append(feat)
    return fused_feats
e268f925b0b54c12c0270a1df6345950f5b21f5f
703,387
def url_join(*args):
    """Combine URL parts to get the full endpoint address."""
    return '/'.join(arg.strip('/') for arg in args)
2b8409910186d3058c2e49bbf6f974a7cfceeffb
703,389
def filter_word_ids_with_non_zero_probability(word_ids, probas, pad_id=None): """ Filter out entries of word_ids that have exactly zero probability or are mapped to pad positions (if pad_id is not None). Args: word_ids (torch.LongTensor): tensor with word ids. Shape of (batch_size, seq_len) probas (torch.Tensor): tensor with probabilities for each word id. It can be also any other kind of measure of importance that could express 0 magnitude (e.g. norms). Shape of (batch_size, seq_len) pad_id (int): padding id in your vocabulary. If not None, word ids that are pad will be filtered out. Otherwise, all words are considered. Default is None Returns: a new list: valid word ids """ valid_top_word_ids = [] for seq_probas, seq_word_ids in zip(probas.tolist(), word_ids.tolist()): ids = [] for prob, word_id in zip(seq_probas, seq_word_ids): if word_id != pad_id and prob > 0: ids.append(word_id) valid_top_word_ids.append(ids) return valid_top_word_ids
4e3e79c087dc0a3396dc23468091e8cf58f474a3
703,390
def is_pairwise_disjoint(sets):
    """
    This function will determine if a collection of sets is pairwise disjoint.

    Args:
        sets (List[set]): A collection of sets
    """
    all_objects = set()
    for collection in sets:
        for x in collection:
            if x in all_objects:
                return False
            all_objects.add(x)
    return True
4d5c434b2db2cb167f51aa4c04534a9b12239547
703,391
def project_has_hook_attr_value(project, hook, attr, value):
    """Finds out if project's hook has attribute of given value.

    :arg project: The project to inspect
    :type project: pagure.lib.model.Project
    :arg hook: Name of the hook to inspect
    :type hook: str
    :arg attr: Name of hook attribute to inspect
    :type attr: str
    :arg value: Value to compare project's hook attribute value with
    :type value: object
    :return: True if project's hook attribute value is equal with given
        value, False otherwise
    """
    retval = False
    hook_obj = getattr(project, hook, None)
    if hook_obj is not None:
        attr_obj = getattr(hook_obj, attr, None)
        if attr_obj == value:
            retval = True

    return retval
f275c8877d9df2a58a0812d657879b370220532a
703,392
def type_to_python(typename, size=None): """type_to_python(typename: str, size: str) -> str Transforms a Declarations.yaml type name into a Python type specification as used for type hints. """ typename = typename.replace(' ', '') # normalize spaces, e.g., 'Generator *' # Disambiguate explicitly sized int/tensor lists from implicitly # sized ones. These permit non-list inputs too. (IntArrayRef[] and # TensorList[] are not real types; this is just for convenience.) if typename in {'IntArrayRef', 'TensorList'} and size is not None: typename += '[]' typename = { 'Device': 'Device', 'Generator': 'Generator', 'IntegerTensor': 'Tensor', 'Scalar': 'Number', 'ScalarType': '_dtype', 'Storage': 'Storage', 'BoolTensor': 'Tensor', 'IndexTensor': 'Tensor', 'Tensor': 'Tensor', 'MemoryFormat': 'memory_format', 'IntArrayRef': '_size', 'IntArrayRef[]': 'Union[_int, _size]', 'TensorList': 'Union[Tuple[Tensor, ...], List[Tensor]]', 'TensorList[]': 'Union[Tensor, Tuple[Tensor, ...], List[Tensor]]', 'bool': '_bool', 'double': '_float', 'int64_t': '_int', 'accreal': 'Number', 'real': 'Number', 'void*': '_int', # data_ptr 'void': 'None', 'std::string': 'str', 'Dimname': 'Union[str, ellipsis, None]', 'DimnameList': 'Sequence[Union[str, ellipsis, None]]', 'QScheme': '_qscheme', }[typename] return typename
ae7131d40e9e8da9d5543d57577661cce5c68d28
703,394
import os def GetExecutable(executable, location=None): """Returns the path to the given executable or None if it is not found. This algorithm provides a reasonably accurate determination of whether or not the given executable is on the machine and can be used without knowing its full path. An optional path can be added to the search. The algorithm returns the path or None. Args: executable: The name of the executable (no path info). (string) location: Optional location to search for the tool. (string) Returns: The path to the executable or None. (string or None) """ extensions = os.environ.get('PATHEXT', '').split(os.pathsep) paths = os.environ.get('PATH', '').split(os.pathsep) if location: paths.append(location) # Loop over every combination of path and file extension. for extension in extensions: for path in paths: full_path = os.path.join(path, '%s%s' % (executable, extension)) if os.path.isfile(full_path) and os.access(full_path, os.X_OK): return full_path return None
1b4e695e683a81400798cc11c82e0821358a7ccc
703,395
from typing import Tuple from typing import List from typing import Dict def _tasks_scheduler_config(venv_path, project_config) -> Tuple[List[Dict], List[str]]: """ tasks_scheduler supervisor config """ async_tasks_config = { 'name': 'async_tasks', 'command': f"{venv_path}/bin/python run.py async-tasks", 'directory': project_config['PROJECT_PATH'], 'log': f"{project_config['LOG_PATH']}/actorcloud.log", 'user': project_config['USERNAME'] } timer_tasks_config = { 'name': 'timer_tasks', 'command': f"{venv_path}/bin/python run.py timer-tasks", 'directory': project_config['PROJECT_PATH'], 'log': f"{project_config['LOG_PATH']}/actorcloud.log", 'user': project_config['USERNAME'] } services_config = [async_tasks_config, timer_tasks_config] group_names = ['async_tasks', 'timer_tasks'] return services_config, group_names
f308ca9661c3ecdde35b76d48a034102b1e39373
703,399
import functools def caching_module_getattr(cls): """ Helper decorator for implementing module-level ``__getattr__`` as a class. This decorator must be used at the module toplevel as follows:: @caching_module_getattr class __getattr__: # The class *must* be named ``__getattr__``. @property # Only properties are taken into account. def name(self): ... The ``__getattr__`` class will be replaced by a ``__getattr__`` function such that trying to access ``name`` on the module will resolve the corresponding property (which may be decorated e.g. with ``_api.deprecated`` for deprecating module globals). The properties are all implicitly cached. Moreover, a suitable AttributeError is generated and raised if no property with the given name exists. """ assert cls.__name__ == "__getattr__" # Don't accidentally export cls dunders. props = {name: prop for name, prop in vars(cls).items() if isinstance(prop, property)} instance = cls() @functools.lru_cache(None) def __getattr__(name): if name in props: return props[name].__get__(instance) raise AttributeError( f"module {cls.__module__!r} has no attribute {name!r}") return __getattr__
17a05df9556947a5760a2341e35395af5376047e
703,400
def min_query():
    """Convert sentence to query
    """
    return {'hello', 'world', 'of', 'geek'}
98ff87ecc8397c38ecd96eda368153905436a9de
703,401
def format_timedelta(days: int = 0, hours: int = 0, minutes: int = 0, seconds: int = 0) -> str:
    """Returns a simplified string representation of the given timedelta."""
    s = '' if days == 0 else f'{days:d}d'
    if hours > 0:
        if len(s) > 0:
            s += ' '
        s += f'{hours:d}h'
    if minutes > 0:
        if len(s) > 0:
            s += ' '
        s += f'{minutes:d}min'
    if seconds > 0 or len(s) == 0:
        if len(s) > 0:
            s += ' '
        s += f'{seconds:d}sec'
    return s
6414e2be5a01f178d6515ab6f21ea7c5ab4d5004
703,402
def _try_encode(text, charset):
    """Attempt to encode using the default charset if none is provided.

    Should we permit encoding errors?"""
    if charset:
        return text.encode(charset)
    else:
        return text.encode()
ce418ff0b5e2ee938781fe2074bf146b9ba89447
703,403
def accuracy(letters, target_string):
    """
    Comparing accuracy to the correct answer.

    Args:
        letters(np.array): (num_chars, )
        target_string(str)
    Return:
        float: accuracy.
    """
    count = 0
    assert len(letters) == len(target_string)
    for i in range(len(target_string)):
        if letters[i] == target_string[i]:
            count += 1
    return count / len(target_string)
5898a086997d3b9ff9f9bcf84b747dd553a0e4cb
703,404
from typing import List async def cringo_card(list_of_emojis: List[List[str]]) -> List[List[str]]: """This makes the Cringo! card complete with headers.""" top_row = ['🇦', '🇧', '🇨', '🇩', '🇪', '🇫'] side_column = ['<:lemonface:623315737796149257>', '1️⃣', '2️⃣', '3️⃣', '4️⃣', '5️⃣', '6️⃣'] list_of_emojis.insert(0, top_row[0:len(list_of_emojis)]) emojis_to_send = [] for row in range(0, len(list_of_emojis)): list_of_emojis[row].insert(0, side_column[row]) emoji_string = '\u200A'.join(list_of_emojis[row]) emojis_to_send.append(emoji_string) return list_of_emojis
44421d02c8b9c0b7f873e6d5ad5e8de803ac70d6
703,405
import random


def lucky_enough(luck=0):
    """
    Check if you are lucky enough.

    :param luck: should be an int between 0-100
    :return: Bool
    """
    return random.randint(0, 99) < luck
158179ab5da330561f6d3b7be06697b0b319b1db
703,406
def relu(attrs, inputs, proto_obj):
    """Computes rectified linear function."""
    return 'relu', attrs, inputs
d45c7f517c2cef57206db56b1ed7402127e72a01
703,407
import importlib import argparse def application_type(value): """ Return aiohttp application defined in the value. """ try: module_name, app_name = value.split(":") except ValueError: module_name, app_name = value, "app" module = importlib.import_module(module_name) try: if app_name.endswith("()"): app_name = app_name.strip("()") factory_app = getattr(module, app_name) return factory_app() return getattr(module, app_name) except AttributeError as error: raise argparse.ArgumentTypeError(error) from error
0de6fb898edc48d319415fb48c9d2f51e210d1db
703,408
import logging


def check_multiple_insert_param(columns_to_insert, insert_values_dict_lst):
    """
    Checks if the parameters passed are of the correct order.

    :param columns_to_insert:
    :param insert_values_dict_lst:
    :return:
    """
    column_len = len(columns_to_insert)
    for row in insert_values_dict_lst:
        if column_len != len(row):
            logging.error("%s doesn't match the dimensions" % (row))
            return False
        for column in columns_to_insert:
            if column not in row:
                logging.error("%s column isn't present in dictionary" % (column))
                return False
    return True
ffb5a4eb595b84a439f3a910d81d699289c1d69a
703,410
import random
import string


def random_string(length=16):
    """Generate a random string of the given length."""
    result = ""
    while len(result) < length:
        result += random.choice(string.ascii_letters + string.digits)
    return result
04bf92658ce8d9535aa91c751c5d9f1746827eaf
703,412
import os


def basename(filename):
    """
    Return basename of given filename.

    Parameters
    ----------
    filename : :class:`str`
        Name of the file (may contain absolute path and extension) the
        basename should be returned for.

    Returns
    -------
    basename : :class:`str`
        Basename corresponding to the filename provided as input.
    """
    return os.path.splitext(os.path.split(filename)[-1])[0]
d10f50c4f0264e6b1e8ce3bc82fea97230708a3b
703,413
def generate_kml_end_str():
    """ Generates the footer for a KML file """
    kml_str = '</Document>'
    kml_str += '</kml>'
    return kml_str
b67ed779e29f0c358a7e76c7662c36a714e50649
703,414
def relationship(flights, planes):
    """
    The flights and planes tables share two columns, tailnum and year; of these,
    tailnum is treated as the key in both tables.
    """
    print(planes.shape)  # (3322, 9)
    print(planes.tailnum.nunique())  # 3322
    print(flights.shape)  # (336776, 16)
    print(flights.tailnum.nunique())  # 4043
    # planes has 3322 unique tailnums, which is the full size of the planes table,
    # i.e. each unique tailnum from planes matches many rows in flights.
    # Therefore there is a one-to-many relationship between planes and flights.
    return "one-to-many relationship"
da1f31a4afa1720caeb01fc4bbec445999da4f56
703,415
def numpy_unpad(x, pad_width):
    """Unpad an array.

    Args:
        x (numpy.ndarray): array to unpad
        pad_width (tuple): padding

    Returns:
        numpy.ndarray
    """
    slices = []
    for c in pad_width:
        e = None if c[1] == 0 else -c[1]
        slices.append(slice(c[0], e))
    return x[tuple(slices)]
31f41a7a741d7efa870670c95a8acf8be365975a
703,416
def linear(a, b, c):
    """exec a * b + c"""
    print('exec linear')
    ret = a * b + c
    print('linear: %s * %s + %s = %s' % (a, b, c, ret))
    return ret
fc5c1c99bd03f61e5f8fd87d4d1863002469c57d
703,417
def validate_spec(index, spec):
    """
    Validate the value for a parameter specialization.

    This validator ensures that the value is hashable and not None.

    Parameters
    ----------
    index : int
        The integer index of the parameter being specialized.
    spec : object
        The parameter specialization.

    Returns
    -------
    result : object
        The validated specialization object.
    """
    if spec is None:
        msg = "cannot specialize template parameter %d with None"
        raise TypeError(msg % index)
    try:
        hash(spec)
    except TypeError:
        msg = "template parameter %d has unhashable type: '%s'"
        raise TypeError(msg % (index, type(spec).__name__))
    return spec
3612d927ef7e61613e0991ffc04ea76555d1b115
703,418
import hashlib


def password_hasher(password):
    """
    Just hashes the password

    :param password:
    :return: 32 byte hash
    :rtype: bytes
    """
    return hashlib.pbkdf2_hmac('sha256', password.encode('utf-8'), b'salt', 100000)
44e921903c63f1703bbc79869f1a63a25bfe5916
703,419
import logging


def logmethod(func):
    """
    Decorator to add logging information around calls for use with .
    """
    def _wrapper(self, *args, **kwds):
        logging.info("Start::%s.%s:%s", func.__module__, self.__class__.__name__, func.__name__)
        returnval = func(self, *args, **kwds)
        logging.info("Finish::%s.%s:%s", func.__module__, self.__class__.__name__, func.__name__)
        return returnval
    return _wrapper
7f8223ab99d6f9101088bff050f79fd0669231d4
703,421
def _replace_suffix(string, old, new):
    """Returns a string with an old suffix replaced by a new suffix."""
    return string.endswith(old) and string[:-len(old)] + new or string
4e487f62126b130d973f249cd34abc5a75df3eaa
703,422
def is_empty_json_response_from_s3(context):
    """Check if the JSON response from S3 is empty (but not None)."""
    return context.s3_data == {}
6ec3a41646de74d82f3786e772228da55d91b63a
703,423
def mock_validator_execute_validator(*args, **kwargs):
    """
    Mock method to just return the built command line without executing it.
    """
    cls = args[0]
    command = args[1]
    return command
d03cc70f1f0a5bd59e7a116e3774b99aa8db5a03
703,424
def has_gap(k1, k2, min_gap=0.002):
    """Determine whether there is a gap between k1 and k2."""
    assert k2['dt'] > k1['dt']
    if k1['high'] < k2['low'] * (1 - min_gap) \
            or k2['high'] < k1['low'] * (1 - min_gap):
        return True
    else:
        return False
642d9793dcc6f85d3bf21e1abf6f481292c053f1
703,425
import logging def _set_logger( logger_file_path: str, logger_name: str = "default_logger", write_to_console: bool = True, ) -> logging.Logger: """Set logger to log to the given path. Modified from https://docs.python.org/3/howto/logging-cookbook.html Args: logger_file_path (str): Filepath to write to logger_name (str, optional): Name of the logger to use. Defaults to "default_logger" write_to_console (bool, optional): Should write the logs to console. Defaults to True Returns: logging.Logger: Logger object """ logger = logging.getLogger(name=logger_name) logger.setLevel(level=logging.INFO) # create file handler which logs all the messages file_handler = logging.FileHandler(filename=logger_file_path) file_handler.setLevel(level=logging.INFO) # create formatter and add it to the handlers formatter = logging.Formatter(fmt="%(message)s") file_handler.setFormatter(fmt=formatter) # add the handlers to the logger logger.addHandler(hdlr=file_handler) if write_to_console: # create console handler with a higher log level stream_handler = logging.StreamHandler() stream_handler.setLevel(level=logging.INFO) # add formatter to the handlers stream_handler.setFormatter(fmt=formatter) # add the handlers to the logger logger.addHandler(hdlr=stream_handler) return logger
338999fbcd34367f1dc4140143688965663bf486
703,426
def sort_and_print(body, num):
    """
    Sorts the values of dictionaries and collects the respective top sentences

    :param body: list of dictionaries of 'sentence': score
    :param num: no of sentences to be returned
    :return: list of the top sentences
    """
    result = []
    rank = []
    for sentdict in body:
        for sent in sentdict:
            rank.append(sentdict[sent])
    rank = list(set(rank))  # remove duplicates
    rank = sorted(rank, reverse=True)
    count = 0
    # collect top 'num' sentences in same order as of original document
    for sentdict in body:
        for sent in sentdict:
            if count == num:
                break
            for r in rank[:num]:
                if sentdict[sent] == r:
                    result.append(sent)
                    count += 1
    return result
400b11f84d0e68fb393c261736856257b4681c52
703,427
import torch


def dagger(x: torch.Tensor) -> torch.Tensor:
    """Conjugate transpose of a batch of matrices.

    Matrix dimensions are assumed to be the final two, with all preceding
    dimensions batched over."""
    return x.conj().transpose(-2, -1)
672d26333182803b18f3d1d10e49ef393e216f5f
703,428