Columns: content (string, 39 to 14.9k chars), sha1 (string, 40 chars), id (int64, 0 to 710k).
def enrich(alert, rules):
    """Determine if an alert meets an enrichment rule

    :param alert: The alert to test
    :param rules: An array of enrichment rules to test against
    :returns: Alert - The enriched Alert object
    """
    for enrichment in rules:
        updates = enrichment(alert)
        if not updates:
            continue
        for name, value in updates.items():
            alert[name] = value
    return alert
97bf2d387e4c6e1ab38628860415bdf83c4634b9
704,175
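A minimal usage sketch for enrich(); the rule and alert below are hypothetical examples, not part of the original source:

def add_severity(alert):
    # A rule returns a dict of fields to merge, or a falsy value to skip.
    return {"severity": "high"} if alert.get("source") == "ids" else None

alert = {"source": "ids"}
print(enrich(alert, [add_severity]))  # {'source': 'ids', 'severity': 'high'}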
def re(rm, rf, beta):
    """Returns cost of equity using CAPM formula."""
    return rf + beta * (rm - rf)
5f91fd21ba1833dcb816ac767c8e1a15e2a30a5a
704,176
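An illustrative CAPM calculation (the rates are made up; note the function name shadows the standard-library re module if both are in scope): with a market return of 8%, a risk-free rate of 2% and a beta of 1.2, the cost of equity is 2% + 1.2 * (8% - 2%) = 9.2%.

print(re(rm=0.08, rf=0.02, beta=1.2))  # ~0.092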
import socket


def check_tcp_port(host, port, timeout=3):
    """
    Try connecting to a given TCP port.

    :param host: Host to connect to
    :param port: TCP port to connect to
    :param timeout: Connection timeout, in seconds
    :return: True if the port is open, False otherwise.
    """
    s = socket.socket()
    try:
        s.settimeout(timeout)
        s.connect((host, port))
    except socket.error:
        return False
    else:
        s.close()
        return True
5e49ebab2c219e9772174d830dffcb958033befd
704,177
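A usage sketch; the host and port are illustrative and the result depends on the local machine:

if check_tcp_port("localhost", 22, timeout=1):
    print("port 22 is open")
else:
    print("port 22 is closed or filtered")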
def get_sid_trid_combination_score(site_id, tr_ids_list,
                                   idfilt2best_trids_dic):
    """
    Get site ID - transcript ID combination score, based on selected
    transcripts for each of the 10 different filter settings.

    10 transcript quality filter settings:
    EIR EXB TSC ISRN ISR ISRFC SEO FUCO TCOV TSL

    idfilt2best_trids_dic:
        "site_id,filter_id" -> top transcript ID(s) after applying filter
        on exon IDs > min_eir

    >>> site_id = "s1"
    >>> idfilt2best_trids_dic = {"s1,EIR" : ["t1"], "s1,EXB" : ["t1"], "s1,TSC" : ["t1"], "s1,ISRN" : ["t1"], "s1,ISR" : ["t1"], "s1,ISRFC" : ["t1"], "s1,SEO" : ["t1"], "s1,FUCO" : ["t1"], "s1,TCOV" : ["t1"], "s1,TSL" : ["t1"]}
    >>> tr_ids_list = ["t1"]
    >>> get_sid_trid_combination_score(site_id, tr_ids_list, idfilt2best_trids_dic)
    {'t1': 10}
    >>> idfilt2best_trids_dic = {"s1,EIR" : ["t1", "t2"], "s1,EXB" : ["t1", "t2"], "s1,TSC" : ["t1"], "s1,ISRN" : ["t2"], "s1,ISR" : ["t1"], "s1,ISRFC" : ["t1"], "s1,SEO" : ["t1"], "s1,FUCO" : ["t1", "t2"], "s1,TCOV" : ["t1"], "s1,TSL" : ["t2"]}
    >>> tr_ids_list = ["t1", "t2", "t3"]
    >>> get_sid_trid_combination_score(site_id, tr_ids_list, idfilt2best_trids_dic)
    {'t1': 8, 't2': 5, 't3': 0}

    """
    assert tr_ids_list, "tr_ids_list empty"
    filter_ids = ["EIR", "EXB", "TSC", "ISRN", "ISR", "ISRFC", "SEO",
                  "FUCO", "TCOV", "TSL"]
    trid2comb_sc_dic = {}
    for tr_id in tr_ids_list:
        trid2comb_sc_dic[tr_id] = 0
    for tr_id in tr_ids_list:
        for fid in filter_ids:
            sitefiltid = "%s,%s" % (site_id, fid)
            if tr_id in idfilt2best_trids_dic[sitefiltid]:
                trid2comb_sc_dic[tr_id] += 1
    return trid2comb_sc_dic
9cc2d9a0f2fab4e4bf3030ef360b582caeaab45f
704,179
def ergsperSecondtoLsun(ergss):
    """
    Converts ergs per second to solar luminosity in L_sun.

    :param ergss: ergs per second
    :type ergss: float or ndarray

    :return: luminosity in L_sun
    :rtype: float or ndarray
    """
    return ergss / 3.839e33
806b590c713bc9177db66993aff2f6feaa32d736
704,181
from typing import Any


def produces_record(obj: Any) -> bool:
    """Check if `obj` is annotated to generate records."""
    return hasattr(obj, 'get_data_specs')
b65ffe3d599963f8f5ee4d1581179ab7567aa074
704,182
import json


def readJson(fname):
    """
    Read a JSON-lines file and load it line-by-line into data
    """
    data = []
    line_num = 0
    with open(fname, encoding="utf-8") as f:
        for line in f:
            line_num += 1
            try:
                data.append(json.loads(line))
            except json.JSONDecodeError:
                print("error", line_num)
    return data
0a4a78ce7e36fbc444b27ca6eec3ad5ba582b7cd
704,183
def binary_search(arr, val):
    """
    Summary of binary_search function:
    searches a sorted input array for a value and returns the index of
    the matching element in the array, or -1 if not found.

    Parameters:
    arr (array): A sorted array of values
    val (integer): An integer value

    Returns:
    index (integer): Returns index of array element
    """
    index = -1
    start = 0
    end = len(arr) - 1
    found = False
    while not found and start <= end:
        middle_index = (start + end) // 2
        if val == arr[middle_index]:
            index = middle_index
            found = True
        elif val < arr[middle_index]:
            # reassign the end value, excluding the middle index
            end = middle_index - 1
        else:
            # reassign the start value, excluding the middle index
            start = middle_index + 1
    return index
3d5a44b5edce3820d1e669e549f9395d9052d433
704,184
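A quick check of binary_search on a small sorted list (the values are illustrative):

data = [1, 3, 5, 7, 11]
assert binary_search(data, 7) == 3
assert binary_search(data, 4) == -1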
import torch


def compute_jacobian(x, y, structured_tensor=False, retain_graph=False):
    """Compute the Jacobian matrix of output with respect to input.

    If input and/or output have more than one dimension, the Jacobian of
    the flattened output with respect to the flattened input is returned
    if `structured_tensor` is `False`. If `structured_tensor` is `True`,
    the Jacobian is structured in dimensions `[y_shape, flattened_x_shape]`.
    Note that `y_shape` can contain multiple dimensions.

    Args:
        x (list or torch.Tensor): Input tensor or sequence of tensors with
            the parameters to which the Jacobian should be computed.
            Important: the `requires_grad` attribute of input needs to be
            `True` while computing output in the forward pass.
        y (torch.Tensor): Output tensor with the values of which the
            Jacobian is computed.
        structured_tensor (bool): A flag indicating if the Jacobian should
            be structured in a tensor of shape
            `[y_shape, flattened_x_shape]` instead of
            `[flattened_y_shape, flattened_x_shape]`.
        retain_graph (bool): Whether the computation graph should be kept
            after the last backward pass.

    Returns:
        (torch.Tensor): 2D tensor containing the Jacobian of output with
            respect to input if `structured_tensor` is `False`. If
            `structured_tensor` is `True`, the Jacobian is structured in a
            tensor of shape `[y_shape, flattened_x_shape]`.
    """
    if isinstance(x, torch.Tensor):
        x = [x]

    # Create the empty Jacobian.
    output_flat = y.view(-1)
    numel_input = 0
    for input_tensor in x:
        numel_input += input_tensor.numel()
    jacobian = torch.Tensor(y.numel(), numel_input)

    # Compute the Jacobian row by row; the graph must be retained for all
    # but the last backward pass.
    for i, output_elem in enumerate(output_flat):
        if i == output_flat.numel() - 1:
            gradients = torch.autograd.grad(output_elem, x,
                                            retain_graph=retain_graph,
                                            create_graph=False,
                                            only_inputs=True)
        else:
            gradients = torch.autograd.grad(output_elem, x,
                                            retain_graph=True,
                                            create_graph=False,
                                            only_inputs=True)
        jacobian_row = torch.cat([g.view(-1).detach() for g in gradients])
        jacobian[i, :] = jacobian_row

    if structured_tensor:
        shape = list(y.shape)
        shape.append(-1)
        jacobian = jacobian.view(shape)
    return jacobian
bd5fd8e3e2b8171680bf059d10fadfe1c39d8899
704,186
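A minimal sketch of compute_jacobian on a toy elementwise function (illustrative; for y = x ** 2 the Jacobian is diagonal with entries 2 * x):

x = torch.randn(3, requires_grad=True)
y = x ** 2
J = compute_jacobian(x, y)
print(J.shape)  # torch.Size([3, 3])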
def make_rows(cngrs_prsn):
    """Output a list of dictionaries for each JSON object representing a
    congressperson.

    Each individual dictionary will contain information about the
    congressperson as well as info about their term.
    """
    name = cngrs_prsn["name"]["first"] + " " + cngrs_prsn["name"]["last"]
    birthday = cngrs_prsn["bio"].get("birthday", None)
    gender = cngrs_prsn["bio"]["gender"]
    terms = cngrs_prsn["terms"]
    rows = []
    for t in terms:
        row = {}
        row["name"] = name
        row["birthday"] = birthday
        row["gender"] = gender
        row["term_start"] = t["start"]
        row["term_end"] = t["end"]
        row["term_type"] = t["type"]
        row["party"] = t.get("party")  # Defaults to None
        rows.append(row)
    return rows
a80c55c3db1261a339ec08814c0f532efd35e45a
704,191
def get_list_of_results(results):
    """Modify the outputs so that they are returned in a list format
    where it is sometimes easier to be used by other functions.

    Parameters
    ----------
    results : list
        A list of named tuples for each iteration

    Returns
    -------
    list, list, list, list
        Four lists that include all waits, services, blocks and
        proportions within target of all runs of all individuals
    """
    all_waits = [w.waiting_times for w in results]
    all_services = [s.service_times for s in results]
    all_blocks = [b.blocking_times for b in results]
    all_props = [p.proportion_within_target for p in results]
    return all_waits, all_services, all_blocks, all_props
b5903e3b99aeb37ce90190e86a7cd6e2408ad35b
704,193
def return_last(responses):
    """Return last item of a list."""
    return responses[-1]
f4aedfe0b10adcdb859ac1d0f5809ca666abac80
704,199
def get_boundary_cell_count(plate_dims, exclude_outer=1):
    """Get number of wells in outer or inner edges

    Parameters
    ----------
    plate_dims : array
        dimensions of plate
    exclude_outer : int
        1 counts only the outermost edge; 2 also adds the next inner
        ring of wells

    Returns
    -------
    boundary_cell_count : int
        number of wells in the edges
    """
    boundary_cell_count = 2 * (plate_dims[0] + plate_dims[1] - 2)
    if exclude_outer == 2:
        boundary_cell_count += 2 * (plate_dims[0] - 2 + plate_dims[1] - 2 - 2)
    return boundary_cell_count
8e5056af647f893854bab3de3e6e5038c0d703e1
704,201
def compute_border_indices(log2_T, J, i0, i1):
    """
    Computes border indices at all scales which correspond to the
    original signal boundaries after padding.

    At the finest resolution,
    original_signal = padded_signal[..., i0:i1].
    This function finds the integers i0, i1 for all temporal subsamplings
    by 2**J, being conservative on the indices.

    Maximal subsampling is by `2**log2_T` if `average=True`, else by
    `2**max(log2_T, J)`. We compute indices up to the latter to be sure.

    Parameters
    ----------
    log2_T : int
        Maximal subsampling by low-pass filtering is `2**log2_T`.
    J : int / tuple[int]
        Maximal subsampling by band-pass filtering is `2**J`.
    i0 : int
        start index of the original signal at the finest resolution
    i1 : int
        end index (excluded) of the original signal at the finest
        resolution

    Returns
    -------
    ind_start, ind_end : dictionaries with keys in [0, ..., log2_T] such
        that the original signal is in
        padded_signal[ind_start[j]:ind_end[j]] after subsampling by 2**j

    References
    ----------
    This is a modification of
    https://github.com/kymatio/kymatio/blob/master/kymatio/scattering1d/utils.py
    Kymatio, (C) 2018-present. The Kymatio developers.
    """
    if isinstance(J, tuple):
        J = max(J)
    ind_start = {0: i0}
    ind_end = {0: i1}
    for j in range(1, max(log2_T, J) + 1):
        ind_start[j] = (ind_start[j - 1] // 2) + (ind_start[j - 1] % 2)
        ind_end[j] = (ind_end[j - 1] // 2) + (ind_end[j - 1] % 2)
    return ind_start, ind_end
09d29c4de2c808a1947d513580817bda16a6bfe7
704,202
def isbuffer(obj) -> bool:
    """
    Test whether `obj` is an object that supports the buffer API, like
    a bytes or bytearray object.
    """
    try:
        with memoryview(obj):
            return True
    except TypeError:
        return False
bede4ffeb154e765c7c2f4dea3bfa77281b313f2
704,203
def aggregate(loss, weights=None, mode='mean'):
    """Aggregates an element- or item-wise loss to a scalar loss.

    Parameters
    ----------
    loss : Theano tensor
        The loss expression to aggregate.
    weights : Theano tensor, optional
        The weights for each element or item, must be broadcastable to
        the same shape as `loss` if given. If omitted, all elements will
        be weighted the same.
    mode : {'mean', 'sum', 'normalized_sum'}
        Whether to aggregate by averaging, by summing or by summing and
        dividing by the total weights (which requires `weights` to be
        given).

    Returns
    -------
    Theano scalar
        A scalar loss expression suitable for differentiation.

    Notes
    -----
    By supplying binary weights (i.e., only using values 0 and 1), this
    function can also be used for masking out particular entries in the
    loss expression. Note that masked entries still need to be valid
    values, not-a-numbers (NaNs) will propagate through.

    When applied to batch-wise loss expressions, setting `mode` to
    ``'normalized_sum'`` ensures that the loss per batch is of a similar
    magnitude, independent of associated weights. However, it means that
    a given datapoint contributes more to the loss when it shares a batch
    with low-weighted or masked datapoints than with high-weighted ones.
    """
    if weights is not None:
        loss = loss * weights
    if mode == 'mean':
        return loss.mean()
    elif mode == 'sum':
        return loss.sum()
    elif mode == 'normalized_sum':
        if weights is None:
            raise ValueError("require weights for mode='normalized_sum'")
        return loss.sum() / weights.sum()
    else:
        raise ValueError("mode must be 'mean', 'sum' or 'normalized_sum', "
                         "got %r" % mode)
6d888d1854cfa78e13fcd5eba412e224164386d7
704,209
def is_workinprogress(change):
    """Return True if the patchset is WIP

    :param dict change: De-serialized dict of a gerrit change
    :return: True if one of the votes on the review sets it to WIP.
    """
    # This indicates WIP for older Gerrit versions
    if change['status'] != 'NEW':
        return True

    # Gerrit 2.8 WIP
    last_patch = change['patchSets'][-1]
    try:
        approvals = last_patch['approvals']
    except KeyError:
        # Means no one has voted on the latest patch set yet
        return False
    for a in approvals:
        if a['type'] == 'Workflow' and int(a['value']) < 0:
            return True

    return False
ac2f5ba1ab8d5fd432ef7b13c5b033e0c3710fd4
704,214
import pickle


def pickle_load(namefile: str):
    """Load Python variable, given name of file.

    :param namefile: A string of file to load.
    :return output: A loaded variable.
    """
    with open(namefile, 'rb') as load_file:
        output = pickle.load(load_file)
    return output
425e53b8daf69bf832abc45a4270cc01f383c50e
704,216
def negate(condition):
    """
    Returns a CPP conditional that is the opposite of the conditional
    passed in.
    """
    if condition.startswith('!'):
        return condition[1:]
    return "!" + condition
5f31ed3ee2f16a53674f830402fdec890af25032
704,218
def points(start, end):
    """ Bresenham's Line Drawing Algorithm in 2D """
    l = []
    x0, y0 = start
    x1, y1 = end
    dx = abs(x1 - x0)
    dy = abs(y1 - y0)
    if x0 < x1:
        sx = 1
    else:
        sx = -1
    if y0 < y1:
        sy = 1
    else:
        sy = -1
    err = dx - dy
    while True:
        l.append((x0, y0))
        if x0 == x1 and y0 == y1:
            break
        e2 = 2 * err
        if e2 > -dy:
            # overshot in the y direction
            err = err - dy
            x0 = x0 + sx
        if e2 < dx:
            # overshot in the x direction
            err = err + dx
            y0 = y0 + sy
    return l
ffa8be5eb09e2b454242e4095883bfee239e5319
704,222
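An illustrative trace of the algorithm, rasterizing the line from (0, 0) to (3, 2):

print(points((0, 0), (3, 2)))  # [(0, 0), (1, 1), (2, 1), (3, 2)]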
def ab_from_mv(m, v):
    """
    Estimate beta parameters (a, b) from given mean and variance;
    return (a, b).

    Note: for the uniform distribution on [0, 1], (m, v) = (0.5, 1/12).
    """
    phi = m * (1 - m) / v - 1  # phi = 2 for the uniform distribution
    return (phi * m, phi * (1 - m))
0326c165e44c1ab9df091e0344f12b9fab8c0e19
704,223
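A sanity check (illustrative): the uniform distribution on [0, 1] has mean 0.5 and variance 1/12, and is the Beta(1, 1) distribution, so the moment-matched parameters come out as (1, 1):

print(ab_from_mv(0.5, 1 / 12))  # ~(1.0, 1.0), i.e. Beta(1, 1)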
import csv


def csvReadCallback(inputFile, **kw):
    """Read callback for CSV data"""
    inputFile.readline()  # skip header
    reader = csv.reader(inputFile, lineterminator='\n', **kw)
    return [row for row in reader]
e36c92e5792e905da22438a58c8ce810c2a22e2a
704,230
def _LJ_rminepsilon_to_ab(coeffs):
    """
    Convert rmin/epsilon representation to AB representation of the
    LJ potential
    """
    A = coeffs['epsilon'] * coeffs['Rmin'] ** 12.0
    B = 2 * coeffs['epsilon'] * coeffs['Rmin'] ** 6.0
    return {"A": A, "B": B}
0963c0e8b949d35842660a499ce80a388485773f
704,233
def CycleTarget_to_c(self):
    """Syntax for a target of a cycle."""
    return f"cycle_{self.targetID}: continue;"
12cc7a57e5a24a62aba43ac99879d5a5d364ee29
704,240
def get_pipeline_lines(input_pipeline):
    """Returns a list with the lines in the .cppipe file"""
    with open(input_pipeline) as f:
        lines = f.readlines()
    return lines
403e7531b1cadfe25f519d2b176b97ac344cde6b
704,243
def get_interface_type(interface):
    """Gets the type of interface
    """
    if interface.upper().startswith("ET"):
        return "ethernet"
    elif interface.upper().startswith("VL"):
        return "svi"
    elif interface.upper().startswith("LO"):
        return "loopback"
    elif interface.upper().startswith("MG"):
        return "management"
    elif interface.upper().startswith("MA"):
        return "management"
    elif interface.upper().startswith("PO"):
        return "portchannel"
    elif interface.upper().startswith("NV"):
        return "nve"
    else:
        return "unknown"
f770a3ef1c43574d22630a5c4fff2f25d4975279
704,246
def rgb2gray(image):
    """Convert 3-channel RGB image into grayscale"""
    if image.ndim == 3:
        return (0.299 * image[:, :, 0] + 0.587 * image[:, :, 1]
                + 0.114 * image[:, :, 2])
    elif image.ndim == 4:
        return (0.299 * image[:, :, :, 0] + 0.587 * image[:, :, :, 1]
                + 0.114 * image[:, :, :, 2])
f87ed301dfd9c13ebfbabf99ad4b56c959a91e46
704,247
def move_ship_waypoint(instructions: list) -> list:
    """Move the ship using the waypoint movement rules

    :param instructions: List of movement instructions
    :return: Final position of the ship
    """
    waypoint = [10, 1]
    ship = [0, 0]
    for instruction in instructions:
        cmd, val = instruction
        if cmd == 'F':
            ship[0] += val * waypoint[0]
            ship[1] += val * waypoint[1]

        if cmd == 'N':
            waypoint[1] += val
        elif cmd == 'S':
            waypoint[1] -= val
        elif cmd == 'E':
            waypoint[0] += val
        elif cmd == 'W':
            waypoint[0] -= val
        elif cmd == 'L' or cmd == 'R':
            rotation = (2 * (cmd == 'L') - 1) * val % 360
            if rotation == 90:
                waypoint[0], waypoint[1] = -waypoint[1], waypoint[0]
            elif rotation == 180:
                waypoint[0] *= -1
                waypoint[1] *= -1
            elif rotation == 270:
                waypoint[0], waypoint[1] = waypoint[1], -waypoint[0]
    return ship
7202392e4826d522287455d94f7b06c0e2f931ee
704,251
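A worked example on a tiny instruction list (illustrative): forward 10 moves the ship to (100, 10), north 3 shifts the waypoint to (10, 4), and forward 7 lands the ship at (170, 38).

print(move_ship_waypoint([('F', 10), ('N', 3), ('F', 7)]))  # [170, 38]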
import itertools


def get_hyperparams_combinations(hyperparams):
    """Get list of hyperparameter (dict) combinations."""
    # transforms tuning hyperparams to a list of dict params for each option
    return [
        {k: v for k, v in zip(hyperparams.keys(), hypms)}
        for hypms in itertools.product(
            *[vals for vals in hyperparams.values()])
    ]
e5f52a8eddb8a2a476e0daa47f63161d440263f2
704,252
def p_to_stars(p, thres=(0.1, 0.05, 0.01)):
    """Return stars for significance values."""
    stars = []
    for t in thres:
        if p < t:
            stars.append("*")
    return "".join(stars)
d88c2fd6c1b4e2d75a9cb664dfc10fab308bc6ee
704,255
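An illustrative call: p = 0.03 clears the 0.1 and 0.05 thresholds but not 0.01, so it earns two stars.

print(p_to_stars(0.03))  # '**'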
def factorial_r(number):
    """
    Calculates the factorial of a number, using a recursive process.

    :param number: The number.
    :return: n!
    """
    # Check to make sure the argument is valid.
    if number < 0:
        raise ValueError("number must be non-negative")
    # This is the recursive part of the function.
    if number == 0:
        # If the argument is 0, then we simply return the value of 0!,
        # which is 1.
        return 1
    else:
        # Otherwise, return n multiplied by (n - 1)!
        return number * factorial_r(number - 1)
e5c28edac93b965f438bd61c5bb1c0a935c96700
704,257
from typing import Any


def create_result_scalar(name: str, item_type: str, value: Any) -> dict:
    """
    Create a scalar result for posting to EMPAIA App API.

    :param name: Name of the result
    :param item_type: Type of result
    :param value: Value of the result
    """
    result = {"name": name, "type": item_type, "value": value}
    return result
3fb16c540cc8c76cfc42e4a906e4be280346b802
704,264
import sympy


def replace_heaviside(formula):
    """Set Heaviside(0) = 0

    Differentiating sympy Min and Max is giving Heaviside:
    Heaviside(x) = 0 if x < 0 and 1 if x > 0, but Heaviside(0) needs to
    be defined by the user. We set Heaviside(0) to 0 because in general
    there is no sensitivity. This is done by setting the second argument
    to zero.
    """
    if not isinstance(formula, sympy.Expr):
        return formula
    w = sympy.Wild("w")
    return formula.replace(sympy.Heaviside(w), sympy.Heaviside(w, 0))
d1aff5e4a2dd68ba53ced487b665e485dab4b54d
704,265
def untempering(p):
    """
    Invert the Mersenne Twister tempering transform; see
    https://occasionallycogent.com/inverting_the_mersenne_temper/index.html

    >>> mt = MersenneTwister(0)
    >>> mt.tempering(42)
    168040107
    >>> untempering(168040107)
    42
    """
    e = p ^ (p >> 18)
    e ^= (e << 15) & 0xEFC6_0000
    e ^= (e << 7) & 0x0000_1680
    e ^= (e << 7) & 0x000C_4000
    e ^= (e << 7) & 0x0D20_0000
    e ^= (e << 7) & 0x9000_0000
    e ^= (e >> 11) & 0xFFC0_0000
    e ^= (e >> 11) & 0x003F_F800
    e ^= (e >> 11) & 0x0000_07FF
    return e
4118b55fd24008f9e96a74db937f6b41375484c3
704,271
def multiplicar(a, b):
    """
    MULTIPLICAR performs the multiplication of two numbers.

    Parameters
    ----------
    a : float
        Numeric value `a`.
    b : float
        Second numeric value `b`.

    Returns
    -------
    float
        Returns the product `a` * `b`.
    """
    return a * b
2d1a56924e02f05dcf20d3e070b17e4e602aecf6
704,272
def can_comment(request, entry):
    """Check if current user is allowed to comment on that entry."""
    return entry.allow_comments and \
        (entry.allow_anonymous_comments or request.user.is_authenticated())
04bcd019af083cff0367e236e720f4f7b00f7a65
704,274
def data_scaling(Y):
    """Scaling of the data to have percent of baseline change columnwise

    Parameters
    ----------
    Y : array of shape (n_time_points, n_voxels)
        the input data

    Returns
    -------
    Y : array of shape (n_time_points, n_voxels)
        the data after mean-scaling, de-meaning and multiplication by 100
    mean : array of shape (n_voxels,)
        the data mean
    """
    mean = Y.mean(0)
    Y = 100 * (Y / mean - 1)
    return Y, mean
94b550386b8411a96b9ccd3f5e93098560c327e1
704,278
import json


def load_json(path: str):
    """Load json file from given path and return data"""
    with open(path) as f:
        data = json.load(f)
    return data
d165d087c78a0ba88d318a6dbe8b2ac8f9a8c4b5
704,279
def _get_int_val(val, parser):
    """Get a possibly `None` single element list as an `int` by using
    the given parser on the element of the list.
    """
    if val is None:
        return 0
    return parser.parse(val[0])
d2e029657b3424027e83ee8e1e2be76e3abf8fda
704,280
def get_n_p(A_A, n_p_in='指定しない'):
    """Appendix C: Assumed number of occupants

    Args:
        A_A (float): floor area
        n_p_in (str): number-of-occupants input; one of '1人' (one
            person), '2人' (two), '3人' (three), '4人以上' (four or
            more), '指定しない' (unspecified)

    Returns:
        float: assumed number of occupants
    """
    if n_p_in is not None and n_p_in != '指定しない':
        return {
            '1人': 1.0,
            '2人': 2.0,
            '3人': 3.0,
            '4人以上': 4.0
        }[n_p_in]

    if A_A < 30:
        return 1.0
    elif A_A < 120:
        return A_A / 30
    else:
        return 4.0
db257abdb76ee35f16b07e5baccec82211737971
704,282
def make_mmvt_boundary_definitions(cv, milestone):
    """
    Take a Collective_variable object and a particular milestone and
    return an OpenMM Force() object that the plugin can use to monitor
    crossings.

    Parameters
    ----------
    cv : Collective_variable()
        A Collective_variable object which contains all the information
        for the collective variable describing this variable. In fact,
        the boundaries are contours of the function described by cv.
        This variable contains information like the groups of atoms
        involved with the CV, and the expression which describes the
        function.

    milestone : Milestone()
        A Milestone object which describes the boundary between two
        Voronoi cells. This variable contains information like the
        values of the variables which will be entered into the Force()
        object.

    Returns
    -------
    myforce : openmm.Force()
        An OpenMM force object which does not affect atomic motion, but
        allows us to conveniently monitor a function of atomic position.
    """
    myforce = cv.make_force_object()
    myforce.setForceGroup(1)
    variable_names_list = cv.add_parameters(myforce)
    cv.add_groups_and_variables(myforce,
                                cv.get_variable_values_list(milestone))
    return myforce
45baaaa70ea24cb564c529cd885597415561a25d
704,283
def parseConfigFile(configFilePath):
    """
    :param configFilePath:
    :return: a hash map of the parameters defined in the given file.
             Each entry is organized as <parameter name, parameter value>
    """
    # parse valid lines
    lines = []
    with open(configFilePath) as f:
        for line in f:
            line = line.strip()
            if line == '':
                # ignore empty line
                continue
            if line.startswith('#'):
                # ignore the comment in config file
                continue
            lines.append(line)

    params = {}
    for line in lines:
        if "=" not in line:
            raise Exception("Invalid parameter definition as \"" + line
                            + "\" in file " + configFilePath)
        # Split on the first '=' only, so values may themselves
        # contain '='.
        paramName, value = line.split("=", 1)
        params[paramName.strip()] = value.strip()
    return params
aee6a1da052f4c2ef907bf41b2cfaa4b93612a5e
704,286
def has_annotations(doc):
    """
    Check if document has any mutation mention saved.
    """
    for part in doc.values():
        if len(part['annotations']) > 0:
            return True
    return False
6b57893bc35af45950ec2eeb5008b663028d48bf
704,287
import hashlib


def calc_checksum(filename):
    """
    Calculates a checksum of the contents of the given file.

    :param filename:
    :return: the MD5 hex digest, or None if the file cannot be read
    """
    try:
        with open(filename, "rb") as f:
            contents = f.read()
        m = hashlib.md5()
        m.update(contents)
        return m.hexdigest()
    except IOError:
        return None
080e3686279ae126951cd1b66efdb9a0d2448011
704,295
import time


def get_elapsed_time(start_time) -> str:
    """ Gets nicely formatted timespan from start_time to now """
    end = time.time()
    hours, rem = divmod(end - start_time, 3600)
    minutes, seconds = divmod(rem, 60)
    return "{:0>2}:{:0>2}:{:05.2f}".format(int(hours), int(minutes), seconds)
d75a1873254e1b1cc9ffc714e65d3a9ed95e5803
704,297
def clean_string_columns(df):
    """Clean string columns in a dataframe."""
    try:
        df.email = df.email.str.lower()
        df.website = df.website.str.lower()
    except AttributeError:
        pass
    str_columns = ["name", "trade_name", "city", "county"]
    for column in str_columns:
        try:
            df[column] = df[column].astype(str).str.title()
            df[column] = df[column].astype(str).str.replace(
                "Llc", "LLC", regex=False)
            # regex=False so the dots in "L.L.C." are matched literally.
            df[column] = df[column].astype(str).str.replace(
                "L.L.C.", "LLC", regex=False)
            df[column] = df[column].astype(str).str.strip()
        except (AttributeError, KeyError):
            pass
    return df
eb9aaa474fe517b346eaa8cd93e669b3fcc3459d
704,302
def split_exon(exon, cds):
    """Takes an exon and a CDS, and returns a map of regions for each
    feature (UTR5/3, CDS) that may be inferred from the arguments. Note
    that the CDS is simply returned as is, to simplify downstream
    handling of these features."""
    results = [cds]
    if exon["start"] < cds["start"]:
        utr = dict(exon)
        utr.update(
            end=cds["start"] - 1,
            feature=(exon["strand"] == "+" and "UTR5" or "UTR3")
        )
        results.append(utr)
    if exon["end"] > cds["end"]:
        utr = dict(exon)
        utr.update(
            start=cds["end"] + 1,
            feature=(exon["strand"] == "+" and "UTR3" or "UTR5")
        )
        results.append(utr)
    return results
e2bb12a688bbe3e5c79039c2a9cce4e5aa9e9a1b
704,307
def state_dict_to_cpu(state_dict):
    """Make a copy of the state dict onto the cpu."""
    # .state_dict() references tensors, so we detach and copy to cpu
    return {key: par.detach().cpu() for key, par in state_dict.items()}
2d1fcc07ab8eac192a846cbcdb8d7363ffd8e9e8
704,308
import unicodedata


def normalize_str(text):
    """
    Normalizes unicode input text (for example remove national characters)

    :param text: text to normalize
    :type text: unicode
    """
    # unicodedata NFKD doesn't properly convert the Polish ł
    trans_dict = {
        u'ł': u'l',
        u'Ł': u'L'
    }
    trans = dict((ord(k), ord(v)) for k, v in trans_dict.items())
    text = text.translate(trans)
    return unicodedata.normalize('NFKD', text).encode('ascii', 'ignore')
40c8f77cdbf08b12a3867cd4a9d9bb91b323b50b
704,309
def public_dict(obj):
    """Same as obj.__dict__, but without private fields."""
    return {k: v for k, v in obj.__dict__.items() if not k.startswith('_')}
2edee1a17d0dad6ab4268f80eb565406656a77b4
704,317
def wilight_to_opp_position(value):
    """Convert wilight position 1..255 to opp.format 0..100."""
    return min(100, round((value * 100) / 255))
4f6e4298a77c29ff0375d0ce5e5fd23e77e30622
704,319
def get_task_link(task_id, task_df):
    """Get the link from the PYBOSSA task."""
    try:
        task = task_df.loc[int(task_id)]
    except KeyError:
        return None
    return task['info']['link']
d90e994d2f0a4718bbedf8fd5fd534f6d5d32549
704,320
from typing import List
from typing import Tuple


def partwise_function(function: str, parts: List[Tuple[str, str]],
                      add_zero_otherwise: bool = True) -> str:
    """
    Returns a string representing the definition of a part-wise
    mathematical function.

    **Parameters**

    - `function`: str
        The name of the function.
    - `parts`: list
        Each element is a tuple whose 1st element is the value of the
        function and whose 2nd is a condition stating where the 1st
        applies.
    - `add_zero_otherwise`: bool
        If True, one last part stating "0, otherwise" is added.

    **Returns**

    `out`: str
        TeX compatible string.
    """
    res = f'{function}='
    res += '\\begin{cases}\n'
    for p in parts:
        res += f'{p[0]},& {p[1]} \\\\'
    if add_zero_otherwise:
        res += r'0,& \text{otherwise}'
    res += r'\end{cases}'
    return res
b2954a9c947add4cf4b4740ac62f4ca16d3e1d70
704,321
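An illustrative call; the function name and parts are made up. This renders a ReLU-style definition with the automatic "0, otherwise" branch:

print(partwise_function("f(x)", [("x", "x > 0")]))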
def read_restrictions_file(file):
    """
    <Purpose>
      Reads in the contents of a restrictions file.

    <Arguments>
      file: name/path of the file to open

    <Returns>
      A list, where each element is a line in the file
    """
    # Get the file object in read mode; universal newlines are the
    # default in Python 3 (the old "rU" mode is deprecated).
    fileo = open(file, "r")

    # Read in all the contents
    contents = fileo.readlines()

    # Close the file object
    fileo.close()

    return contents
df7207ea3bab49af47fcfbdaa9cc51f54692bb85
704,322
def get_file_content(file: str) -> str:
    """ Get file content. """
    try:
        with open(file, 'r') as f:
            content = f.read()
        return content
    except IOError as e:
        print(e)
        print('Exiting...')
        exit(1)
c10407d73ba2cd2d84eb99c0f131d3895ede460d
704,327
def resource_name_for_resource_type(resource_type, row):
    """Return the resource name for the resource type.

    Each returned row contains all possible changed fields. This function
    returns the resource name of the changed field based on the resource
    type. The changed field's parent is also populated but is not used.

    Args:
        resource_type: the string equivalent of the resource type
        row: a single row returned from the service

    Returns:
        The resource name of the field that changed.
    """
    resource_name = ''  # default for UNSPECIFIED or UNKNOWN
    if resource_type == 'AD_GROUP':
        resource_name = row.change_status.ad_group.value
    elif resource_type == 'AD_GROUP_AD':
        resource_name = row.change_status.ad_group_ad.value
    elif resource_type == 'AD_GROUP_CRITERION':
        resource_name = row.change_status.ad_group_criterion.value
    elif resource_type == 'CAMPAIGN':
        resource_name = row.change_status.campaign.value
    elif resource_type == 'CAMPAIGN_CRITERION':
        resource_name = row.change_status.campaign_criterion.value
    return resource_name
500bc32be1765f1e516f4f7cd386b24c3c4f373f
704,329
import re


def to_yw7(text):
    """Convert html tags to yWriter 6/7 raw markup.
    Return a yw6/7 markup string.
    """
    # Clean up polluted HTML code.
    text = re.sub('</*font.*?>', '', text)
    text = re.sub('</*span.*?>', '', text)
    text = re.sub('</*FONT.*?>', '', text)
    text = re.sub('</*SPAN.*?>', '', text)

    # Put everything in one line.
    text = text.replace('\n', ' ')
    text = text.replace('\r', ' ')
    text = text.replace('\t', ' ')
    while '  ' in text:
        text = text.replace('  ', ' ').rstrip().lstrip()

    # Replace HTML tags by yWriter markup.
    text = text.replace('<i>', '[i]')
    text = text.replace('<I>', '[i]')
    text = text.replace('</i>', '[/i]')
    text = text.replace('</I>', '[/i]')
    text = text.replace('</em>', '[/i]')
    text = text.replace('</EM>', '[/i]')
    text = text.replace('<b>', '[b]')
    text = text.replace('<B>', '[b]')
    text = text.replace('</b>', '[/b]')
    text = text.replace('</B>', '[/b]')
    text = text.replace('</strong>', '[/b]')
    text = text.replace('</STRONG>', '[/b]')
    text = re.sub('<em.*?>', '[i]', text)
    text = re.sub('<EM.*?>', '[i]', text)
    text = re.sub('<strong.*?>', '[b]', text)
    text = re.sub('<STRONG.*?>', '[b]', text)

    # Remove orphaned tags.
    text = text.replace('[/b][b]', '')
    text = text.replace('[/i][i]', '')
    text = text.replace('[/b][b]', '')
    return text
59b9b961f7a94d23e2829b9d940f63c32207600b
704,330
import time


def wait_for_job(res, ping_time=0.5):
    """
    Blocks execution and waits for an async Forest Job to complete.

    :param JobResult res: The JobResult object to wait for.
    :param ping_time: The interval (in seconds) at which to ping the server.
    :return: The completed JobResult
    """
    while not res.is_done():
        res.get()
        time.sleep(ping_time)
    return res
1a7202f58affa97b0001b246fb7cd187d6a59f44
704,335
def numeric_type(param):
    """
    Checks parameter type.
    True for float, int, or None data; False otherwise.

    :param param: input param to check
    """
    return type(param) in (float, int) or param is None
a5f67a30b3128c1214d8825abbc6ae5170680d80
704,336
def get_page_generator(s, max_items=0):
    """Get the generator that returns the Page objects
    that we're interested in, from Site s.
    """
    page_generator = s.allpages()
    if max_items > 0:
        page_generator.set_maximum_items(max_items)
    return page_generator
d53a890523c999df878fecc71ef1dbd8d17c188c
704,338
def segment_text_to_sentences(text_file, sentence_splitter):
    """ Segment text into sentences. Text is provided by BRAT in
    .txt file.

    Args:
        text_file (str): the full path to the BRAT .txt file.
        sentence_splitter (spacy LM): SpaCy EN language model.

    Returns:
        sentences (list((int, int, str))): list of sentence spans.
            Spans are triples of (start_offset, end_offset, text),
            where offset is relative to the text.
    """
    sentences = []
    with open(text_file, "r") as ftext:
        for line in ftext:
            splits = sentence_splitter(line.strip())
            for sent in splits.sents:
                sentences.append((sent.start_char, sent.end_char, sent.text))
    return sentences
d74857a4931d162b9573b1b086a8720563b4fd41
704,340
def param_to_string(metric) -> str:
    """Convert a list / tuple of parameters returned from IE to a string"""
    if isinstance(metric, (list, tuple)):
        return ', '.join([str(x) for x in metric])
    else:
        return str(metric)
54476f88936336728ba73425bb57860e17fb7561
704,342
def fitness_func_large(vector):
    """ returns a very large number for fitness"""
    return 9999999999999999999
08e6f43c5f891fe7138dfc7b1d0809ba048bf070
704,345
def remove_prefix(s, pre):
    """
    Remove prefix from the beginning of the string

    Parameters:
    ----------
    s : str
    pre : str

    Returns:
    -------
    s : str
        string with "pre" removed from the beginning (if present)
    """
    if pre and s.startswith(pre):
        return s[len(pre):]
    return s
6bae14cddd38fcfabfb0fadb9f4dbeaea81ff4ac
704,346
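An illustrative pair of calls: the prefix is stripped only when it is actually present.

print(remove_prefix("unittest", "unit"))  # 'test'
print(remove_prefix("test", "unit"))      # 'test'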
def remove_empty(df):
    """
    Drop all rows and columns that are completely null.

    Implementation is shamelessly copied from `StackOverflow`_.

    .. _StackOverflow: https://stackoverflow.com/questions/38884538/python-pandas-find-all-rows-where-all-values-are-nan  # noqa: E501

    Functional usage example:

    .. code-block:: python

        df = remove_empty(df)

    Method chaining example:

    .. code-block:: python

        df = pd.DataFrame(...)
        df = jn.DataFrame(df).remove_empty()

    :param df: The pandas DataFrame object.
    :returns: A pandas DataFrame.
    """
    nanrows = df.index[df.isnull().all(axis=1)]
    df.drop(index=nanrows, inplace=True)

    nancols = df.columns[df.isnull().all(axis=0)]
    df.drop(columns=nancols, inplace=True)

    return df
c2ea9fc13bfa57bc357a83c607bfe9ce9348fb2e
704,348
from typing import Optional


def calculate_serving_size_weight(
    weight: Optional[float],
    number_of_servings: Optional[float]
) -> Optional[float]:
    """
    Given a weight (representing the total weight of the component
    included in a recipe) and a number of servings (how many servings of
    the component are included), returns a number representing the
    weight of just one serving of the component.
    """
    if weight is not None and number_of_servings is not None:
        return weight / number_of_servings
    else:
        return None
b22732a60f1f6000277861a615c78e785b4757bb
704,349
def format_key(key):
    """
    Format the key provided for consistency.
    """
    if key:
        return key if key[-1] == "/" else key + "/"
    return "/"
8b5e41bb76c524ec8c45a22ad0dae84c84ed530b
704,350
from typing import Dict


def _define_problem_with_groups(problem: Dict) -> Dict:
    """
    Checks if the user defined the 'groups' key in the problem
    dictionary. If not, makes the 'groups' key equal to the variables
    names. In other words, the number of groups will be equal to the
    number of variables, which is equivalent to no groups.

    Parameters
    ----------
    problem : dict
        The problem definition

    Returns
    -------
    problem : dict
        The problem definition with the 'groups' key, even if the user
        doesn't define it
    """
    # Checks if there isn't a key 'groups' or if it exists and is set
    # to 'None'
    if 'groups' not in problem or not problem['groups']:
        problem['groups'] = problem['names']
    elif len(problem['groups']) != problem['num_vars']:
        raise ValueError("Number of entries in 'groups' should be the "
                         "same as in 'names'")
    return problem
ab29954f3349509a9153219d040feb8fa3125ec7
704,351
def gaspari_cohn_mid(z, c):
    """
    Gaspari-Cohn correlation function for middle distances
    (between c and 2*c)

    Arguments:
    - z: Points to be evaluated
    - c: Cutoff value
    """
    return 1. / 12 * (z / c) ** 5 - 0.5 * (z / c) ** 4 \
        + 5. / 8 * (z / c) ** 3 + 5. / 3 * (z / c) ** 2 \
        - 5 * z / c - 2. / 3 * c / z + 4
0852e84c1ce10856d69420fcc585054488591e73
704,352
import math


def DrawTextBar(value, max_value, max_width=53):
    """Return a simple ASCII bar graph, making sure it fits within
    max_width.

    Args:
        value: integer or float representing the value of this bar.
        max_value: integer or float representing the largest bar.
        max_width: How many characters this graph can use (int)

    Returns:
        string
    """
    hash_width = max_value / max_width
    return int(math.ceil(value / hash_width)) * '#'
7f4b267527317cbceddadc9f7a0307f8ec430bb4
704,355
def _FindBinmanNode(dtb):
    """Find the 'binman' node in the device tree

    Args:
        dtb: Fdt object to scan

    Returns:
        Node object of /binman node, or None if not found
    """
    for node in dtb.GetRoot().subnodes:
        if node.name == 'binman':
            return node
    return None
bf924d173a1adf81c1705ad1ea1fae490567a317
704,356
def _host_is_same(host1: str, host2: str) -> bool:
    """Check if host1 and host2 are the same."""
    return host1.split(":")[0] == host2.split(":")[0]
0bd9393786801d0f69d4982fc9f8edce378e9656
704,359
import math


def round_half_up(n: float, decimals: int = 0) -> float:
    """This function rounds to the nearest integer number
    (e.g. 2.4 becomes 2.0 and 2.6 becomes 3); in case of tie, it rounds
    up (e.g. 1.5 becomes 2.0 and not 1.0)

    Args:
        n (float): number to round
        decimals (int): number of decimal figures that we want to keep;
            defaults to zero

    Returns:
        rounded_number (float): input number rounded with the desired
            decimals
    """
    multiplier = 10 ** decimals
    rounded_number = math.floor(n * multiplier + 0.5) / multiplier
    return rounded_number
e0aab5cba456b4ffe6fab11a21b97fe4e17b045a
704,362
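Two illustrative calls showing the tie-breaking: Python's built-in round() uses banker's rounding and gives round(2.5) == 2, whereas this helper rounds the tie up.

print(round_half_up(2.5))      # 3.0
print(round_half_up(1.45, 1))  # 1.5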
from typing import Set


def _possible_edges(n1: Set, n2: Set, directed: bool,
                    self_loops: bool = False):
    """Compute the number of possible edges between two sets."""
    a = n1.intersection(n2)
    e = (len(n1) - len(a)) * (len(n2) - len(a))
    if directed:
        e *= 2
    if self_loops:
        e += len(n1) + len(n2) - len(a)
    return e
4cf21d9521c3d071d7d1376bd917f2ec39435108
704,363
def default_category_orders() -> dict:
    """Returns the default dictionary of category orders"""
    day_order = ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"]
    weekend_order = ["Weekday", "Weekend"]
    season_order = ["Spring", "Summer", "Autumn", "Winter"]
    month_order = [
        "January",
        "February",
        "March",
        "April",
        "May",
        "June",
        "July",
        "August",
        "September",
        "October",
        "November",
        "December",
    ]
    category_orders = {
        "dayofweek": day_order,
        "weekend": weekend_order,
        "season": season_order,
        "month": month_order,
    }
    return category_orders
4110287bc30445f27c7c3d0c38cb662d769a5217
704,365
import re


def is_guid(techfin_tenant):
    """Validate guid arg

    Args:
        techfin_tenant (str): techfin tenant id

    Returns:
        re.Match or None: a match object if the value is a valid guid,
        None otherwise (truthy/falsy, so it can be used as a boolean)
    """
    c = re.compile(
        '[0-9a-f]{8}-[0-9a-f]{4}-[1-5][0-9a-f]{3}'
        '-[89ab][0-9a-f]{3}-[0-9a-f]{12}', re.I)
    res = c.match(techfin_tenant)
    return res
7242f0da279375ab5873670ffef1fd4aa8749546
704,366
def __standard_cand_fun(candidates):
    """
    Convert candidates from the forms accepted by :py:func:`recommend`
    into a standard form, a function that takes a user and returns a
    candidate list.
    """
    if isinstance(candidates, dict):
        return candidates.get
    elif candidates is None:
        return lambda u: None
    else:
        return candidates
ad206802bfbcd0ec8f4601ebc043f8d468709c75
704,368
def dBrickId(brickId):
    """Return brick id if valid, raise an exception in other case"""
    if 0 <= brickId <= 15:
        return brickId
    else:
        raise ValueError(
            '{} is not a valid Brick Id, Brick Id must be between '
            '0-15'.format(brickId))
10e0f27f179dcd54c5cc4967ea960b77a4c5a924
704,370
import hashlib


def hash(text, digest_alg='md5'):
    """
    Generates hash with the given text using the specified
    digest hashing algorithm
    """
    if not isinstance(digest_alg, str):
        h = digest_alg(text)
    else:
        h = hashlib.new(digest_alg)
        h.update(text)
    return h.hexdigest()
386268086a55b8e622c00b407cabd3207bb94ffb
704,371
def merge_params(params, config):
    """Merge CLI params with configuration file params.

    Configuration params will overwrite the CLI params.
    """
    return {**params, **config}
a1dc002a900968e6cf7c5ba401519759e6ef485e
704,372
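An illustrative merge: for shared keys the config value wins, because it is unpacked last.

print(merge_params({"verbose": False, "n": 1}, {"verbose": True}))
# {'verbose': True, 'n': 1}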
def tasks(tmpdir):
    """
    Set up a project with some tasks that we can test displaying
    """
    task_l = [
        "",
        " ^ this is the first task (released)",
        " and it has a second line",
        " > this is the second task (committed)",
        " . this is the third task (changed, not yet committed)",
        " - this is the fourth task (not yet made)",
        " and this one has a second line also",
        " + fifth task -- completed",
        " < sixth task -- moved elsewhere",
        " x seventh task -- abandoned",
        "",
    ]
    # project dir
    prjdir = tmpdir.join("myproj")
    # .project file
    prjdir.join(".project").ensure()
    # DODO file with content
    dodo = prjdir.join("DODO")
    dodo.write("\n".join(task_l) + "\n")
    data = {
        'tmpdir': tmpdir,
        'prj': prjdir,
        'dodo': dodo,
    }
    return data
64104bde2aab55021cf0d49fbb1d47670d0e4e0d
704,376
def singer_map(pop, rate):
    """
    Define the equation for the singer map.

    Arguments
    ---------
    pop: float
        current population value at time t
    rate: float
        growth rate parameter values

    Returns
    -------
    float
        scalar result of singer map at time t+1
    """
    return rate * (7.86 * pop - 23.31 * pop ** 2 + 28.75 * pop ** 3
                   - 13.3 * pop ** 4)
84aba1d96304b67fba1b4a0e7a909e23121a3d6b
704,378
import json


def format_rpc_response(data, exception=None):
    """
    Formats a response from a RPC Manager.
    It provides the data and/or a serialized exception so it can be
    re-created by the caller.

    :param Any data: A JSON Serializable object.
    :param Exception exception: An Exception object
    :return str: JSON Response.
    """
    exception_data = None
    if exception:
        args = (exception.__getargs__()
                if hasattr(exception, "__getargs__") else exception.args)
        kwargs = (exception.__getkwargs__()
                  if hasattr(exception, "__getkwargs__") else {})
        if kwargs is None:
            kwargs = {}
        try:
            module = exception.__module__
        except AttributeError:
            module = None
        exception_data = {
            'exception': type(exception).__name__,
            'message': str(exception),
            'args': args,
            "kwargs": kwargs,
            'module': module,
        }

    return json.dumps({
        'data': data,
        'exception': exception_data
    })
c900e2512fd486c91789ab4312883061553a2fb1
704,379
import logging


def select_best_haplotype_match(all_matches):
    """Returns the best HaplotypeMatch among all_matches.

    The best matching HaplotypeMatch is the one with the lowest
    match_metrics score.

    Args:
        all_matches: iterable[HaplotypeMatch]. An iterable of
            HaplotypeMatch objects we want to select the best match from.

    Returns:
        The best matching HaplotypeMatch object.
    """
    sorted_matches = sorted(all_matches, key=lambda x: x.match_metrics)
    best = sorted_matches[0]
    equivalents = [
        f for f in all_matches if f.match_metrics == best.match_metrics
    ]

    # redacted
    if len(equivalents) > 1:
        for i, f in enumerate(equivalents):
            extra_info = 'best' if i == 0 else i
            logging.warning('Equivalent match to best: %s [%s]', f,
                            extra_info)

    return equivalents[0]
0e40fef830055e5cd297b0f00672d8b0caedc62e
704,380
def get_tokens_list_from_column_list(column_name_list: list,
                                     delimiter: str = '!!') -> list:
    """Function that returns a list of tokens present in the list of
    column names.

    Args:
        column_name_list: The list of column name strings.
        delimiter: delimiter separating tokens within a single column
            name string.

    Returns:
        A list of tokens present in the list of column names.
    """
    tokens = []
    for column_name in column_name_list:
        for tok in column_name.split(delimiter):
            if tok not in tokens:
                tokens.append(tok)
    return tokens
66e2c3c280188d2cc3e8df35e0112095f3244918
704,381
def load_variable_config(project_config):
    """Extract the variable configuration out of the project
    configuration.

    Args:
        project_config (dict-like): Project configuration.

    Returns:
        dict: Variable dictionary with name: [levels] (a single level
        will have a list containing None.)
    """
    # Extract the different rank variables
    v2ds = project_config['variables_2d']
    v3ds = project_config['variables_3d']

    # Create a dictionary of variables to process keyed to an empty
    # list of levels for 2D
    variables = {v2d: [None] for v2d in v2ds}

    # Add in the 3D variables, with levels this time
    for v3d, levels in v3ds.items():
        variables[v3d] = levels

    return variables
37caccfa5f9c3a724e61233610c3e4a3e9938695
704,385
import torch


def cross_op_torch(r):
    r"""
    Return the cross operator as a matrix,
    i.e. for input vector r \in \R^3
    output rX s.t. rX.dot(v) = np.cross(r, v)
    where rX \in \R^{3 X 3}
    """
    if len(r.shape) > 1:
        rX = torch.zeros(r.shape[0], 3, 3).to(r)
        rX[..., 0, 1] = -r[..., 2]
        rX[..., 0, 2] = r[..., 1]
        rX[..., 1, 2] = -r[..., 0]
        rX = rX - rX.transpose(-1, -2)
    else:
        rX = torch.zeros(3, 3).to(r)
        rX[0, 1] = -r[2]
        rX[0, 2] = r[1]
        rX[1, 2] = -r[0]
        rX = rX - rX.T
    return rX
04f926f00f6ed58bee3feae80ef573f5a8822d20
704,386
from typing import Callable
from typing import Optional
from typing import Union
from typing import Tuple
from typing import Iterable


def solve_nr(
        f: Callable[[float], float],
        df: Callable[[float], float],
        estimate: float,
        eps: Optional[float] = 1.0e-6,
        max_num_iter=100,
        throw_if_failed_converge=True,
        return_vector=False) -> Union[Tuple[float, int], Iterable[float]]:
    """
    Solves f(x) = 0 using the Newton-Raphson method

    :param f: function of x to solve
    :param df: derivative of f(x)
    :param estimate: initial estimate
    :param eps: max absolute error; if None, will continue calculating
        until max_num_iter is reached
    :param max_num_iter: Max number of iterations
    :param throw_if_failed_converge: if True, will throw if fails to
        converge in max_num_iter (unless eps is None)
    :param return_vector: if true, returns vector of all iterated values
    :return: x, number of iterations; or vector of all iterations if
        return_vector is True
    """
    if max_num_iter < 1:
        raise ValueError('max_num_iter must be at least 1')

    x = estimate
    xv = [] if return_vector else None

    n = 0
    for n in range(1, max_num_iter + 1):
        if xv is not None:
            xv.append(x)
        residue = f(x) / df(x)
        x -= residue
        if eps and (abs(residue) < eps):
            break
    else:
        if throw_if_failed_converge and eps:
            raise RuntimeError('Failed to converge in %i iterations' % n)

    if xv is not None:
        return xv
    else:
        return x, n
c6ab8b6bb27f8b9be9c31fe7cbd58300637d9fef
704,394
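A minimal sketch solving x^2 - 2 = 0 from an initial guess of 1.0; the target function is illustrative.

root, n_iter = solve_nr(lambda x: x * x - 2, lambda x: 2 * x, 1.0)
print(root, n_iter)  # ~1.4142135, converges in a handful of iterations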
import calendar

import pytz


def epoch(dt):
    """
    Returns the epoch timestamp of a timezone-aware datetime object.
    """
    return calendar.timegm(dt.astimezone(pytz.utc).timetuple())
027ea75bf75b6bb6b4da14b2bed1afc363a9121a
704,397
def set_produce_compilation_cache(enabled: bool) -> dict:
    """Forces compilation cache to be generated for every subresource
    script.

    Parameters
    ----------
    enabled: bool

    **Experimental**
    """
    return {
        "method": "Page.setProduceCompilationCache",
        "params": {"enabled": enabled},
    }
3d2dd7fa6c8d04713ace26c666d9b00407a5a586
704,398
def clean_column(df, column):
    """
    Function to return clean column text.
    Pass each cell to a cleaner and return the cleaned text for that
    specific column

    :params:
    --------
        :df dataframe(): containing the column
        :column str(): in which column the text is located

    :returns:
    ---------
        :list(): of str containing the cleaned text
    """
    # Remove all the NaN values and transform it into a list
    return list(df[column].dropna().values)
095a854c452f87b9a960eabb81ace5c18814f266
704,404
def file_readlines(fn):
    """Open file with name `fn`, return open(fn).readlines()."""
    fd = open(fn, 'r')
    lst = fd.readlines()
    fd.close()
    return lst
2594e6763b566f4e83844f2f4457bcc8ea3663a5
704,407
def __num_elems(shape):
    """Returns the number of elements in the given shape

    Args:
        shape: TensorShape

    Return:
        tot_elems: int
    """
    tot_elems = 1
    for s in shape:
        tot_elems *= int(s)
    return tot_elems
fd4f72394b22c98e6bedb545d7d11b8bfae11add
704,411
def players_player_id_get(player_id):  # noqa: E501
    """Retrieve a single player's record

    Returns a player record  # noqa: E501

    :param player_id: ID of player to return
    :type player_id: str

    :rtype: Player
    """
    return 'do some magic!'
d9c2c92dbba3d139b2b5188e8722a0add7668393
704,417
def build_info_str(username: str, name_len: int, remaining_chip: int,
                   action: str, chip: int, is_waiting: bool,
                   countdown: int) -> str:
    """Build a string to explain the action of a user

    Args:
        username (str): user name
        name_len (int): characters to show the name
        remaining_chip (int): remaining chip of the user
        action (str): the action being taken, should be one of the
            following: check, bet, raise, all-in, fold.
            The difference between `bet` and `raise` is that `bet` is
            the first put-chip action, while `raise` is another
            put-chip action against a prior `bet` chip
        chip (int): the chip of an action, only meaningful when
            `action` is `bet`, `raise` or `all-in`
        is_waiting (bool): a flag that indicates if this user is in the
            execution position
        countdown (int): the countdown of waiting, only meaningful when
            `is_waiting` is `True`

    Return:
        info_str (str): a string to explain the action of a user
    """
    info = f"{username:{name_len}} (${str(remaining_chip) + ')':<5} {action}"
    if action in ("bet", "raise", "all-in"):
        info += f" ${chip} "
    if is_waiting:
        info += f" {countdown}s"
        info = "-> " + info
    else:
        info = " " + info
    return info
1ecbb6c33d54a55500d51ce09cf9740ac28def96
704,419
def get_similarity_score(dict1, dict2, dissimilarity=False):
    """
    The keys of dict1 and dict2 are all lowercase,
    you will NOT need to worry about case sensitivity.

    Args:
        dict1: frequency dictionary of words or n-grams for one text
        dict2: frequency dictionary of words or n-grams for another text
        dissimilarity: Boolean, optional parameter. Default to False.
            If this is True, return the dissimilarity score,
            100*(DIFF/ALL), instead.

    Returns:
        int, a percentage between 0 and 100, inclusive
        representing how similar the texts are to each other

        The difference in text frequencies = DIFF sums words
        from these three scenarios:
        * If a word or n-gram occurs in dict1 and dict2 then
          get the difference in frequencies
        * If a word or n-gram occurs only in dict1 then take the
          frequency from dict1
        * If a word or n-gram occurs only in dict2 then take the
          frequency from dict2

        The total frequencies = ALL is calculated by summing
        all frequencies in both dict1 and dict2.
        Return 100*(1-(DIFF/ALL)) rounded to the nearest whole number
        if dissimilarity is False, otherwise returns 100*(DIFF/ALL)
    """
    DIFF = 0
    for i in dict1:
        # Boolean used so that a word/n-gram that only occurs in dict1
        # is added to DIFF exactly once.
        x = False
        for j in dict2:
            # Exact key comparison (==) is used instead of substring
            # matching: e.g. "meme" is contained in "memes" but the
            # two are different keys.
            if i == j:
                # The word/n-gram appears in both dictionaries, so the
                # absolute difference of the two frequencies is added
                # to DIFF.
                DIFF += abs(dict1[i] - dict2[j])
                x = True
        if x == False:
            # The word/n-gram only occurs in dict1.
            DIFF += dict1[i]
    for j in dict2:
        # Same use of the boolean, for the same reason as the previous
        # loop.
        x = False
        for i in dict1:
            if i == j:
                # Already counted in the first loop, so nothing more is
                # added here.
                x = True
        if x == False:
            # The word/n-gram only occurs in dict2.
            DIFF += dict2[j]
    # ALL is the sum of all frequencies in both dictionaries.
    ALL = 0
    for i in dict1:
        ALL += dict1[i]
    for j in dict2:
        ALL += dict2[j]
    # Depending on the dissimilarity flag, return one score or the other.
    if dissimilarity == False:
        # Similarity between the dictionaries of words/n-grams.
        result = round(100 * (1 - (DIFF / ALL)))
    else:
        # Dissimilarity between the dictionaries of words/n-grams.
        result = round(100 * (DIFF / ALL))
    return result
31e8602d6ef098a58a8eaf497badebf2e19288eb
704,421
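A worked example with illustrative frequency dictionaries: DIFF = |2 - 1| (shared "a") + 1 ("b" only in dict1) + 1 ("c" only in dict2) = 3, ALL = 2 + 1 + 1 + 1 = 5, so the similarity is round(100 * (1 - 3/5)) = 40.

print(get_similarity_score({"a": 2, "b": 1}, {"a": 1, "c": 1}))  # 40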
def predict_fn(input_data, model):
    """Predict using input and model"""
    return model(input_data)
00f7bf0bd71f70833f8f77b16ffa62559747e915
704,422
def get_unnormalized_text(words):
    """ Returns the (unnormalized) text composed from the given words."""
    return "".join([x.unnormalized_with_whitespaces for x in words])
162854d917ee4d49c3b2b824abc07697ac4f05ba
704,423
import json


def get_ability_icons(champion, input_path):
    """
    This function takes a champion and input path strings as input and
    returns a dictionary of png file paths with keys corresponding to
    the following abilities: Passive, Q, W, E, and R
    """
    global ability_icon_paths
    ability_icon_paths = dict()

    # Rek'Sai appears to be the exception in naming conventions
    if champion == 'Reksai':
        champion = 'RekSai'

    # Read champ-specific json
    with open(f"{input_path}{champion}.json") as f:
        data = json.load(f)

    P_png = data['data'][champion]['passive']['image']['full']
    Q_png = data['data'][champion]['spells'][0]['image']['full']
    W_png = data['data'][champion]['spells'][1]['image']['full']
    E_png = data['data'][champion]['spells'][2]['image']['full']
    R_png = data['data'][champion]['spells'][3]['image']['full']

    ability_icon_paths['Passive'] = f"data/dragontail-11.1.1/11.1.1/img/passive/{P_png}"
    ability_icon_paths['Q'] = f"data/dragontail-11.1.1/11.1.1/img/spell/{Q_png}"
    ability_icon_paths['W'] = f"data/dragontail-11.1.1/11.1.1/img/spell/{W_png}"
    ability_icon_paths['E'] = f"data/dragontail-11.1.1/11.1.1/img/spell/{E_png}"
    ability_icon_paths['R'] = f"data/dragontail-11.1.1/11.1.1/img/spell/{R_png}"
    return ability_icon_paths
e33c01bedcd8bf20959978df2bc2b33b934e2181
704,425
def _l1_regularization(l1, model):
    """Computes the L1 regularization for the given model

    Args:
        l1 (float): L1 parameter
        model (:obj:`torch.nn.Module`): Model to use

    Returns:
        float: L1 loss (i.e. l1 * l1_norm(params))
    """
    l1_loss = sum(param.norm(1) for param in model.parameters())
    return l1 * l1_loss
32826672a7de00f8a0412e2496e6ebfea213b502
704,427