content : string (lengths 35 – 416k)
sha1 : string (length 40)
id : int64 (0 – 710k)
import requests

def query_yelp_lookup(biz_id):
    """ Look up a restaurant using its id """
    headers = {'Authorization': ('Bearer '
                                 'w5JFtwCUKq05GlSpm8cKo51dBYDQ6r9tyzo-qRsKt4wDyB5'
                                 '_ro6gW5gnG9hS6bvnNHNxOQLHfw7o_9S1e86nkvgcU7DQI_'
                                 'sM6GVt9rqcq_rRYKtagQrexuH0zsU0WXYx')}
    url = 'https://api.yelp.com/v3/businesses/' + biz_id
    query = requests.get(url, headers=headers)
    return query.json()
ab2087d42833f0092229870ab3208a24bd041b95
706,344
def northing_and_easting(dictionary):
    """
    Retrieve and return the northing and easting strings to be used as
    dictionary keys

    Parameters
    ----------
    dictionary : dict

    Returns
    -------
    northing, easting : tuple
    """
    # The original `if not 'x' and 'y' in dictionary.keys()` never took the
    # first branch: `not 'x'` is always False. Check both keys explicitly.
    if 'x' not in dictionary or 'y' not in dictionary:
        northing = 'latitude'
        easting = 'longitude'
    else:
        northing = 'x'
        easting = 'y'
    return northing, easting
2f41d8b681d27f6ef29265c1945591ea18bba79f
706,345
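# Usage sketch for northing_and_easting above (illustrative dicts, not part
# of the dataset record), showing both branches:
assert northing_and_easting({'x': 1, 'y': 2}) == ('x', 'y')
assert northing_and_easting({'latitude': 51.5, 'longitude': -0.13}) == ('latitude', 'longitude')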
import sys

def decode_path(name):
    """ Attempt to decode path with correct encoding """
    return name.decode(sys.getfilesystemencoding())
14da12b60c1f734e59ee5daec249c3658f3a23e4
706,346
import os
import pickle

def save_account(account):
    """ Function that serializes the account such that it can be saved. """
    root_dir = "./accounts/" + account.name + "/"
    if not os.path.exists(root_dir):
        os.makedirs(root_dir)
    with open(root_dir + account.name, "wb+") as f:
        pickle.dump(account, f)
    return 0
08e0253764695dd71e767190c70dd32189839988
706,347
def totaled_no_review_url(cc, sql_time_specification):  # pragma: no cover
    """Counts the number of commits with no review url in a given timeframe

    Args:
        cc(cursor)
        sql_time_specification(str): a sql command to limit the dates of the
            returned results

    Return:
        count(int): a count of all commits with no review_url
        results(list): a list of lists with all tbr'ed commits with no lgtm
            in the format [rietveld_url, git_timestamp, git_subject, git_hash]
    """
    cc.execute("""SELECT git_commit.review_url, git_commit.timestamp,
                  git_commit.subject, git_commit.hash
                  FROM git_commit
                  WHERE git_commit.review_url = '' AND %s"""
               % sql_time_specification)
    result = cc.fetchall()
    count = len(result)
    formatted_data = []
    for data in result:
        subject = data[2]
        formatted_data.append([data[0], data[1].strftime("%Y-%m-%d %H:%M:%S"),
                               subject.replace('-', ' '), data[3]])
    results = sorted(formatted_data, key=lambda x: x[1], reverse=True)
    return count, results
027f49b13316ecb36eed3e7dde880848b261e3b4
706,348
def sort(obs, pred):
    """ Return sorted obs and pred time series """
    obs = obs.sort_values(ascending=True)
    pred = pred.sort_values(ascending=True)
    return obs, pred
11c44c1fd605611a2722321b3c3d58a822b9c643
706,349
import random

def random_point_of_triangle(vertices):
    """Compute a random point of the triangle with given vertices"""
    p, q, r = vertices
    pq = q - p
    pr = r - p
    while True:
        x = random.random()
        y = random.random()
        if x + y <= 1:
            return p + pq*x + pr*y
ba3bf9183ddae4a16561a06b6f2455ce0ede6c8f
706,350
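# Usage sketch for random_point_of_triangle above; assumes numpy-style vertex
# vectors so the arithmetic (q - p, p + pq*x) is element-wise (illustrative):
import numpy as np
pt = random_point_of_triangle((np.array([0., 0.]), np.array([1., 0.]), np.array([0., 1.])))
assert pt[0] >= 0 and pt[1] >= 0 and pt[0] + pt[1] <= 1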
import time

def get_minutes(hour: str) -> int:
    """
    Get total number of minutes from time in %H:%M .

    Args:
        hour (str): String containing time in 24 hour %H:%M format

    Returns:
        int: Returns total number of minutes
    """
    t = time.strptime(hour, '%H:%M')
    minutes = t[3] * 60 + t[4]
    return minutes
069835bdb6b0919d6206e0379a1933986ad2d5bd
706,351
def snr2Ivar(flux, snr):
    """
    Estimate the inverse variance given flux and S/N.

    Parameters
    ----------
    flux : scalar or array of float
        Flux of the object.
    snr : scalar or array of float
        Signal to noise ratio
    """
    return 1.0 / ((flux / snr) ** 2.0)
91c76cd942a8f37f57a227ccb35cf4968a16193b
706,353
import csv

def readCSV(name, shape=[None], delimiter=","):
    """
    Read the csv file `name`.
    Returns a matrix with the data and the header.
    """
    data = []
    with open(name, 'r') as f:
        reader = csv.reader(f, delimiter=delimiter)
        for row in reader:
            data.append(row[slice(*shape)])
    return data
789341daf51b2f1e92086a42698ea0fef1130505
706,354
import os
import time

def wait_for_save(filename, timeout=5):
    """Waits for FILENAME to update, waiting up to TIMEOUT seconds.
    Returns True if a save was detected, and False otherwise.
    """
    modification_time = os.path.getmtime(filename)
    start_time = time.time()
    while time.time() < start_time + timeout:
        if (os.path.getmtime(filename) > modification_time and
                os.path.getsize(filename) > 0):
            return True
        time.sleep(0.2)
    return False
fa65a638188d32dba9904bb19e2327f5b0390996
706,355
def preprocess_dataframe(data):
    """Helper method to preprocess the dataframe.
    Creates new columns for year, month, recalls and percentage change.
    Limits the date range for the experiment (these data are trustworthy)."""
    data['recalls'] = data['doc_count'] + 1
    data.drop(columns=['product', 'Unnamed: 0', 'key', 'key_as_string',
                       'doc_count'], inplace=True)
    data = data.resample("M").sum()
    mask = (data.index > '2007-05-31') & (data.index < '2019-09-30')
    data = data.loc[mask]
    data['pct'] = data['recalls'].pct_change()
    return data
f6670cac1319108c88ee9ee409ce0ecdd1eca746
706,356
def is_solution(x: int, y: int) -> bool:
    """Returns True if (x, y) is a solution."""
    # x and y are the values in a sequence of 15 terms of the following form:
    # xxxxyxxxxxyxxxx
    # x must be a positive integer
    if x <= 0:
        return False
    # y must be a negative integer
    if y >= 0:
        return False
    # a run of 6 consecutive terms must be positive
    if 5 * x + y <= 0:
        return False
    # a run of 11 consecutive terms must be negative
    if 9 * x + 2 * y >= 0:
        return False
    # x must be <= 16 or y must be >= -16
    return x <= 16 or y >= -16
5e620fc390f6a79fd25d00c8c8b51d0af788d48c
706,357
def normalize(a, seqlength=None, rv=None):
    """
    Normalize the VSA vector

    :param a: input VSA vector
    :param seqlength: Optional, for BSC vectors this must be set to a valid
        value.
    :param rv: Optional random vector, used for splitting ties on binary and
        ternary VSA vectors.
    :return: new VSA vector
    """
    return a.normalize(a, seqlength, rv)
ef8ec307add55a56be5991bb13579bc989726d3c
706,358
def default_context(plugin, context):
    """
    Return the default context for plugins rendered with a template, which
    simply is a single variable named ``plugin`` containing the plugin
    instance.
    """
    return {"plugin": plugin}
5f7a88c02b6c11a150197e50a5be1847cba422b0
706,359
import torch

def make_positions(tensor, padding_idx, left_pad):
    """Replace non-padding symbols with their position numbers.

    Position numbers begin at padding_idx+1. Padding symbols are ignored,
    but it is necessary to specify whether padding is added on the left
    side (left_pad=True) or right side (left_pad=False).
    """
    max_pos = padding_idx + 1 + tensor.size(1)
    device = tensor.get_device()
    buf_name = f'range_buf_{device}'
    if not hasattr(make_positions, buf_name):
        setattr(make_positions, buf_name, tensor.new())
    setattr(make_positions, buf_name,
            getattr(make_positions, buf_name).type_as(tensor))
    if getattr(make_positions, buf_name).numel() < max_pos:
        torch.arange(padding_idx + 1, max_pos,
                     out=getattr(make_positions, buf_name))
    mask = tensor.ne(padding_idx)
    positions = getattr(make_positions, buf_name)[:tensor.size(1)].expand_as(tensor)
    if left_pad:
        positions = positions - mask.size(1) + mask.long().sum(dim=1).unsqueeze(1)
    new_tensor = tensor.clone()
    return new_tensor.masked_scatter_(mask, positions[mask]).long()
8e65c68daae2e40710c777d6e74f048b8b0ad547
706,360
def adjust_labels(data_y, dataset, pred_type='actions'):
    """
    Transforms original labels into the range [0, nb_labels-1]

    :param data_y: numpy integer array
        Sensor labels
    :param pred_type: string, ['gestures', 'locomotion', 'actions', 'tasks']
        Type of activities to be recognized
    :return: numpy integer array
        Modified sensor labels
    """
    data_y[data_y == "null_class"] = 0
    if dataset == 'wetlab':
        if pred_type == 'tasks':
            # Labels for tasks are adjusted
            data_y[data_y == "1solvent"] = 1
            data_y[data_y == "2catalysator"] = 2
            data_y[data_y == "3cutting"] = 3
            data_y[data_y == "4mixing"] = 4
            data_y[data_y == "5catalysator"] = 5
            data_y[data_y == "6waterbath"] = 6
            data_y[data_y == "7solvent"] = 7
            data_y[data_y == "8catalysator"] = 8
            data_y[data_y == "9cutting"] = 9
            data_y[data_y == "10mixing"] = 10
            data_y[data_y == "11catalysator"] = 11
            data_y[data_y == "12waterbath"] = 12
            data_y[data_y == "13waterbath"] = 13
            data_y[data_y == "14catalysator"] = 14
            data_y[data_y == "15pestling"] = 15
            data_y[data_y == "16filtrate"] = 16
            data_y[data_y == "17catalysator"] = 17
            data_y[data_y == "18pouring"] = 18
            data_y[data_y == "19detect"] = 19
            data_y[data_y == "20waterbath"] = 20
            data_y[data_y == "21catalysator"] = 21
            data_y[data_y == "22pestling"] = 22
            data_y[data_y == "23filtrate"] = 23
            data_y[data_y == "24catalysator"] = 24
            data_y[data_y == "25pouring"] = 25
            data_y[data_y == "26detect"] = 26
            data_y[data_y == "27end"] = 27
        elif pred_type == 'actions':
            # Labels for actions are adjusted
            data_y[data_y == "cutting"] = 1
            data_y[data_y == "inverting"] = 2
            data_y[data_y == "peeling"] = 3
            data_y[data_y == "pestling"] = 4
            data_y[data_y == "pipetting"] = 5
            data_y[data_y == "pouring"] = 6
            data_y[data_y == "pour catalysator"] = 6
            data_y[data_y == "stirring"] = 7
            data_y[data_y == "transfer"] = 8
    elif dataset == 'sbhar':
        data_y[data_y == 'walking'] = 1
        data_y[data_y == 'walking_upstairs'] = 2
        data_y[data_y == 'walking_downstairs'] = 3
        data_y[data_y == 'sitting'] = 4
        data_y[data_y == 'standing'] = 5
        data_y[data_y == 'lying'] = 6
        data_y[data_y == 'stand-to-sit'] = 7
        data_y[data_y == 'sit-to-stand'] = 8
        data_y[data_y == 'sit-to-lie'] = 9
        data_y[data_y == 'lie-to-sit'] = 10
        data_y[data_y == 'stand-to-lie'] = 11
        data_y[data_y == 'lie-to-stand'] = 12
    elif dataset == 'rwhar' or dataset == 'rwhar_3sbjs':
        data_y[data_y == 'climbing_down'] = 0
        data_y[data_y == 'climbing_up'] = 1
        data_y[data_y == 'jumping'] = 2
        data_y[data_y == 'lying'] = 3
        data_y[data_y == 'running'] = 4
        data_y[data_y == 'sitting'] = 5
        data_y[data_y == 'standing'] = 6
        data_y[data_y == 'walking'] = 7
    elif dataset == 'hhar':
        data_y[data_y == 'bike'] = 1
        data_y[data_y == 'sit'] = 2
        data_y[data_y == 'stand'] = 3
        data_y[data_y == 'walk'] = 4
        data_y[data_y == 'stairsup'] = 5
        data_y[data_y == 'stairsdown'] = 6
    # the original `elif dataset == 'opportunity' or 'opportunity_ordonez':`
    # was always truthy; test membership instead
    elif dataset in ('opportunity', 'opportunity_ordonez'):
        if pred_type == 'locomotion':
            data_y[data_y == "stand"] = 1
            data_y[data_y == "walk"] = 2
            data_y[data_y == "sit"] = 3
            data_y[data_y == "lie"] = 4
        elif pred_type == 'gestures':
            data_y[data_y == 'open_door_1'] = 1
            data_y[data_y == 'open_door_2'] = 2
            data_y[data_y == 'close_door_1'] = 3
            data_y[data_y == 'close_door_2'] = 4
            data_y[data_y == 'open_fridge'] = 5
            data_y[data_y == 'close_fridge'] = 6
            data_y[data_y == 'open_dishwasher'] = 7
            data_y[data_y == 'close_dishwasher'] = 8
            data_y[data_y == 'open_drawer_1'] = 9
            data_y[data_y == 'close_drawer_1'] = 10
            data_y[data_y == 'open_drawer_2'] = 11
            data_y[data_y == 'close_drawer_2'] = 12
            data_y[data_y == 'open_drawer_3'] = 13
            data_y[data_y == 'close_drawer_3'] = 14
            data_y[data_y == 'clean_table'] = 15
            data_y[data_y == 'drink_from_cup'] = 16
            data_y[data_y == 'toggle_switch'] = 17
    return data_y
1d201a20a8865cd505c0ee6b5385622a0ae28817
706,361
def is_str_or_bytes(x):
    """ True if x is str or bytes.

    This doesn't use rpartial to avoid infinite recursion.
    """
    return isinstance(x, (str, bytes, bytearray))
ff4bf19177ffe62f24713e077824e48ec45f8587
706,362
def _type_convert(new_type, obj):
    """
    Convert type of `obj` to `new_type`.
    """
    return new_type(obj)
fc47c100508d41caa7ffc786746b58e3d6f684e2
706,363
def _process_labels(labels, label_smoothing):
    """Pre-process a binary label tensor, maybe applying smoothing.

    Parameters
    ----------
    labels : tensor-like
        Tensor of 0's and 1's.
    label_smoothing : float or None
        Float in [0, 1]. When 0, no smoothing occurs. When positive, the
        binary ground truth labels `y_true` are squeezed toward 0.5, with
        larger values of `label_smoothing` leading to label values closer
        to 0.5.

    Returns
    -------
    torch.Tensor
        The processed labels.
    """
    assert label_smoothing is not None
    labels = (1 - label_smoothing) * labels + label_smoothing * 0.5
    return labels
5a71ded8ac9d3ef4b389542814a170f35ef18fdd
706,364
import sys
import traceback

def format_exc(limit=None):
    """Like print_exc() but return a string. Backport for Python 2.3."""
    try:
        etype, value, tb = sys.exc_info()
        return ''.join(traceback.format_exception(etype, value, tb, limit))
    finally:
        etype = value = tb = None
29bdbfbff4a1ce2d399a95c3a4685467a4022eaf
706,366
def _escape_value(value):
    """Escape a value."""
    value = value.replace(b"\\", b"\\\\")
    value = value.replace(b"\n", b"\\n")
    value = value.replace(b"\t", b"\\t")
    value = value.replace(b'"', b'\\"')
    return value
b58a3236c0686c7fb6a33859986123dc2b8089cc
706,367
def getLinkToSong(res):
    """ getLinkToSong(res): link to a song

    :param: res: information about the playlist -> getResponse(pl_id)
    :returns: the Spotify link of the first track in the playlist
        (only ``items[0]`` is read)
    """
    return res['items'][0]['track']['external_urls']['spotify']
e59fe598ed900a90dcf5376d265eedfc51d8e0a7
706,368
def parse_playing_now_message(playback):
    """parse_playing_now_message

    :param playback: object
    :returns str
    """
    track = playback.get("item", {}).get("name", False)
    artist = playback.get("item", {}).get("artists", [])
    artist = map(lambda a: a.get("name", ""), artist)
    artist = ", ".join(list(artist))
    message = "Playing '%s' from '%s' now!" % (track, artist)
    if not track:
        message = "Could not get current track!"
    return message
88d7c35257c2aaee44d1bdc1ec06640603c6a286
706,369
def _create_preactivation_hook(activations):
    """
    When we add this hook to a model's layer, it is called whenever the
    layer is about to make its forward pass.
    """
    def _linear_preactivation_hook(module, inputs):
        activations.append(inputs[0].cpu())
    return _linear_preactivation_hook
7f4cc10f7e051ed8e30556ee054a65c4878f6c0f
706,371
import importlib

def import_by_path(path):
    """
    Given a dotted/colon path, like project.module:ClassName.callable,
    returns the object at the end of the path.
    """
    module_path, object_path = path.split(":", 1)
    target = importlib.import_module(module_path)
    for bit in object_path.split("."):
        target = getattr(target, bit)
    return target
939b3426f36b3a188f7a48e21551807d42cfa254
706,372
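# Usage sketch for import_by_path above, using a standard-library path
# (illustrative, not part of the dataset record):
join = import_by_path("os.path:join")
assert join("a", "b") in ("a/b", "a\\b")  # separator depends on the platform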
def _exceeded_threshold(number_of_retries: int, maximum_retries: int) -> bool:
    """Return True if the number of retries has been exceeded.

    Args:
        number_of_retries: The number of retry attempts made already.
        maximum_retries: The maximum number of retry attempts to make.

    Returns:
        True if the maximum number of retry attempts have already been made.
    """
    if maximum_retries is None:
        # Retry forever.
        return False
    return number_of_retries >= maximum_retries
c434e1e752856f9160d40e25ac20dde0583e50a6
706,373
def moveb_m_human(agents, self_state, self_name, c, goal):
    """
    This method implements the following block-stacking algorithm:
    If there's a block that can be moved to its final position, then do so
    and call move_blocks recursively. Otherwise, if there's a block that
    needs to be moved and can be moved to the table, then do so and call
    move_blocks recursively. Otherwise, no blocks need to be moved.
    """
    if (self_name in self_state.isReachableBy[c]
            and c in goal.isOnStack and goal.isOnStack[c]
            and not self_state.isOnStack[c]):
        return [("human_pick", c), ("human_stack",)]
    return []
f99fd14b2091a1e8d0426dcef57ce33b96fc1352
706,374
def is_ipv4(line):
    """Check whether the line is IPv4."""
    # line.find returns -1 when "ipv4" is absent, so this is False both when
    # "ipv4" is missing and when it appears before column 6.
    if line.find("ipv4") < 6:
        return False
    return True
bd602f5a9ac74d2bd115fe85c90490556932e068
706,375
import torch

def bert_text_preparation(text, tokenizer):
    """Preparing the input for BERT

    Takes a string argument and performs pre-processing like adding special
    tokens, tokenization, tokens to ids, and tokens to segment ids. All
    tokens are mapped to segment id = 1.

    Args:
        text (str): Text to be converted
        tokenizer (obj): Tokenizer object to convert text into BERT-readable
            tokens and ids

    Returns:
        list: List of BERT-readable tokens
        obj: Torch tensor with token ids
        obj: Torch tensor segment ids
    """
    marked_text = "[CLS] " + text + " [SEP]"
    tokenized_text = tokenizer.tokenize(marked_text)
    indexed_tokens = tokenizer.convert_tokens_to_ids(tokenized_text)
    segments_ids = [1] * len(indexed_tokens)
    # Convert inputs to PyTorch tensors
    tokens_tensor = torch.tensor([indexed_tokens])
    segments_tensors = torch.tensor([segments_ids])
    return tokenized_text, tokens_tensor, segments_tensors
f9b3de4062fd0cc554e51bd02c750daea0a8250c
706,376
def possibly_equal(first, second):
    """Equality comparison that propagates uncertainty.

    It represents uncertainty using its own function object."""
    if first is possibly_equal or second is possibly_equal:
        return possibly_equal  # Propagate the possibilities
    return first == second
12662df45d6ee0c6e1aadb6a5c4c0ced9352af35
706,377
def chroms_from_build(build):
    """
    Get list of chromosomes from a particular genome build

    Args:
        build str

    Returns:
        chrom_list list
    """
    chroms = {'grch37': [str(i) for i in range(1, 23)],
              'hg19': ['chr{}'.format(i) for i in range(1, 23)],
              # 'grch37': [i for i in range(1, 23)] + ['X', 'Y'],
              }
    try:
        return chroms[build]
    except KeyError:
        raise ValueError("Oops, I don't recognize the build {}".format(build))
c87431911c07c00aaa63357771258394cfff859e
706,378
def LU_razcep(A):
    """
    Return the decomposition of A as ``[L\\U]``
    """
    # elimination
    for p, pivot_vrsta in enumerate(A[:-1]):
        for i, vrsta in enumerate(A[p + 1:]):
            if pivot_vrsta[p]:
                m = vrsta[p] / pivot_vrsta[p]
                vrsta[p:] = vrsta[p:] - pivot_vrsta[p:] * m
                vrsta[p] = m
    return A
79d6a00b4e16254739b987228fd506cae133907b
706,379
from typing import Union
from datetime import datetime
import pytz

def api_timestamp_to_datetime(api_dt: Union[str, dict]):
    """Converts the datetime string returned by the API to a python datetime
    object.

    Somehow this string is formatted with 7 digits for 'microsecond'
    resolution, so crop the last digit (and trailing Z). The cropped string
    will be written into api_dt_str_mod.
    """
    api_dt_str_mod = None
    if isinstance(api_dt, str):
        api_dt_str_mod = api_dt[:-2]
    elif isinstance(api_dt, dict):
        api_dt_str_mod = api_dt["dateTime"][:-2]
    else:
        # The original used a bare `raise` here, which is invalid outside an
        # `except` block; raise an explicit error instead.
        raise TypeError("api_dt must be a str or a dict")
    dt = datetime.strptime(api_dt_str_mod, "%Y-%m-%dT%H:%M:%S.%f")
    dt = pytz.utc.localize(dt)
    return dt
26f4828a19d17c883a8658eb594853158d70fbcf
706,381
import os
import subprocess

def run_command(command, filename=None, repeat=1, silent=False):
    """
    Run `command` with `filename` positional argument in the directory of
    the `filename`. If `filename` is not given, run only the command.
    """
    if filename is not None:
        fdir = os.path.dirname(os.path.abspath(filename))
        fname = os.path.basename(filename)
        cmd = command + ' ' + fname
    else:
        fdir = None
        cmd = command
    status = 0
    for ii in range(repeat):
        if silent:
            with open(os.devnull, 'w') as devnull:
                st = subprocess.call(cmd.split(), cwd=fdir,
                                     stdout=devnull, stderr=devnull)
        else:
            st = subprocess.call(cmd.split(), cwd=fdir)
        status = status or st
    return status
0f24c79f9d557198645de75fe7160af41638ecb6
706,382
import os

def attempt_input_load(input_path):
    """Attempts to load the file at the provided path and return it as an
    array of lines. If the file does not exist we will exit the program
    since nothing useful can be done."""
    if not os.path.isfile(input_path):
        print("Input file does not exist: %s" % input_path)
        exit()
    print("Loading input from file: %s" % input_path)
    with open(input_path, "r", encoding='utf-8') as f:
        lines = f.readlines()
    return lines
f75e95258c803175d1f13d82c6479987cfdecbf9
706,383
def __discount_PF(i, n):
    """
    Present worth factor

    Factor: (P/F, i, N)
    Formula: P = F(1+i)^-N

    :param i:
    :param n:
    :return:

    Cash Flow:
               F
               |
               |
        --------------
        |
        P
    """
    return (1 + i) ** (-n)
b6e7424647921b945a524a22d844925573b6490a
706,384
def shortstr(s, max_len=144, replace={'\n': ';'}):
    """ Obtain a shorter string """
    s = str(s)
    for k, v in replace.items():
        s = s.replace(k, v)
    if max_len > 0 and len(s) > max_len:
        s = s[:max_len-4] + ' ...'
    return s
396794506583dcf39e74941a20f27ac63de325ec
706,385
def FTCS(Uo, diffX, diffY=None):
    """Return the numerical solution of dependent variable in the model eq.

    This routine uses the explicit Forward Time/Central Space method
    to obtain the solution of the 1D or 2D diffusion equation.

    Call signature:
        FTCS(Uo, diffX, diffY)

    Parameters
    ----------
    Uo : ndarray[float], =1d, 2d
        The dependent variable at time level, n within the entire domain.
    diffX : float
        Diffusion number for x-component of the parabolic/diffusion equation.
    diffY : float, Default=None for 1-D applications
        Diffusion number for y-component of the parabolic/diffusion equation.

    Returns
    -------
    U : ndarray[float], =1d, 2d
        The dependent variable at time level, n+1 within the entire domain.
    """
    shapeU = Uo.shape  # Obtain Dimension
    U = Uo.copy()  # Initialize U
    if len(shapeU) == 1:
        U[1:-1] = (Uo[1:-1]
                   + diffX*(Uo[2:] - 2.0*Uo[1:-1] + Uo[0:-2]))
    elif len(shapeU) == 2:
        U[1:-1, 1:-1] = (Uo[1:-1, 1:-1]
                         + diffX*(Uo[2:, 1:-1] - 2.0*Uo[1:-1, 1:-1]
                                  + Uo[0:-2, 1:-1])
                         + diffY*(Uo[1:-1, 2:] - 2.0*Uo[1:-1, 1:-1]
                                  + Uo[1:-1, 0:-2]))
    return U
4b02749f3f50a2cff74abb75146159289d42b99e
706,387
def maplist(f, xs):
    """Implement `maplist` in pure Python."""
    return list(map(f, xs))
894a58f9e2cd66fe9c327ea65433b8210051ed60
706,388
def write_code():
    """
    Code that checks the existing path and sasviewpath in the environment
    variables/PATH
    """
    msg = """\n\n[Code]\n"""
    msg += """function InstallVC90CRT(): Boolean;\n"""
    msg += """begin\n"""
    msg += """    Result := not DirExists('C:\WINDOWS\WinSxS\\x86_Microsoft.VC90."""
    msg += """CRT_1fc8b3b9a1e18e3b_9.0.21022.8_x-ww_d08d0375');\n"""
    msg += """end;\n\n"""
    msg += """function NeedsAddPath(): boolean;\n"""
    msg += """var\n"""
    msg += """  oldpath: string;\n"""
    msg += """  newpath: string;\n"""
    msg += """  pathArr: TArrayOfString;\n"""
    msg += """  i: Integer;\n"""
    msg += """begin\n"""
    msg += """  RegQueryStringValue(HKEY_CURRENT_USER,'Environment',"""
    msg += """'PATH', oldpath)\n"""
    msg += """  oldpath := oldpath + ';';\n"""
    msg += """  newpath := '%SASVIEWPATH%';\n"""
    msg += """  i := 0;\n"""
    msg += """  while (Pos(';', oldpath) > 0) do begin\n"""
    msg += """    SetArrayLength(pathArr, i+1);\n"""
    msg += """    pathArr[i] := Copy(oldpath, 0, Pos(';', oldpath)-1);\n"""
    msg += """    oldpath := Copy(oldpath, Pos(';', oldpath)+1,"""
    msg += """ Length(oldpath));\n"""
    msg += """    i := i + 1;\n"""
    msg += """    // Check if current directory matches app dir\n"""
    msg += """    if newpath = pathArr[i-1] \n"""
    msg += """    then begin\n"""
    msg += """      Result := False;\n"""
    msg += """      exit;\n"""
    msg += """    end;\n"""
    msg += """  end;\n"""
    msg += """  Result := True;\n"""
    msg += """end;\n"""
    msg += """\n"""
    return msg
429eb64485a4fe240c1bebbfd2a2a89613b4fddd
706,389
import re

def get_filenames(filename):
    """ Return list of unique file references within a passed file. """
    try:
        with open(filename, 'r', encoding='utf8') as file:
            words = re.split("[\n\\, \-!?;'//]", file.read())
            # files = filter(str.endswith(('csv', 'zip')), words)
            files = set(filter(lambda s: s.endswith(('.csv', '.zip', '.pdf',
                                                     '.txt', '.tsv', '.cfg',
                                                     '.ini')), words))
            return list(files)
    except Exception as e:
        print(e)
        return []
a1d8c396245cfc682ecc37edb3e673f87939b6fa
706,390
def format_filename_gen(prefix, seq_len, tgt_len, bi_data, suffix,
                        src_lang, tgt_lang, uncased=False):
    """docs."""
    if not uncased:
        uncased_str = ""
    else:
        uncased_str = "uncased."
    if bi_data:
        bi_data_str = "bi"
    else:
        bi_data_str = "uni"
    file_name = "{}-{}_{}.seqlen-{}.tgtlen-{}.{}{}.gen.{}".format(
        src_lang[:2], tgt_lang[:2], prefix, seq_len, tgt_len,
        uncased_str, bi_data_str, suffix)
    return file_name
4a54c1fbfe371d628c1d7019c131b8fa6755f900
706,391
def token_groups(self):
    """The groups the Token owner is a member of."""
    return self.created_by.groups
9db411660db1def09b8dc52db800ca4c09a38cce
706,392
import requests

def get_html_content_in_text(url):
    """
    Grab all the content in webpage url and return its content in text.

    Arguments:
        url -- a webpage url string.

    Returns:
        r.text -- the content of webpage in text.
    """
    r = requests.get(url)
    return r.text
fd8ddc992f34c186051ca8985ffb110c50004970
706,393
import optparse

def _GetOptionsParser():
    """Get the options parser."""
    parser = optparse.OptionParser(__doc__)
    parser.add_option('-i', '--input', dest='inputs', action='append',
                      default=[],
                      help='One or more input files to calculate dependencies '
                      'for. The namespaces in this file will be combined with '
                      'those given with the -n flag to form the set of '
                      'namespaces to find dependencies for.')
    parser.add_option('-n', '--namespace', dest='namespaces', action='append',
                      default=[],
                      help='One or more namespaces to calculate dependencies '
                      'for. These namespaces will be combined with those given '
                      'with the -i flag to form the set of namespaces to find '
                      'dependencies for. A Closure namespace is a '
                      'dot-delimited path expression declared with a call to '
                      'goog.provide() (e.g. "goog.array" or "foo.bar").')
    parser.add_option('--root', dest='roots', action='append', default=[],
                      help='The paths that should be traversed to build the '
                      'dependencies.')
    parser.add_option('-o', '--output_mode', dest='output_mode', type='choice',
                      action='store', choices=['list', 'script', 'compiled'],
                      default='list',
                      help='The type of output to generate from this script. '
                      'Options are "list" for a list of filenames, "script" '
                      'for a single script containing the contents of all the '
                      'files, or "compiled" to produce compiled output with '
                      'the Closure Compiler. Default is "list".')
    parser.add_option('-c', '--compiler_jar', dest='compiler_jar',
                      action='store',
                      help='The location of the Closure compiler .jar file.')
    parser.add_option('-f', '--compiler_flags', dest='compiler_flags',
                      default=[], action='append',
                      help='Additional flags to pass to the Closure compiler. '
                      'To pass multiple flags, --compiler_flags has to be '
                      'specified multiple times.')
    parser.add_option('--output_file', dest='output_file', action='store',
                      help=('If specified, write output to this path instead '
                            'of writing to standard output.'))
    return parser
e1ec0530357ad3bebbac80c86b9d9b1010e6688c
706,394
def update_security_schemes(spec, security, login_headers, security_schemes,
                            unauthorized_schema):
    """Patch OpenAPI spec to include security schemas.

    Args:
        spec: OpenAPI spec dictionary

    Returns:
        Patched spec
    """
    # login_headers = {'Set-Cookie':
    #                  {'schema':
    #                   {'type': 'string',
    #                    'example': 'session=abcde12345; Path=/; HttpOnly'}}}
    # security_schemes = {'cookieAuth': {'description': 'Session Cookie',
    #                                    'type': 'apiKey',
    #                                    'in': 'cookie',
    #                                    'name': 'session'}}
    # unauthorized_schema = {'UnauthorizedError':
    #                        {'description': "The auth cookie isn't present",
    #                         'properties':
    #                         {'schema': {'type': 'string',
    #                                     'example': 'Unauthorized'}}}}
    spec["components"]["securitySchemes"] = security_schemes
    spec["security"] = security
    spec["paths"]["/login"]["post"]["responses"][200]["headers"] = \
        login_headers.copy()
    return spec
1ecb5cc3a121fc151794e4e24cd4aca4bc07ce46
706,395
def inherits_from(obj, parent):
    """
    Takes an object and tries to determine if it inherits at *any* distance
    from parent.

    Args:
        obj (any): Object to analyze. This may be either an instance or a
            class.
        parent (any): Can be either instance, class or python path to class.

    Returns:
        inherits_from (bool): If `parent` is a parent to `obj` or not.

    Notes:
        What distinguishes this function from e.g. `isinstance()` is that
        `obj` may be both an instance and a class, and parent may be an
        instance, a class, or the python path to a class (counting from the
        evennia root directory).
    """
    if callable(obj):
        # this is a class
        obj_paths = ["%s.%s" % (mod.__module__, mod.__name__)
                     for mod in obj.mro()]
    else:
        obj_paths = ["%s.%s" % (mod.__module__, mod.__name__)
                     for mod in obj.__class__.mro()]
    if isinstance(parent, str):
        # a given string path, for direct matching
        parent_path = parent
    elif callable(parent):
        # this is a class
        parent_path = "%s.%s" % (parent.__module__, parent.__name__)
    else:
        parent_path = "%s.%s" % (parent.__class__.__module__,
                                 parent.__class__.__name__)
    return any(1 for obj_path in obj_paths if obj_path == parent_path)
9d7e0665b4e4fe2a3f7c136436a2502c8b72527c
706,396
def name(model):
    """A repeatable way to get the formatted model name."""
    return model.__name__.replace('_', '').lower()
3d9ca275bfbfff6d734f49a47459761c559d906e
706,397
import imp
import os

def pseudo_import(pkg_name):
    """ return a new module that contains the variables of
    pkg_name.__init__ """
    init = os.path.join(pkg_name, '__init__.py')
    # remove imports and 'from foo import'
    lines = open(init, 'r').readlines()
    lines = filter(lambda l: l.startswith('__'), lines)
    code = '\n'.join(lines)
    module = imp.new_module(pkg_name)
    exec(code, module.__dict__)
    return module
5982282d545c361d5198459073498ce5cba740a8
706,398
import socket

def get_hostname(ipv) -> str:
    """
    Get hostname from IPv4 and IPv6.

    :param ipv: ip address
    :return: hostname
    """
    return socket.gethostbyaddr(ipv)[0]
e7d660dc3c5e30def646e56fa628099e997145be
706,399
import os

def find_invalid_filenames(filenames, repository_root):
    """Find files that do not exist, are not in the repo or are directories.

    Args:
        filenames: list of filenames to check
        repository_root: the absolute path of the repository's root.

    Returns:
        A list of errors.
    """
    errors = []
    for filename in filenames:
        if not os.path.abspath(filename).startswith(repository_root):
            errors.append((filename, 'Error: File %s does not belong to '
                           'repository %s' % (filename, repository_root)))
        if not os.path.exists(filename):
            errors.append((filename,
                           'Error: File %s does not exist' % (filename,)))
        if os.path.isdir(filename):
            errors.append((filename,
                           'Error: %s is a directory. Directories are'
                           ' not yet supported' % (filename,)))
    return errors
c207442e08fa7a2188ab8e792ee76d596b4f19f0
706,400
def get_score(true, predicted):
    """Returns F1 per instance"""
    numerator = len(set(predicted.tolist()).intersection(set(true.tolist())))
    p = numerator / float(len(predicted))
    r = numerator / float(len(true))
    if r == 0.:
        return 0.
    return 2 * p * r / float(p + r)
115a4847e3d991f47415554401df25d72d74bb2f
706,401
import torch

def proto_factor_cosine(local_proto, global_proto):
    """ [C, D]: D is 64 or 4 """
    # factor = 1
    norm_local = torch.norm(local_proto, dim=-1, keepdim=False)
    norm_global = torch.norm(global_proto, dim=-1, keepdim=False)
    # [C]
    factor_refined = torch.sum(local_proto * global_proto, dim=-1,
                               keepdim=False) / (norm_local * norm_global + 1e-6)
    return factor_refined
6e9f7540ec1339efe3961b103633f5175cb38c49
706,402
def esc_quotes(strng):
    """ Return the input string with single and double quotes escaped out. """
    return strng.replace('"', '\\"').replace("'", "\\'")
25956257e06901d4f59088dd2c17ddd5ea620407
706,403
def jaccard(set1, set2):
    """ computes the jaccard coefficient between two sets

    @param set1: first set
    @param set2: second set
    @return: the jaccard coefficient
    """
    if len(set1) == 0 or len(set2) == 0:
        return 0
    inter = len(set1.intersection(set2))
    return inter / (len(set1) + len(set2) - inter)
9a99c6c5251bdb7cb10f6d3088ac6ac52bb02a55
706,404
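# Usage sketch for jaccard above (illustrative sets, not part of the record):
assert jaccard({1, 2, 3}, {2, 3, 4}) == 0.5  # |{2,3}| / |{1,2,3,4}|
assert jaccard(set(), {1}) == 0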
def to_numeric(arg):
    """
    Converts a string either to int or to float.
    This is important, because e.g. {"!==": [{"+": "0"}, 0.0]}
    """
    if isinstance(arg, str):
        if '.' in arg:
            return float(arg)
        else:
            return int(arg)
    return arg
e82746e1c5c84b57e59086030ff7b1e93c89a8ec
706,405
def read_tracker(file_name):
    """ Read an integer tracker value from the first line of file_name. """
    with open(file_name, "r") as f:
        return int(f.readline())
9d1f43b8f833b5ca86c247760ae79e18f33aa019
706,406
def purelin(n):
    """ Linear """
    return n
493c4ae481702194fe32eec44e589e5d15614b99
706,407
def point_in_fence(x, y, points):
    """
    Determine whether a point lies inside a fence (polygon).

    :param x: longitude
    :param y: latitude
    :param points: format [[lon1, lat1], [lon2, lat2], ...]
    :return:
    """
    count = 0
    x1, y1 = points[0]
    x1_part = (y1 > y) or ((x1 - x > 0) and (y1 == y))  # which half-plane x1 is in
    points.append((x1, y1))
    for point in points[1:]:
        x2, y2 = point
        x2_part = (y2 > y) or ((x2 > x) and (y2 == y))  # which half-plane x2 is in
        if x2_part == x1_part:
            x1, y1 = x2, y2
            continue
        mul = (x1 - x) * (y2 - y) - (x2 - x) * (y1 - y)
        if mul > 0:  # cross product > 0: counter-clockwise
            count += 1
        elif mul < 0:
            count -= 1
        x1, y1 = x2, y2
        x1_part = x2_part
    if count == 2 or count == -2:
        return True
    else:
        return False
bb25f399eadf818fbafdeee6c8adbb1254a579f7
706,408
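# Usage sketch for point_in_fence above (illustrative square fence; note the
# function appends the first vertex to `points`, mutating the caller's list):
square = [[0, 0], [4, 0], [4, 4], [0, 4]]
assert point_in_fence(2, 2, list(square)) is True
assert point_in_fence(5, 2, list(square)) is False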
def validate_output(value):
    """Validate "output" parameter."""
    if value is not None:
        if isinstance(value, str):
            value = value.split(",")
        # filter out empty names
        value = list(filter(None, value))
    return value
f00773674868ebde741f64b47fdc3372ad6a1e7d
706,409
def euler(derivative):
    """ Euler method """
    return lambda t, x, dt: (t + dt, x + derivative(t, x) * dt)
08d636ec711f4307ab32f9a8bc3672197a3699d9
706,410
import os

def GetFilesToConcatenate(input_directory):
    """Get list of files to concatenate.

    Args:
        input_directory: Directory to search for files.

    Returns:
        A list of all files that we would like to concatenate relative
        to the input directory.
    """
    file_list = []
    for dirpath, _, files in os.walk(input_directory):
        for input_file in files:
            file_list.append(
                os.path.relpath(
                    os.path.join(dirpath, input_file),
                    input_directory))
    return file_list
f8e2805b94171645ef9c4d51ded83a8f2f9e7675
706,411
import numpy
import math

def rotation_matrix_from_quaternion(quaternion):
    """Return homogeneous rotation matrix from quaternion."""
    q = numpy.array(quaternion, dtype=numpy.float64)[0:4]
    nq = numpy.dot(q, q)
    if nq == 0.0:
        return numpy.identity(4, dtype=numpy.float64)
    q *= math.sqrt(2.0 / nq)
    q = numpy.outer(q, q)
    return numpy.array((
        (1.0-q[1,1]-q[2,2],     q[0,1]-q[2,3],     q[0,2]+q[1,3], 0.0),
        (    q[0,1]+q[2,3], 1.0-q[0,0]-q[2,2],     q[1,2]-q[0,3], 0.0),
        (    q[0,2]-q[1,3],     q[1,2]+q[0,3], 1.0-q[0,0]-q[1,1], 0.0),
        (              0.0,               0.0,               0.0, 1.0)
        ), dtype=numpy.float64)
51b169ffa702e3798f7a6138271b415b369566ba
706,412
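# Usage sketch for rotation_matrix_from_quaternion above; the indexing implies
# an (x, y, z, w) quaternion, so the identity rotation is [0, 0, 0, 1]
# (illustrative check, not part of the record):
import numpy
m = rotation_matrix_from_quaternion([0.0, 0.0, 0.0, 1.0])
assert numpy.allclose(m, numpy.identity(4))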
def format_channel(channel):
    """ Returns string representation of <channel>. """
    if channel is None or channel == '':
        return None
    elif type(channel) == int:
        return 'ch{:d}'.format(channel)
    elif type(channel) != str:
        raise ValueError('Channel must be specified in string format.')
    elif 'ch' in channel:
        return channel
    elif channel.lower() in 'rgb':
        return format_channel('rgb'.index(channel.lower()))
    elif channel.lower() in ('red', 'green', 'blue'):
        return format_channel('rgb'.index(channel.lower()[0]))
    else:
        raise ValueError('Channel string not recognized.')
4eeb42899762d334599df831b7520a956998155a
706,413
import random

def shuffle_sequence(sequence: str) -> str:
    """Shuffle the given sequence.

    Randomly shuffle a sequence, maintaining the same composition.

    Args:
        sequence: input sequence to shuffle

    Returns:
        tmp_seq: shuffled sequence
    """
    tmp_seq: str = ""
    while len(sequence) > 0:
        max_num = len(sequence)
        rand_num = random.randrange(max_num)
        tmp_char = sequence[rand_num]
        tmp_seq += tmp_char
        tmp_str_1 = sequence[:rand_num]
        tmp_str_2 = sequence[rand_num + 1:]
        sequence = tmp_str_1 + tmp_str_2
    return tmp_seq
9e833aed9e5a17aeb419a77176713e76566d2d06
706,414
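# Usage sketch for shuffle_sequence above: composition is preserved while
# order is randomized (equivalent to ''.join(random.sample(seq, len(seq)))):
shuffled = shuffle_sequence("ACGTACGT")
assert sorted(shuffled) == sorted("ACGTACGT")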
import os

def _get_next_foldername_index(name_to_check, dir_path):
    """Finds folders with name_to_check in them in dir_path and extracts
    which one has the highest index.

    Parameters
    ----------
    name_to_check : str
        The name of the network folder that we want to look repetitions for.
    dir_path : str
        The folder where we want to look for network model repetitions.

    Returns
    -------
    str
        If there are no name matches, it returns the string '1'. Otherwise,
        it returns str(highest index found + 1)
    """
    dir_content = os.listdir(dir_path)
    # find all folders that have name_to_check in them, extracting the
    # counter in the folder name, and then we find the maximum
    dir_name_indexes = [int(item.split('.')[-1]) for item in dir_content
                        if os.path.isdir(item) and name_to_check in item]
    if len(dir_name_indexes) == 0:
        return '1'
    else:
        highest_idx = max(dir_name_indexes)
        return str(highest_idx + 1)
c8239f8ae8a34c2ae8432e7a9482f169ce0962ce
706,415
import torch

def _get_activation_fn(activation):
    """Return an activation function given a string"""
    if activation == "relu":
        return torch.nn.functional.relu
    if activation == "gelu":
        return torch.nn.functional.gelu
    if activation == "glu":
        return torch.nn.functional.glu
    raise RuntimeError(f"activation should be relu/gelu/glu, not {activation}.")
ecc690e9b9ec6148b6ea8df4bd08ff2d0c1c322e
706,416
def make_05dd():
    """End of movement lock (end of event)."""
    return ""
bb85e01a4a4515ac88688690cacd67e7c9351034
706,417
import pandas as pd
import pkgutil

def check_is_pandas_dataframe(log):
    """
    Checks if a log object is a dataframe

    Parameters
    -------------
    log
        Log object

    Returns
    -------------
    boolean
        Is dataframe?
    """
    if pkgutil.find_loader("pandas"):
        return type(log) is pd.DataFrame
    return False
93fa02445302cf695fd86beb3e4836d58660e376
706,418
from pathlib import Path

def get_root_path():
    """
    this is to get the root path of the code

    :return: path
    """
    path = str(Path(__file__).parent.parent)
    return path
bc01b4fb15569098286fc24379a258300ff2dfa0
706,419
import os

def _bin_file(script):
    """Return the absolute path to script in the bin directory"""
    return os.path.abspath(os.path.join(__file__, "../../../bin", script))
3b7dbe4061ff88bd42da91f854f561715df101df
706,420
def rotate_2d_list(squares_list):
    """
    http://stackoverflow.com/questions/8421337/rotating-a-two-dimensional-array-in-python
    """
    return [x for x in zip(*squares_list[::-1])]
a6345ce6954643b8968ffb4c978395baf777fca4
706,421
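# Usage sketch for rotate_2d_list above: rotates 90 degrees clockwise, and
# the rows come back as tuples (illustrative):
assert rotate_2d_list([[1, 2], [3, 4]]) == [(3, 1), (4, 2)]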
def icecreamParlor4(m, arr):
    """I forgot about Python's nested for loops - on the SAME array - and
    how that's actually -possible-, and it makes things so much simpler.

    It turns out that this works, but only for small inputs."""
    # Augment the array of data, so that it not only includes the item, but
    # also the item's index into the array.
    decorated_arr = []
    for index, item in enumerate(arr):
        decorated_arr.append((item, index))
    # Iterate each combination of index, and see if conditions are met with
    # them. There are 2 things we care about: both amounts equal to m, and
    # they both aren't the same item.
    for i in decorated_arr:
        for j in decorated_arr:
            # Compare the two costs (index 0 of each pair); the original
            # `i[0] + j[1]` mistakenly added a cost to an index.
            if (i[0] + j[0]) == m and i[1] != j[1]:
                return [i[1] + 1, j[1] + 1]
6af2da037aa3e40c650ac48ebeb931399f1a6eaa
706,422
import os
import sys

def findBaseDir(basename, max_depth=5, verbose=False):
    """ Get relative path to a BASEDIR.

    :param basename: Name of the basedir to path to
    :type basename: str
    :return: Relative path to base directory.
    :rtype: str
    """
    MAX_DEPTH = max_depth
    BASEDIR = os.path.abspath(os.path.dirname(__file__))
    print("STARTING AT: %s\n Looking for: %s" % (BASEDIR, basename))
    for level in range(MAX_DEPTH):
        if verbose:
            print('LEVEL %d: %s\n Current basename: %s'
                  % (level, BASEDIR, os.path.basename(BASEDIR)))
        if os.path.basename(BASEDIR) == basename:
            break
        else:
            BASEDIR = os.path.abspath(os.path.dirname(BASEDIR))
        if level == MAX_DEPTH - 1:
            sys.exit('Could not find correct basedir\n Currently at %s'
                     % BASEDIR)
    return os.path.relpath(BASEDIR)
75d11503ef02cb0671803ab7b13dcd6d77322330
706,424
import math

def inverse_gamma(data, alpha=0.1, beta=0.1):
    """
    Inverse gamma distribution

    :param data: Data value
    :param alpha: alpha value
    :param beta: beta value
    :return: Inverse gamma distribution density
    """
    # pdf: beta^alpha / Gamma(alpha) * x^(-alpha - 1) * exp(-beta / x);
    # the original `pow(alpha, data - 1)` did not match this density.
    return (pow(beta, alpha) / math.gamma(alpha)) * \
        pow(data, -alpha - 1) * math.exp(-beta / data)
c13f5e4a05e111ae0082b7e69ef5b31498d2c221
706,425
import statistics

def linear_regression(xs, ys):
    """
    Computes linear regression coefficients
    https://en.wikipedia.org/wiki/Simple_linear_regression

    Returns a and b coefficients of the function f(x) = a * x + b
    """
    x_mean = statistics.mean(xs)
    y_mean = statistics.mean(ys)
    num, den = 0.0, 0.0
    for x, y in zip(xs, ys):
        num += (x - x_mean) * (y - y_mean)
        den += (x - x_mean) * (x - x_mean)
    a = num / den
    b = y_mean - a * x_mean
    return a, b
6b6ecbd31262e5fe61f9cf7793d741a874327598
706,426
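# Usage sketch for linear_regression above (illustrative points on y = 2x):
a, b = linear_regression([1, 2, 3], [2, 4, 6])
assert abs(a - 2.0) < 1e-9 and abs(b) < 1e-9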
import torch

def collate_fn_feat_padded(batch):
    """ Sort a data list by frame length (descending order)

    batch : list of tuple (feature, label). len(batch) = batch_size
        - feature : torch tensor of shape [1, 40, 80] ; variable size of frames
        - labels : torch tensor of shape (1)

    ex) samples = collate_fn([batch])
        batch = [dataset[i] for i in batch_indices].
        ex) [Dvector_train_dataset[i] for i in [0,1,2,3,4]]
        batch[0][0].shape = torch.Size([1,64,774]). "774" is the number of
        frames per utterance.
    """
    batch.sort(key=lambda x: x[0].shape[2], reverse=True)
    feats, labels = zip(*batch)
    # Merge labels => torch.Size([batch_size,1])
    labels = torch.stack(labels, 0)
    labels = labels.view(-1)
    # Merge frames
    lengths = [feat.shape[2] for feat in feats]  # in decreasing order
    max_length = lengths[0]
    # features_mod.shape => torch.Size([batch_size, n_channel, dim, max(n_win)])
    padded_features = torch.zeros(len(feats), feats[0].shape[0],
                                  feats[0].shape[1], feats[0].shape[2]).float()
    # convert to FloatTensor (it should be!).
    # torch.Size([batch, 1, feat_dim, max(n_win)])
    for i, feat in enumerate(feats):
        end = lengths[i]
        num_frames = feat.shape[2]
        while max_length > num_frames:
            feat = torch.cat((feat, feat[:, :, :end]), 2)
            num_frames = feat.shape[2]
        padded_features[i, :, :, :] = feat[:, :, :max_length]
    return padded_features, labels
b901b6c3eacfd4d0bac93e1569d59ad944365fd2
706,428
def getCubePixels(cubeImages):
    """
    Returns a list containing the raw pixels from the `bpy.types.Image`
    images in the list `cubeImages`. Factoring this functionality out into
    its own function is useful for performance profiling.
    """
    return [face.pixels[:] for face in cubeImages]
cdb2ba02ce9466e1b92a683dbea409e66b60c8da
706,430
import torch

def shuffle_tensor(input):
    """
    Returns a new tensor whose elements correspond to a randomly shuffled
    version of the elements of the input.

    Args:
        input (`torch.Tensor`): input tensor.

    Returns:
        (`torch.Tensor`): output tensor.
    """
    return input[torch.randperm(input.nelement())]
e7c3ff4180123de1fe6322296ba08863de9766a4
706,433
def make_word_list1():
    """Reads lines from a file and builds a list using append."""
    t = []
    fin = open('words.txt')
    for line in fin:
        word = line.strip()
        t.append(word)
    return t
7af7b0697557e8bba891d73bd8217860350b810e
706,434
from datetime import datetime

def filter_posts(posts: list, parsing_date: datetime) -> list:
    """Filter out posts that do not fall within the month being parsed."""
    res = []
    for post in posts:
        post_date = datetime.fromtimestamp(post['date'])
        if post_date.month == parsing_date.month:
            res.append(post)
    return res
381d5cb37e4ae3439c335a7962352431ad3ca17c
706,435
import codecs

def open_file(path):
    """open_file."""
    return codecs.open(path, encoding='utf8').read()
f7fd375ea76e8e7872e465e89eea5c02f3396115
706,436
def get_api_status():
    """Get API status"""
    return "<h4>API Is Up</h4>"
5c88fc39bc5a970c4d223d8fe87c4fa3ad473b50
706,437
def create_alert_from_slack_message(payload, time):
    """ Create a new raw alert (json) from the new alert form in Slack """
    alert_json = {}
    values = payload['view']['state']['values']
    for value in values:
        for key in values[value]:
            if key == 'severity':
                alert_json[key] = \
                    values[value][key]['selected_option']['text']['text']
            else:
                alert_json[key] = values[value][key]['value']
    alert_json['datetime'] = time
    return alert_json
1ae8b93a6b9f8bd7532ac193cb6dfde58bf8d409
706,438
def get_cell_ids(num_celltypes=39):
    """get valid cell ids by removing cell types with missing data.

    Return:
        A cell id list.
    """
    missing_ids = [8, 23, 25, 30, 32, 33, 34, 35, 38, 39, 17]
    return [item for item in list(range(1, num_celltypes + 1))
            if item not in missing_ids]
a7c8f881ad62af9c4287cd50b9b01118f724c4f8
706,439
import re

def proper_units(text: str) -> str:
    """
    Function for changing units to a better form.

    Args:
        text (str): text to check.

    Returns:
        str: reformatted text with better units.
    """
    conv = {
        r"degK": r"K",
        r"degC": r"$^{\circ}$C",
        r"degrees\_celsius": r"$^{\circ}$C",
        r"degrees\_north": r"$^{\circ}$N",
        r"degrees\_east": r"$^{\circ}$E",
        r"degrees\_west": r"$^{\circ}$W",
        r"I metric": r"$\mathcal{I}$--metric",
    }
    regex = re.compile(
        "|".join(
            re.escape(key)
            for key in sorted(conv.keys(), key=lambda item: -len(item))
        )
    )
    return regex.sub(lambda match: conv[match.group()], text)
5113d227db1a75ec8fa407c5f9edd5a897960d82
706,440
def does_algorithm_implementation_have_capabilities_to_execute_parameter(
        parameter_kisao_id, algorithm_specs):
    """ Determine whether an implementation of an algorithm has the
    capabilities to execute an algorithm parameter

    Args:
        parameter_kisao_id (:obj:`str`): KiSAO id for an algorithm parameter
        algorithm_specs (:obj:`dict` with schema
            ``https://api.biosimulators.org/openapi.json#/components/schemas/Algorithm``):
            specifications of the implementation of an algorithm

    Returns:
        :obj:`bool`: whether the implementation of the algorithm has the
        capabilities to execute the SED parameter
    """
    for parameter_specs in algorithm_specs['parameters']:
        if parameter_specs['kisaoId']['id'] == parameter_kisao_id:
            return True
    return False
653712ae621bd014547e04009243cefe4c9eb8e1
706,441
def has_remove_arg(args):
    """
    Checks if remove argument exists

    :param args: Argument list
    :return: True if remove argument is found, False otherwise
    """
    if "remove" in args:
        return True
    return False
9b07fe70cecfbdf6e6e2274e5b3e715f903331c7
706,442
def convert_to_boolean(value):
    """Turn strings to bools if they look like them

    Truthy things should be True
    >>> for truthy in ['true', 'on', 'yes', '1']:
    ...   assert convert_to_boolean(truthy) == True

    Falsey things should be False
    >>> for falsey in ['false', 'off', 'no', '0']:
    ...   assert convert_to_boolean(falsey) == False

    Other things should be unchanged
    >>> for value in ['falsey', 'other', True, 0]:
    ...   assert convert_to_boolean(value) == value
    """
    if isinstance(value, str):
        if value.lower() in ['t', 'true', 'on', 'yes', '1']:
            return True
        elif value.lower() in ['f', 'false', 'off', 'no', '0']:
            return False
    return value
7cbf7a8fd601904c7aa8b685f6a3b3f5eaaa5c51
706,443
def getSampleBandPoints(image, region, **kwargs):
    """
    Function to perform sampling of an image over a given region, using
    ee.Image.sample(image, region, **kwargs)

    Args:
        image (ee.Image): an image to sample
        region (ee.Geometry): the geometry over which to sample

    Returns:
        An ee.FeatureCollection of sampled points along with coordinates
    """
    dargs = {
        'numPixels': 1000,
        'region': region
    }
    dargs.update(kwargs)
    sample = image.sample(**dargs)
    return sample
4cfbc3c180b805abe52c718f81cc16c409693922
706,444
def updateRIPCount(idx, RIPtracker, addRev=0, addFwd=0, addNonRIP=0):
    """Add observed RIP events to tracker by row."""
    TallyRev = RIPtracker[idx].revRIPcount + addRev
    TallyFwd = RIPtracker[idx].RIPcount + addFwd
    TallyNonRIP = RIPtracker[idx].nonRIPcount + addNonRIP
    RIPtracker[idx] = RIPtracker[idx]._replace(revRIPcount=TallyRev,
                                               RIPcount=TallyFwd,
                                               nonRIPcount=TallyNonRIP)
    return RIPtracker
7f83c547d9acd6c697174fffa1ccb3aec6e91a24
706,445
def get_text(cell):
    """ get stripped text from a BeautifulSoup td object """
    return ''.join([x.strip() + ' ' for x in cell.findAll(text=True)]).strip()
08037cbe5d2058206de029417f03d211d350820f
706,447
import torch

def test_augmentation(text, text_lengths, augmentation_class):
    """
    test_augmentation augments the input text during evaluation.

    :param text: input text
    :param text_lengths: text length
    :param augmentation_class: augmentation class
    :return:
    """
    augmentation_text = augmentation_class.test_augment(text, text_lengths)
    augmentation_text.append(text)
    augmentation_text = torch.FloatTensor(augmentation_text).long()
    return augmentation_text
2f83ec9fa0afa110d05f05f52e85cae65a28c6f9
706,449
import math

def ToMercPosition(lat_deg, num_tiles):
    """Calculate position of a given latitude on qt grid.

    LOD is log2(num_tiles)

    Args:
        lat_deg: (float) Latitude in degrees.
        num_tiles: (integer) Number of tiles in the qt grid.

    Returns:
        Floating point position of latitude in tiles relative to equator.
    """
    lat_rad = lat_deg / 180.0 * math.pi
    y_merc = math.log(math.tan(lat_rad / 2.0 + math.pi / 4.0))
    return num_tiles / 2.0 * (1 + y_merc / math.pi)
1ae7e7b2da9ec3ee20756ef7ffa13d99485aaea7
706,450
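# Usage sketch for ToMercPosition above: at the equator the position is the
# middle of the grid (illustrative values):
assert ToMercPosition(0.0, 256) == 128.0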
def check_output_filepath(filepath):
    """ Check and return an appropriate output_filepath parameter.

    Ensures the file is a csv file. Ensures a value is set. If a value is
    not set or is not a csv, it will return a default value.

    :param filepath: string filepath name
    :returns: a string representing a filepath location.
    """
    if filepath.endswith('.csv'):
        return filepath
    return "clean_rules_report.csv"
63fcf697dbde9a62cc39311b4d234955520f6394
706,451
def _single_value_set(target_list, value):
    """ Return true if this constraint has only one value and it is this
    one. """
    return len(target_list) == 1 and target_list[0] == value
472ebe1aa9726c70642423d05fa55723496e9bc5
706,452
def get_positive_input(message, float_parse=False, allow_zero=False):
    """ Obtains and returns a positive int from the user.

    Preconditions:
        message: non-empty string
        float_parse: bool defaulted to False
        allow_zero: bool defaulted to False

    Parameters:
        message: The message that is printed when obtaining the input.
        float_parse: Whether to parse input to float or int
        allow_zero: Whether to allow zero as an input

    Postconditions:
        num: The valid inputted number.
    """
    # use ternary operator to determine the sign to use
    sign = ">=" if allow_zero else ">"
    # try to parse input to either a float or int
    try:
        if float_parse:
            num = float(input("(must be " + sign + " 0), " + message).strip())
        else:
            num = int(input("(must be " + sign + " 0), " + message).strip())
        # raise a ValueError if input was invalid
        if (not allow_zero) and (num <= 0):
            raise ValueError()
        elif num < 0:
            raise ValueError()
        return num
    # catch any ValueErrors.
    except ValueError:
        print("Not a valid input.")
        # recurse the method until proper input was found
        return get_positive_input(message, float_parse, allow_zero)
17982ff069907464c70df7b6efb1f42d3811962e
706,453
def flights_preclean(df):
    """
    Input: Raw dataframe of Flights table.
    Output: Cleaned flights table:
        - Remove cancelled rows, made available in new dataframe "df_can"
        - Drop columns ['Unnamed: 0', 'branded_code_share', 'mkt_carrier',
          'cancelled', 'cancellation_code', 'flights', 'air_time',
          'first_dep_time', 'total_add_gtime', 'longest_add_gtime', 'no_name']
        - Fill null values in delay columns
        - Drop remaining null values
    """
    global df_can
    df_can = df[df.cancelled == 1].copy()
    print("Removed cancelled flights - now available in dataframe 'df_can'")
    df = df[df.cancelled == 0]
    df = df.drop(columns=['Unnamed: 0', 'branded_code_share', 'mkt_carrier',
                          'cancelled', 'cancellation_code', 'flights',
                          'air_time', 'first_dep_time', 'total_add_gtime',
                          'longest_add_gtime', 'no_name'])
    for col in ['carrier_delay', 'weather_delay', 'nas_delay',
                'security_delay', 'late_aircraft_delay']:
        df[col] = df[col].fillna(value=0)
    df = df.dropna()
    return df
61dcfa6afd6ec7dd0abb5525187938d6ab978996
706,454
def convert_spectral_kernel_quint(sequences, list_seq_to_id):
    """ For each sequence, return the number of times each 5-mer in
    list_seq_to_id appears. """
    final = []
    for j in range(len(sequences)):
        sequence = sequences[j]
        dico_appear = {seq: 0 for seq in list_seq_to_id}
        for i in range(len(sequence) - 4):
            seq_to_add = (sequence[i] + sequence[i+1] + sequence[i+2]
                          + sequence[i+3] + sequence[i+4])
            dico_appear[seq_to_add] += 1
        final.append([dico_appear[k] for k in list_seq_to_id])
    return final
49f727dd26822834bad2c9a448136288dc1c426c
706,455